-rw-r--r--  mysql-test/main/fetch_first.result |  42
-rw-r--r--  mysql-test/main/fetch_first.test   |  11
-rw-r--r--  mysql-test/main/order_by.result    |  20
-rw-r--r--  sql/filesort.cc                    | 276
-rw-r--r--  sql/filesort_utils.cc              | 243
-rw-r--r--  sql/filesort_utils.h               |  77
-rw-r--r--  sql/sql_const.h                    |  14
-rw-r--r--  sql/sql_sort.h                     |  17
8 files changed, 467 insertions(+), 233 deletions(-)
diff --git a/mysql-test/main/fetch_first.result b/mysql-test/main/fetch_first.result
index c2aacbf3687..25459206d2d 100644
--- a/mysql-test/main/fetch_first.result
+++ b/mysql-test/main/fetch_first.result
@@ -435,14 +435,50 @@ id first_name last_name score
 7 Bob Trasc 9
 2 John Doe 6
 6 John Elton 8.1
+analyze FORMAT=JSON
 select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
+offset 2 rows
+fetch first 3 rows only;
+ANALYZE
+{
+  "query_block": {
+    "select_id": 1,
+    "r_loops": 1,
+    "r_total_time_ms": "REPLACED",
+    "read_sorted_file": {
+      "r_rows": 5,
+      "filesort": {
+        "sort_key": "t1.first_name, t1.last_name, t1.score",
+        "r_loops": 1,
+        "r_total_time_ms": "REPLACED",
+        "r_limit": 5,
+        "r_used_priority_queue": true,
+        "r_output_rows": 6,
+        "r_sort_mode": "sort_key,addon_fields",
+        "table": {
+          "table_name": "t1",
+          "access_type": "ALL",
+          "r_loops": 1,
+          "rows": 8,
+          "r_rows": 8,
+          "r_table_time_ms": "REPLACED",
+          "r_other_time_ms": "REPLACED",
+          "filtered": 100,
+          "r_filtered": 100
+        }
+      }
+    }
+  }
+}
+select * from t1
+order by first_name, last_name, score
 offset 2 rows
 fetch first 3 rows only;
 id first_name last_name score
 2 John Doe 6
 6 John Elton 8.1
-5 John Smith 7
+4 John Smith 6
 select * from t1
 order by first_name, last_name
 offset 2 rows
@@ -454,7 +490,7 @@ id first_name last_name score
 4 John Smith 6
 5 John Smith 7
 select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
 offset 3 rows
 fetch first 3 rows only;
 id first_name last_name score
diff --git a/mysql-test/main/fetch_first.test b/mysql-test/main/fetch_first.test
index 04015cfde57..ac3693f2099 100644
--- a/mysql-test/main/fetch_first.test
+++ b/mysql-test/main/fetch_first.test
@@ -347,8 +347,15 @@ order by first_name, last_name
 offset 1 rows
 fetch first 3 rows with ties;
 
+--source include/analyze-format.inc
+analyze FORMAT=JSON
 select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
+offset 2 rows
+fetch first 3 rows only;
+
+select * from t1
+order by first_name, last_name, score
 offset 2 rows
 fetch first 3 rows only;
 
@@ -358,7 +365,7 @@ offset 2 rows
 fetch first 3 rows with ties;
 
 select * from t1
-order by first_name, last_name
+order by first_name, last_name, score
 offset 3 rows
 fetch first 3 rows only;
 
diff --git a/mysql-test/main/order_by.result b/mysql-test/main/order_by.result
index 33eedf3f103..6c2ac78f57c 100644
--- a/mysql-test/main/order_by.result
+++ b/mysql-test/main/order_by.result
@@ -3660,16 +3660,16 @@ t2.key1 = t1.a and t2.key1 IS NOT NULL
 ORDER BY t2.key2 ASC
 LIMIT 1)
-900-0-123456
-901-1-123456
-902-2-123456
-903-3-123456
-904-4-123456
-905-5-123456
-906-6-123456
-907-7-123456
-908-8-123456
-909-9-123456
+100-0-123456
+101-1-123456
+102-2-123456
+103-3-123456
+104-4-123456
+105-5-123456
+106-6-123456
+107-7-123456
+108-8-123456
+109-9-123456
 drop table t1,t2;
 # End of 10.3 tests
 #
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 83189365e5d..c97572c3bb1 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -63,10 +63,6 @@ static Addon_fields *get_addon_fields(TABLE *table, uint sortlength,
                                       uint *addon_length,
                                       uint *m_packable_length);
 
-static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
-                                   TABLE *table,
-                                   ha_rows records, size_t memory_available);
-
 static void store_key_part_length(uint32 num, uchar *to, uint bytes)
 {
   switch(bytes) {
@@ -127,6 +123,7 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
   }
   rec_length= sort_length + addon_length;
   limit_rows= maxrows;
+  sort_form= table;
 }
 
 
@@ -161,6 +158,7 @@ void Sort_param::try_to_pack_addons(ulong max_length_for_sort_data)
   rec_length+= sz;
 }
 
+
 /**
   Sort a table.
   Creates a set of pointers that can be used to read the rows
@@ -269,16 +267,50 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
     // If number of rows is not known, use as much of sort buffer as possible.
     num_rows= table->file->estimate_rows_upper_bound();
 
-  if (check_if_pq_applicable(&param, sort,
-                             table, num_rows, memory_available))
+  Sort_costs costs;
+  costs.compute_sort_costs(&param, num_rows, memory_available,
+                           param.using_addon_fields());
+
+  if (costs.fastest_sort == NO_SORT_POSSIBLE_OUT_OF_MEM)
   {
-    DBUG_PRINT("info", ("filesort PQ is applicable"));
+    my_error(ER_OUT_OF_SORTMEMORY, MYF(ME_ERROR_LOG + ME_FATAL));
+    goto err;
+  }
+
+  if (costs.fastest_sort == PQ_SORT_ALL_FIELDS ||
+      costs.fastest_sort == PQ_SORT_ORDER_BY_FIELDS)
+  {
+    /* We are going to use a priority queue */
     thd->query_plan_flags|= QPLAN_FILESORT_PRIORITY_QUEUE;
     status_var_increment(thd->status_var.filesort_pq_sorts_);
     tracker->incr_pq_used();
    param.using_pq= true;
     const size_t compare_length= param.sort_length;
     DBUG_ASSERT(param.using_packed_sortkeys() == false);
+
+    if (costs.fastest_sort == PQ_SORT_ORDER_BY_FIELDS && sort->addon_fields)
+    {
+      /*
+        The caller has addon fields enabled, but we have decided not to
+        use them. Delete them.
+      */
+      my_free(sort->addon_fields);
+      sort->addon_fields= NULL;
+      param.addon_fields= NULL;
+      param.res_length= param.ref_length;
+      /*
+        Add the ref (the rowid, which is stored last in the sort key) to
+        the sort, as we want to retrieve rows in id order, if possible.
+      */
+      param.sort_length+= param.ref_length;
+      param.rec_length= param.sort_length;
+    }
+
+    /* Priority queues need one extra element for doing an INSERT */
+    param.max_keys_per_buffer= param.limit_rows + 1;
+    if (!sort->alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length))
+      goto err;
+
     /*
       For PQ queries (with limit) we know exactly how many pointers/records
       we have in the buffer, so to simplify things, we initialize
@@ -312,9 +344,10 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
     tracker->report_sort_keys_format(param.using_packed_sortkeys());
     param.using_pq= false;
 
+    /* Allocate sort buffer. Use as much memory as possible. */
     size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY,
                                    param.sort_length*MERGEBUFF2);
-    set_if_bigger(min_sort_memory, sizeof(Merge_chunk*)*MERGEBUFF2);
+    set_if_bigger(min_sort_memory, sizeof(Merge_chunk*) * MERGEBUFF2);
     while (memory_available >= min_sort_memory)
     {
       ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
@@ -350,14 +383,13 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
                        DISK_CHUNK_SIZE, MYF(MY_WME)))
     goto err;
 
-  param.sort_form= table;
   param.local_sortorder=
     Bounds_checked_array<SORT_FIELD>(filesort->sortorder, s_length);
 
   num_rows= find_all_keys(thd, &param, select,
                           sort,
                           &buffpek_pointers,
-                          &tempfile, 
+                          &tempfile,
                           pq.is_initialized() ? &pq : NULL,
                           &sort->found_rows);
   if (num_rows == HA_POS_ERROR)
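
The PQ_SORT_ORDER_BY_FIELDS branch above reshapes the sort layout: the addon
fields are freed, the rowid is appended to the sort key, and the per-element
result becomes the rowid itself. A standalone sketch of that length
bookkeeping (the struct and function are illustrative stand-ins, not MariaDB
code; only the three assignments mirror the patch):

#include <cstdint>

// Illustrative stand-in for the relevant Sort_param length fields.
struct LengthsDemo
{
  uint32_t sort_length;  // bytes of sort key per element
  uint32_t ref_length;   // bytes of rowid
  uint32_t res_length;   // bytes returned to the caller per element
  uint32_t rec_length;   // bytes of one record in the sort buffer
};

// What the patch does when the PQ runs without addon fields: the result
// becomes the rowid, the rowid is appended to the sort key so rows can be
// re-fetched (in rowid order when possible), and the whole record shrinks
// to just the enlarged key.
static void strip_addon_fields(LengthsDemo *p)
{
  p->res_length= p->ref_length;
  p->sort_length+= p->ref_length;
  p->rec_length= p->sort_length;
}
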
@@ -398,7 +430,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
     buffpek= (Merge_chunk *) sort->buffpek.str;
     close_cached_file(&buffpek_pointers);
     /* Open cached file if it isn't open */
-    if (! my_b_inited(outfile) &&
+    if (!my_b_inited(outfile) &&
         open_cached_file(outfile, mysql_tmpdir, TEMP_PREFIX,
                          DISK_CHUNK_SIZE, MYF(MY_WME)))
       goto err;
@@ -416,11 +448,11 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
       maxbuffer--;                              // Offset from 0
       if (merge_many_buff(&param, sort->get_raw_buf(),
-                          buffpek,&maxbuffer,
-                          &tempfile))
+                          buffpek, &maxbuffer,
+                          &tempfile))
         goto err;
       if (flush_io_cache(&tempfile) ||
-          reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
+          reinit_io_cache(&tempfile, READ_CACHE, 0L, 0, 0))
         goto err;
       if (merge_index(&param,
                       sort->get_raw_buf(),
@@ -809,7 +841,7 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
     {
       if (no free space in sort_keys buffers)
       {
-        sort sort_keys buffer;
+        qsort sort_keys buffer;
         dump sorted sequence to 'tempfile';
         dump BUFFPEK describing sequence location into 'buffpek_pointers';
       }
@@ -836,7 +868,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
                              ha_rows *found_rows)
 {
   int error, quick_select;
-  uint idx, indexpos;
+  uint num_elements_in_buffer, indexpos;
   uchar *ref_pos, *next_pos, ref_buff[MAX_REFLENGTH];
   TABLE *sort_form;
   handler *file;
@@ -851,15 +883,14 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
              (select ? select->quick ? "ranges" : "where":
               "every row")));
 
-  idx=indexpos=0;
-  error=quick_select=0;
-  sort_form=param->sort_form;
-  file=sort_form->file;
-  ref_pos= ref_buff;
-  quick_select=select && select->quick;
+  num_elements_in_buffer= indexpos= 0;
+  error= 0;
   *found_rows= 0;
-  ref_pos= &file->ref[0];
-  next_pos=ref_pos;
+  sort_form= param->sort_form;
+  file= sort_form->file;
+  ref_pos= ref_buff;
+  quick_select= select && select->quick;
+  next_pos= ref_pos= &file->ref[0];
 
   DBUG_EXECUTE_IF("show_explain_in_find_all_keys",
                   dbug_serve_apcs(thd, 1);
@@ -867,7 +898,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
 
   if (!quick_select)
   {
-    next_pos=(uchar*) 0;                        /* Find records in sequence */
+    next_pos= (uchar*) 0;                       /* Find records in sequence */
    DBUG_EXECUTE_IF("bug14365043_1",
                     DBUG_SET("+d,ha_rnd_init_fail"););
     if (unlikely(file->ha_rnd_init_with_error(1)))
@@ -966,12 +997,13 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
       {
         if (fs_info->isfull())
         {
-          if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
+          if (write_keys(param, fs_info, num_elements_in_buffer,
+                         buffpek_pointers, tempfile))
             goto err;
-          idx= 0;
+          num_elements_in_buffer= 0;
           indexpos++;
         }
-        if (idx == 0)
+        if (num_elements_in_buffer == 0)
           fs_info->init_next_record_pointer();
         uchar *start_of_rec= fs_info->get_next_record_pointer();
 
@@ -979,7 +1011,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
                                      ref_pos, using_packed_sortkeys);
         if (packed_format && rec_sz != param->rec_length)
           fs_info->adjust_next_record_pointer(rec_sz);
-        idx++;
+        num_elements_in_buffer++;
       }
       num_records++;
       (*param->accepted_rows)++;
@@ -1015,8 +1047,9 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
     file->print_error(error,MYF(ME_ERROR_LOG));
     DBUG_RETURN(HA_POS_ERROR);
   }
-  if (indexpos && idx &&
-      write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
+  if (indexpos && num_elements_in_buffer &&
+      write_keys(param, fs_info, num_elements_in_buffer, buffpek_pointers,
+                 tempfile))
     DBUG_RETURN(HA_POS_ERROR);                  /* purecov: inspected */
 
   (*found_rows)= num_records;
@@ -1496,145 +1529,6 @@ static bool save_index(Sort_param *param, uint count,
 }
 
 
-/**
-  Test whether priority queue is worth using to get top elements of an
-  ordered result set. If it is, then allocates buffer for required amount of
-  records
-
-  @param param            Sort parameters.
-  @param filesort_info    Filesort information.
-  @param table            Table to sort.
-  @param num_rows         Estimate of number of rows in source record set.
-  @param memory_available Memory available for sorting.
-
-  DESCRIPTION
-    Given a query like this:
-      SELECT ... FROM t ORDER BY a1,...,an LIMIT max_rows;
-    This function tests whether a priority queue should be used to keep
-    the result. Necessary conditions are:
-    - estimate that it is actually cheaper than merge-sort
-    - enough memory to store the <max_rows> records.
-
-    If we don't have space for <max_rows> records, but we *do* have
-    space for <max_rows> keys, we may rewrite 'table' to sort with
-    references to records instead of additional data.
-    (again, based on estimates that it will actually be cheaper).
-
-  @retval
-    true  - if it's ok to use PQ
-    false - PQ will be slower than merge-sort, or there is not enough memory.
-*/
-
-static bool check_if_pq_applicable(Sort_param *param,
-                                   SORT_INFO *filesort_info,
-                                   TABLE *table, ha_rows num_rows,
-                                   size_t memory_available)
-{
-  DBUG_ENTER("check_if_pq_applicable");
-
-  /*
-    How much Priority Queue sort is slower than qsort.
-    Measurements (see unit test) indicate that PQ is roughly 3 times slower.
-  */
-  const double PQ_slowness= 3.0;
-
-  if (param->limit_rows == HA_POS_ERROR)
-  {
-    DBUG_PRINT("info", ("No LIMIT"));
-    DBUG_RETURN(false);
-  }
-
-  if (param->limit_rows >= UINT_MAX - 2)
-  {
-    DBUG_PRINT("info", ("Too large LIMIT"));
-    DBUG_RETURN(false);
-  }
-
-  size_t num_available_keys=
-    memory_available / (param->rec_length + sizeof(char*));
-  // We need 1 extra record in the buffer, when using PQ.
-  param->max_keys_per_buffer= (uint) param->limit_rows + 1;
-
-  if (num_rows < num_available_keys)
-  {
-    // The whole source set fits into memory.
-    if (param->limit_rows < num_rows/PQ_slowness )
-    {
-      filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
-                                       param->rec_length);
-      DBUG_RETURN(filesort_info->sort_buffer_size() != 0);
-    }
-    else
-    {
-      // PQ will be slower.
-      DBUG_RETURN(false);
-    }
-  }
-
-  // Do we have space for LIMIT rows in memory?
-  if (param->max_keys_per_buffer < num_available_keys)
-  {
-    filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
-                                     param->rec_length);
-    DBUG_RETURN(filesort_info->sort_buffer_size() != 0);
-  }
-
-  // Try to strip off addon fields.
-  if (param->addon_fields)
-  {
-    const size_t row_length=
-      param->sort_length + param->ref_length + sizeof(char*);
-    num_available_keys= memory_available / row_length;
-
-    // Can we fit all the keys in memory?
-    if (param->max_keys_per_buffer < num_available_keys)
-    {
-      /*
-        Get cost of merge sort. Note that this does not include
-        scanning the table and comparing the rows with the where clause!
-      */
-      const double sort_merge_cost=
-        get_merge_many_buffs_cost_fast(num_rows,
-                                       num_available_keys,
-                                       (uint)row_length,
-                                       ROWID_COMPARE_COST_THD(table->in_use));
-      /*
-        PQ has cost:
-        (insert + qsort) * log(queue size) * WHERE_COMPARE_COST
-        cost of rowid lookup of the original row to find the addon fields.
-      */
-      const double pq_cpu_cost=
-        (PQ_slowness * num_rows + param->max_keys_per_buffer) *
-        log((double) param->max_keys_per_buffer) *
-        ROWID_COMPARE_COST_THD(table->in_use);
-      const double pq_io_cost= table->file->ha_rndpos_time(param->limit_rows);
-      const double pq_cost= pq_cpu_cost + pq_io_cost;
-
-      if (sort_merge_cost < pq_cost)
-        DBUG_RETURN(false);
-
-      filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
-                                       param->sort_length + param->ref_length);
-
-      if (filesort_info->sort_buffer_size() > 0)
-      {
-        /* Make attached data to be references instead of fields. */
-        my_free(filesort_info->addon_fields);
-        filesort_info->addon_fields= NULL;
-        param->addon_fields= NULL;
-
-        param->res_length= param->ref_length;
-        param->sort_length+= param->ref_length;
-        param->rec_length= param->sort_length;
-
-        DBUG_RETURN(true);
-      }
-    }
-  }
-  DBUG_RETURN(false);
-}
-
-
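For contrast with the new model in filesort_utils.cc below, the deleted
heuristic reduces to a single comparison: the priority queue wins when the
estimated merge-sort cost is at least the PQ's CPU cost plus the cost of
re-reading the limited rows by rowid. A minimal sketch of that rule (the
inputs are illustrative; in the deleted code they came from
get_merge_many_buffs_cost_fast(), ROWID_COMPARE_COST_THD() and
handler::ha_rndpos_time()):

#include <cmath>

// Sketch of the deleted decision rule, not actual MariaDB code.
static bool old_pq_wins(double num_rows, double queue_size,
                        double compare_cost, double merge_sort_cost,
                        double rnd_pos_cost)
{
  const double PQ_slowness= 3.0;  // PQ measured roughly 3x slower than qsort
  const double pq_cpu_cost= (PQ_slowness * num_rows + queue_size) *
                            std::log(queue_size) * compare_cost;
  return merge_sort_cost >= pq_cpu_cost + rnd_pos_cost;
}
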
 /**
   Merge buffers to make < MERGEBUFF2 buffers.
 */
 
 int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer,
@@ -1646,28 +1540,28 @@ int merge_many_buff(Sort_param *param, Sort_buffer sort_buffer,
   DBUG_ENTER("merge_many_buff");
 
   if (*maxbuffer < MERGEBUFF2)
-    DBUG_RETURN(0);                             /* purecov: inspected */
+    DBUG_RETURN(0);                            /* purecov: inspected */
   if (flush_io_cache(t_file) ||
       open_cached_file(&t_file2, mysql_tmpdir, TEMP_PREFIX,
                        DISK_CHUNK_SIZE, MYF(MY_WME)))
     DBUG_RETURN(1);                             /* purecov: inspected */
 
-  from_file= t_file ; to_file= &t_file2;
+  from_file= t_file; to_file= &t_file2;
   while (*maxbuffer >= MERGEBUFF2)
   {
-    if (reinit_io_cache(from_file,READ_CACHE,0L,0,0))
+    if (reinit_io_cache(from_file, READ_CACHE, 0L, 0, 0))
      goto cleanup;
-    if (reinit_io_cache(to_file,WRITE_CACHE,0L,0,0))
+    if (reinit_io_cache(to_file, WRITE_CACHE, 0L, 0, 0))
      goto cleanup;
     lastbuff=buffpek;
-    for (i=0 ; i <= *maxbuffer-MERGEBUFF*3/2 ; i+=MERGEBUFF)
+    for (i= 0; i <= *maxbuffer - MERGEBUFF * 3 / 2 ; i+= MERGEBUFF)
     {
-      if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++,
-                        buffpek+i,buffpek+i+MERGEBUFF-1,0))
+      if (merge_buffers(param, from_file, to_file, sort_buffer, lastbuff++,
+                        buffpek + i, buffpek + i + MERGEBUFF - 1, 0))
       goto cleanup;
     }
-    if (merge_buffers(param,from_file,to_file,sort_buffer, lastbuff++,
-                      buffpek+i,buffpek+ *maxbuffer,0))
+    if (merge_buffers(param, from_file, to_file, sort_buffer, lastbuff++,
+                      buffpek + i, buffpek + *maxbuffer, 0))
       break;                                    /* purecov: inspected */
     if (flush_io_cache(to_file))
       break;                                    /* purecov: inspected */
@@ -1879,17 +1773,17 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
     first_cmp_arg= param->get_compare_argument(&sort_length);
   }
   if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1,
-                          offsetof(Merge_chunk,m_current_key), 0, 
+                          offsetof(Merge_chunk,m_current_key), 0,
                           (queue_compare) cmp, first_cmp_arg, 0, 0)))
     DBUG_RETURN(1);                                /* purecov: inspected */
-  const size_t chunk_sz = (sort_buffer.size()/((uint) (Tb-Fb) +1));
-  for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
+  const size_t chunk_sz= (sort_buffer.size()/((uint) (Tb-Fb) +1));
+  for (buffpek= Fb; buffpek <= Tb; buffpek++)
   {
     buffpek->set_buffer(strpos, strpos + chunk_sz);
     buffpek->set_max_keys(maxcount);
     bytes_read= read_to_buffer(from_file, buffpek, param, packed_format);
     if (unlikely(bytes_read == (ulong) -1))
-      goto err;                                 /* purecov: inspected */
+      goto err;                                    /* purecov: inspected */
     strpos+= chunk_sz;
     // If less data in buffers than expected
     buffpek->set_max_keys(buffpek->mem_count());
@@ -1971,11 +1865,11 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
       */
       if (!check_dupl_count || dupl_count >= min_dupl_count)
       {
-        if(my_b_write(to_file,
-                      src + (offset_for_packing ?
-                             rec_length - res_length :     // sort length
-                             wr_offset),
-                      bytes_to_write))
+        if (my_b_write(to_file,
+                       src + (offset_for_packing ?
+                              rec_length - res_length :    // sort length
+                              wr_offset),
+                       bytes_to_write))
           goto err;                            /* purecov: inspected */
       }
       if (cmp)
@@ -2210,9 +2104,9 @@ Type_handler_decimal_result::sort_length(THD *thd,
     is not allowed
 
   @note
-  * sortorder->length and other members are updated for each sort item.
-  * TODO what is the meaning of this value if some fields are using packing while
-  others are not?
+  - sortorder->length and other members are updated for each sort item.
+  - TODO what is the meaning of this value if some fields are using packing
+    while others are not?
 
   @return
     Total length of sort buffer in bytes
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index 25359809e92..8449d73769e 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -23,21 +23,94 @@
 
 PSI_memory_key key_memory_Filesort_buffer_sort_keys;
 
+
+/*
+  Different ways to do sorting:
+  Merge Sort -> Without addon fields, with fixed length
+  Merge Sort -> Without addon fields, with dynamic length
+  Merge Sort -> With addon fields, with fixed length
+  Merge Sort -> With addon fields, with dynamic length
+
+  Priority queue -> Without addon fields
+  Priority queue -> With addon fields
+
+  With a PQ (priority queue) we could have a simple key (memcmp) or a
+  complex key (double & varchar, for example). This cost difference
+  is currently not considered.
+*/
+
+
 /**
-  A local helper function. See comments for get_merge_buffers_cost().
+  Compute the cost of running qsort over a set of rows.
+  @param num_rows          How many rows will be sorted.
+  @param with_addon_fields True if the sorted rows include the whole row
+                           (with addon fields), false if they include just
+                           the keys themselves.
+
+  @retval
+    Cost of the operation.
 */
 
 static
-double get_merge_cost(ha_rows num_elements,
-                      ha_rows num_buffers,
-                      uint elem_size,
-                      double key_compare_cost)
+double get_qsort_sort_cost(size_t num_rows, bool with_addon_fields)
 {
-  return (2.0 * ((double) num_elements * elem_size) / IO_SIZE
-          + (double) num_elements * log((double) num_buffers) *
-            key_compare_cost / M_LN2);
+  const double row_copy_cost= with_addon_fields ? DEFAULT_ROW_COPY_COST :
+                                                  DEFAULT_KEY_COPY_COST;
+  const double key_cmp_cost= DEFAULT_KEY_COMPARE_COST;
+  const double qsort_constant_factor= QSORT_SORT_SLOWNESS_CORRECTION_FACTOR *
+                                      (row_copy_cost + key_cmp_cost);
+
+  return qsort_constant_factor * num_rows * log2(1.0 + num_rows);
 }
 
+
+/**
+  Compute the cost of sorting num_rows and only retrieving queue_size rows.
+  @param num_rows          How many rows will be sorted.
+  @param queue_size        How many rows will be returned by the priority
+                           queue.
+  @param with_addon_fields True if the sorted rows include the whole row
+                           (with addon fields), false if they include just
+                           the keys themselves.
+
+  @retval
+    Cost of the operation.
+*/
+
+double get_pq_sort_cost(size_t num_rows, size_t queue_size,
+                        bool with_addon_fields)
+{
+  const double row_copy_cost= with_addon_fields ? DEFAULT_ROW_COPY_COST :
+                                                  DEFAULT_KEY_COPY_COST;
+  const double key_cmp_cost= DEFAULT_KEY_COMPARE_COST;
+  /* 2 -> 1 insert, 1 pop from the queue */
+  const double pq_sort_constant_factor= PQ_SORT_SLOWNESS_CORRECTION_FACTOR *
+                                        2.0 * (row_copy_cost + key_cmp_cost);
+
+  return pq_sort_constant_factor * num_rows * log2(1.0 + queue_size);
+}
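
Both new formulas share the shape factor * n * log2(1 + m): qsort pays log2
of the full row count, while the bounded queue pays log2 of the queue size
only, twice per row (insert and pop). A self-contained restatement with
placeholder constants (the real values are the DEFAULT_*_COST defines and
the correction factors added to sql_const.h further below):

#include <cmath>
#include <cstddef>

// Placeholder constants; the real ones live in sql/sql_const.h.
static const double QSORT_FACTOR= 0.1; // QSORT_SORT_SLOWNESS_CORRECTION_FACTOR
static const double PQ_FACTOR= 0.1;    // PQ_SORT_SLOWNESS_CORRECTION_FACTOR

// Cost of qsort over num_rows: factor * n * log2(1 + n).
static double qsort_cost(size_t num_rows, double copy_cost, double cmp_cost)
{
  return QSORT_FACTOR * (copy_cost + cmp_cost) *
         num_rows * std::log2(1.0 + num_rows);
}

// Cost of pushing num_rows through a queue of queue_size elements:
// factor * 2 * n * log2(1 + queue_size), the 2 covering insert + pop.
static double pq_cost(size_t num_rows, size_t queue_size,
                      double copy_cost, double cmp_cost)
{
  return PQ_FACTOR * 2.0 * (copy_cost + cmp_cost) *
         num_rows * std::log2(1.0 + queue_size);
}
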
+
+
+/**
+  Compute the cost of merging "num_buffers" sorted buffers using a priority
+  queue.
+
+  See comments for get_merge_buffers_cost().
+*/
+
+static
+double get_merge_cost(ha_rows num_elements, ha_rows num_buffers,
+                      size_t elem_size, double compare_cost)
+{
+  /* 2 -> 1 read + 1 write */
+  const double io_cost= (2.0 * (num_elements * elem_size +
+                                DISK_CHUNK_SIZE - 1) /
+                         DISK_CHUNK_SIZE);
+  /* 2 -> 1 insert, 1 pop for the priority queue used to merge the buffers. */
+  const double cpu_cost= (2.0 * num_elements * log2(1.0 + num_buffers) *
+                          compare_cost) * PQ_SORT_SLOWNESS_CORRECTION_FACTOR;
+  return io_cost + cpu_cost;
+}
+
+
 /**
   This is a simplified, and faster version of @see get_merge_many_buffs_cost().
   We calculate the cost of merging buffers, by simulating the actions
@@ -48,28 +121,36 @@ double get_merge_cost(ha_rows num_elements,
 
 double get_merge_many_buffs_cost_fast(ha_rows num_rows,
                                       ha_rows num_keys_per_buffer,
-                                      uint elem_size,
-                                      double key_compare_cost)
+                                      size_t elem_size,
+                                      double key_compare_cost,
+                                      bool with_addon_fields)
 {
+  DBUG_ASSERT(num_keys_per_buffer != 0);
+
   ha_rows num_buffers= num_rows / num_keys_per_buffer;
   ha_rows last_n_elems= num_rows % num_keys_per_buffer;
   double total_cost;
+  double full_buffer_sort_cost;
 
-  // Calculate CPU cost of sorting buffers.
-  total_cost=
-    ((num_buffers * num_keys_per_buffer * log(1.0 + num_keys_per_buffer) +
-      last_n_elems * log(1.0 + last_n_elems)) * key_compare_cost);
+  /* Calculate cost for sorting all merge buffers + the last one. */
+  full_buffer_sort_cost= get_qsort_sort_cost(num_keys_per_buffer,
+                                             with_addon_fields);
+  total_cost= (num_buffers * full_buffer_sort_cost +
+               get_qsort_sort_cost(last_n_elems, with_addon_fields));
 
-  // Simulate behavior of merge_many_buff().
+  if (num_buffers >= MERGEBUFF2)
+    total_cost+= TMPFILE_CREATE_COST * 2;       // We are creating 2 files.
+
+  /* Simulate behavior of merge_many_buff(). */
   while (num_buffers >= MERGEBUFF2)
   {
-    // Calculate # of calls to merge_buffers().
-    const ha_rows loop_limit= num_buffers - MERGEBUFF*3/2;
-    const ha_rows num_merge_calls= 1 + loop_limit/MERGEBUFF;
+    /* Calculate # of calls to merge_buffers(). */
+    const ha_rows loop_limit= num_buffers - MERGEBUFF * 3 / 2;
+    const ha_rows num_merge_calls= 1 + loop_limit / MERGEBUFF;
     const ha_rows num_remaining_buffs=
       num_buffers - num_merge_calls * MERGEBUFF;
 
-    // Cost of merge sort 'num_merge_calls'.
+    /* Cost of merge sort 'num_merge_calls'. */
     total_cost+=
       num_merge_calls *
      get_merge_cost(num_keys_per_buffer * MERGEBUFF, MERGEBUFF, elem_size,
@@ -94,6 +175,130 @@ double get_merge_many_buffs_cost_fast(ha_rows num_rows,
   return total_cost;
 }
 
+
+void Sort_costs::compute_fastest_sort()
+{
+  lowest_cost= DBL_MAX;
+  uint min_idx= NO_SORT_POSSIBLE_OUT_OF_MEM;
+  for (uint i= 0; i < FINAL_SORT_TYPE; i++)
+  {
+    if (lowest_cost > costs[i])
+    {
+      min_idx= i;
+      lowest_cost= costs[i];
+    }
+  }
+  fastest_sort= static_cast<enum sort_type>(min_idx);
+}
+
+
+/*
+  Calculate the cost of using a priority queue for filesort.
+  There are two options: using addon fields or not.
+*/
+
+void Sort_costs::compute_pq_sort_costs(Sort_param *param, ha_rows num_rows,
+                                       size_t memory_available,
+                                       bool with_addon_fields)
+{
+  /*
+    Implementation detail of PQ. To be able to keep a PQ of size N we need
+    N+1 elements allocated so we can use the last element as "swap" space
+    for the "insert" operation.
+    TODO(cvicentiu): This should be left as an implementation detail inside
+    the PQ, not have the optimizer take it into account.
+  */
+  size_t queue_size= param->limit_rows + 1;
+  size_t row_length, num_available_keys;
+
+  costs[PQ_SORT_ALL_FIELDS]= DBL_MAX;
+  costs[PQ_SORT_ORDER_BY_FIELDS]= DBL_MAX;
+
+  /*
+    We can't use a priority queue if there's no limit or the limit is
+    too big.
+  */
+  if (param->limit_rows == HA_POS_ERROR ||
+      param->limit_rows >= UINT_MAX - 2)
+    return;
+
+  /* Calculate the cost without addon keys (probably using less memory) */
+  row_length= param->sort_length + param->ref_length + sizeof(char*);
+  num_available_keys= memory_available / row_length;
+
+  if (queue_size < num_available_keys)
+  {
+    costs[PQ_SORT_ORDER_BY_FIELDS]=
+      get_pq_sort_cost(num_rows, queue_size, false) +
+      param->sort_form->file->ha_rndpos_time(MY_MIN(queue_size - 1,
+                                                    num_rows));
+  }
+
+  /* Calculate the cost with addon fields */
+  if (with_addon_fields)
+  {
+    row_length= param->rec_length + sizeof(char *);
+    num_available_keys= memory_available / row_length;
+
+    if (queue_size < num_available_keys)
+      costs[PQ_SORT_ALL_FIELDS]= get_pq_sort_cost(num_rows, queue_size, true);
+  }
+}
+
+
+/*
+  Calculate the cost of using qsort, with an optional merge sort, for
+  resolving the filesort. There are two options: using addon fields or not.
+*/
+
+void Sort_costs::compute_merge_sort_costs(Sort_param *param,
+                                          ha_rows num_rows,
+                                          size_t memory_available,
+                                          bool with_addon_fields)
+{
+  size_t row_length= param->sort_length + param->ref_length + sizeof(char *);
+  size_t num_available_keys= memory_available / row_length;
+
+  costs[MERGE_SORT_ALL_FIELDS]= DBL_MAX;
+  costs[MERGE_SORT_ORDER_BY_FIELDS]= DBL_MAX;
+
+  if (num_available_keys)
+    costs[MERGE_SORT_ORDER_BY_FIELDS]=
+      get_merge_many_buffs_cost_fast(num_rows, num_available_keys,
+                                     row_length, DEFAULT_KEY_COMPARE_COST,
+                                     false) +
+      param->sort_form->file->ha_rndpos_time(MY_MIN(param->limit_rows,
+                                                    num_rows));
+
+  if (with_addon_fields)
+  {
+    /* Compute the cost of merge sort *if* we strip addon fields. */
+    row_length= param->rec_length + sizeof(char *);
+    num_available_keys= memory_available / row_length;
+
+    if (num_available_keys)
+      costs[MERGE_SORT_ALL_FIELDS]=
+        get_merge_many_buffs_cost_fast(num_rows, num_available_keys,
+                                       row_length, DEFAULT_KEY_COMPARE_COST,
+                                       true);
+  }
+
+  /*
+    TODO(cvicentiu): we do not handle dynamic length fields yet.
+    The code should decide here if the format is FIXED length or DYNAMIC
+    and fill in the appropriate costs.
+  */
+}
+
+
+void Sort_costs::compute_sort_costs(Sort_param *param, ha_rows num_rows,
+                                    size_t memory_available,
+                                    bool with_addon_fields)
+{
+  compute_pq_sort_costs(param, num_rows, memory_available,
+                        with_addon_fields);
+  compute_merge_sort_costs(param, num_rows, memory_available,
+                           with_addon_fields);
+  compute_fastest_sort();
+}
+
 
 /*
   alloc_sort_buffer()
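
Putting the pieces together: at most four strategies receive a finite cost,
a strategy whose buffer does not fit in memory keeps its DBL_MAX sentinel,
and if every slot is still DBL_MAX the sort is reported as impossible. A
condensed standalone model of compute_fastest_sort() (the enum values mirror
the patch; the free function is illustrative):

#include <cfloat>

enum sort_type
{
  PQ_SORT_ALL_FIELDS= 0,
  PQ_SORT_ORDER_BY_FIELDS,
  MERGE_SORT_ALL_FIELDS,
  MERGE_SORT_ORDER_BY_FIELDS,
  FINAL_SORT_TYPE,
  NO_SORT_POSSIBLE_OUT_OF_MEM
};

static sort_type pick_fastest(const double costs[FINAL_SORT_TYPE])
{
  double lowest= DBL_MAX;
  sort_type best= NO_SORT_POSSIBLE_OUT_OF_MEM;
  for (int i= 0; i < FINAL_SORT_TYPE; i++)
  {
    if (costs[i] < lowest)  // a DBL_MAX sentinel can never win
    {
      lowest= costs[i];
      best= static_cast<sort_type>(i);
    }
  }
  return best;
}
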
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
index e2a46d930fe..d3684bab770 100644
--- a/sql/filesort_utils.h
+++ b/sql/filesort_utils.h
@@ -16,9 +16,13 @@
 #ifndef FILESORT_UTILS_INCLUDED
 #define FILESORT_UTILS_INCLUDED
 
+#include "my_global.h"
 #include "my_base.h"
 #include "sql_array.h"
 
+#include <utility>
+
+class TABLE;
 class Sort_param;
 
 /**
@@ -42,17 +46,81 @@ class Sort_param;
 */
 
 double get_merge_many_buffs_cost_fast(ha_rows num_rows,
                                       ha_rows num_keys_per_buffer,
-                                      uint elem_size,
-                                      double key_compare_cost);
+                                      size_t elem_size,
+                                      double compare_cost,
+                                      bool with_addon_fields);
+
+
+
+/**
+  These are the current sorting algorithms we compute costs for:
+
+  PQ_SORT_ALL_FIELDS          Sort via priority queue, with addon fields.
+  PQ_SORT_ORDER_BY_FIELDS     Sort via priority queue, without addon fields.
+
+  MERGE_SORT_ALL_FIELDS       Sort via merge sort, with addon fields.
+  MERGE_SORT_ORDER_BY_FIELDS  Sort via merge sort, without addon fields.
+
+  Note:
+  There is the possibility to do merge-sorting with dynamic length fields.
+  This is more expensive than if there are only fixed length fields,
+  however we do not (yet) account for that extra cost. We can extend the
+  cost computation in the future to cover that case as well.
+
+  Effectively there are 4 possible combinations for merge sort:
+  with/without addon fields, with/without dynamic length fields.
+*/
+
+enum sort_type
+{
+  PQ_SORT_ALL_FIELDS= 0,
+  PQ_SORT_ORDER_BY_FIELDS,
+  MERGE_SORT_ALL_FIELDS,
+  MERGE_SORT_ORDER_BY_FIELDS,
+
+  FINAL_SORT_TYPE,
+
+  NO_SORT_POSSIBLE_OUT_OF_MEM,
+};
+
+struct Sort_costs
+{
+  Sort_costs() :
+    fastest_sort(NO_SORT_POSSIBLE_OUT_OF_MEM), lowest_cost(DBL_MAX) {}
+  void compute_sort_costs(Sort_param *param, ha_rows num_rows,
+                          size_t memory_available,
+                          bool with_addon_fields);
+
+  /* Cached value: the fastest sort method. */
+  enum sort_type fastest_sort;
+  /* Cached value: the lowest cost found. */
+  double lowest_cost;
+private:
+  /*
+    Array holding all computed costs.
+    TODO(cvicentiu): This array is only useful for debugging. If it's not
+    used in debugging code, it can be removed to reduce memory usage.
+  */
+  double costs[FINAL_SORT_TYPE];
+
+  void compute_pq_sort_costs(Sort_param *param, ha_rows num_rows,
+                             size_t memory_available,
+                             bool with_addon_fields);
+  void compute_merge_sort_costs(Sort_param *param, ha_rows num_rows,
+                                size_t memory_available,
+                                bool with_addon_fields);
+  void compute_fastest_sort();
+};
 
 /**
   A wrapper class around the buffer used by filesort().
   The sort buffer is a contiguous chunk of memory,
   containing both records to be sorted, and pointers to said records:
 
-  <start of buffer                         | still unused    | end of buffer>
-  |rec 0|record 1  |rec 2|  ............   |ptr to rec2|ptr to rec1|ptr to rec0|
+  <start of buffer                         | still unused    | end of buffer>
+  | rec0 | rec1 | rec2 |  ............     |ptr to rec2|ptr to rec1|ptr to rec0|
 
   Records will be inserted "left-to-right". Records are not necessarily
   fixed-size, they can be packed and stored without any "gaps".
@@ -284,5 +352,4 @@ private:
 int compare_packed_sort_keys(void *sort_keys, unsigned char **a,
                              unsigned char **b);
 qsort2_cmp get_packed_keys_compare_ptr();
-
 #endif // FILESORT_UTILS_INCLUDED
diff --git a/sql/sql_const.h b/sql/sql_const.h
index 993c0b45b42..7d79ac85cae 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -239,6 +239,20 @@ static inline double cache_hit_ratio(uint ratio)
 #define ROW_LOOKUP_COST       ((double) 1.0)
 
 /*
+  These constants impact the cost of QSORT and priority queue sorting,
+  scaling the "n * log(n)" operations cost proportionally.
+  These factors are < 1.0 to scale down the sorting cost to be comparable
+  to 'read a row' = 1.0 (or 0.55 with default caching).
+  A factor of 0.1 makes the cost of get_pq_sort_cost(10, 10, false) = 0.52
+  (reading 10 rows into a priority queue of 10 elements).
+
+  One consequence of setting this factor too high is that the priority
+  queue will not use addon fields (to resolve the sort without having to
+  do an extra re-read of rows) even if the LIMIT is low.
+*/
+#define QSORT_SORT_SLOWNESS_CORRECTION_FACTOR (0.1)
+#define PQ_SORT_SLOWNESS_CORRECTION_FACTOR    (0.1)
+/*
   Cost of finding and copying keys from the storage engine index cache to
   an internal cache as part of an index scan.
   Used in handler::keyread_time()
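
The 0.52 figure quoted above follows directly from get_pq_sort_cost(): with
n = 10 rows, a queue of 10 and factor 0.1, the formula is
0.1 * 2 * (key_copy + key_cmp) * 10 * log2(11). The snippet below reproduces
it; the 0.075 sum of the key copy and compare costs is an assumption backed
out of the quoted result, not a value taken from the source:

#include <cmath>
#include <cstdio>

int main()
{
  const double factor= 0.1;         // PQ_SORT_SLOWNESS_CORRECTION_FACTOR
  const double key_costs= 0.075;    // assumed key_copy + key_cmp sum
  const double num_rows= 10.0, queue_size= 10.0;

  double cost= factor * 2.0 * key_costs * num_rows *
               std::log2(1.0 + queue_size);
  std::printf("%.2f\n", cost);      // prints 0.52
  return 0;
}
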
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index 0d6e37471dd..f5e85cfde43 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -541,11 +541,22 @@ to be fixed later
 
 class Sort_param {
 public:
-  uint rec_length;            // Length of sorted records.
-  uint sort_length;           // Length of sorted columns.
+  // Length of sorted records. ALWAYS equal to sort_length + addon_length.
+  uint rec_length;
+  /*
+    Length of what we need to sort: the sorted columns, plus ref_length
+    when addon fields are not used.
+  */
+  uint sort_length;
+  /* Length of the reference to the row (rowid, primary key, etc.) */
   uint ref_length;            // Length of record ref.
+  /* Length of all addon fields; 0 if no addon fields are used. */
   uint addon_length;          // Length of addon_fields
-  uint res_length;            // Length of records in final sorted file/buffer.
+  /*
+    The length of the 'result' we are going to return to the caller for each
+    sort element; also the length of the data in the final sorted file/buffer.
+  */
+  uint res_length;
   uint max_keys_per_buffer;   // Max keys / buffer.
   uint min_dupl_count;
   ha_rows limit_rows;         // Select limit, or HA_POS_ERROR if unlimited.
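
To close, the length fields documented in this hunk obey a few invariants
worth spelling out; a hedged sketch (the assertions restate the comments
above, plus one assumption: with addon fields the per-element result is the
packed addon data):

#include <cassert>
#include <cstdint>

// Illustrative model of the documented Sort_param length relationships.
struct SortLengths
{
  uint32_t sort_length;   // sort key (+ rowid when addons are unused)
  uint32_t ref_length;    // rowid / primary key reference
  uint32_t addon_length;  // 0 when there are no addon fields
  uint32_t res_length;    // bytes handed back per sorted element
  uint32_t rec_length;    // full record in the sort buffer
};

static void check_invariants(const SortLengths &p, bool with_addons)
{
  assert(p.rec_length == p.sort_length + p.addon_length);
  if (with_addons)
    assert(p.res_length == p.addon_length);  // assumption, see lead-in
  else
    assert(p.res_length == p.ref_length);    // result is just the rowid
}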