summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Widenius <monty@mariadb.org>2018-04-12 09:28:29 +0300
committerMonty <monty@mariadb.org>2018-05-07 00:07:33 +0300
commita0bc3b7eeef6a3bfd0e7eae1cceabcc73071a61a (patch)
tree5ee70953aa35c648bbc0c7c9f252bc23d7df1acd
parent062a3176e7d985540e1051a35f15c313c0296b47 (diff)
downloadmariadb-git-a0bc3b7eeef6a3bfd0e7eae1cceabcc73071a61a.tar.gz
Change read_to_buffer to use ulong instead of uint
This is mostly to document that read_to_buffer can read more than 65K bytes at a time. Also changed merge_buffers to return bool instead of int.
-rw-r--r--sql/filesort.cc92
-rw-r--r--sql/sql_sort.h12
-rw-r--r--sql/uniques.cc10
3 files changed, 54 insertions, 60 deletions
diff --git a/sql/filesort.cc b/sql/filesort.cc
index e37c36da680..1f0f7c0f577 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -1508,21 +1508,21 @@ cleanup:
Read data to buffer.
@retval Number of bytes read
- (uint)-1 if something goes wrong
+ (ulong)-1 if something goes wrong
*/
-uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
- uint rec_length)
+ulong read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
+ uint rec_length)
{
- uint count;
- uint length= 0;
+ register ulong count;
+ ulong length= 0;
- if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count= (ulong) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
length= rec_length*count;
if (unlikely(my_b_pread(fromfile, (uchar*) buffpek->base, length,
buffpek->file_pos)))
- return ((uint) -1);
+ return ((ulong) -1);
buffpek->key=buffpek->base;
buffpek->file_pos+= length; /* New filepos */
buffpek->count-= count;
@@ -1582,18 +1582,18 @@ void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length)
@retval
0 OK
@retval
- other error
+ 1 ERROR
*/
-int merge_buffers(Sort_param *param, IO_CACHE *from_file,
- IO_CACHE *to_file, uchar *sort_buffer,
- BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb,
- int flag)
+bool merge_buffers(Sort_param *param, IO_CACHE *from_file,
+ IO_CACHE *to_file, uchar *sort_buffer,
+ BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb,
+ int flag)
{
- int error;
+ bool error= 0;
uint rec_length,res_length,offset;
size_t sort_length;
- ulong maxcount;
+ ulong maxcount, bytes_read;
ha_rows max_rows,org_max_rows;
my_off_t to_start_filepos;
uchar *strpos;
@@ -1611,7 +1611,6 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
thd->inc_status_sort_merge_passes();
thd->query_plan_fsort_passes++;
- error=0;
rec_length= param->rec_length;
res_length= param->res_length;
sort_length= param->sort_length;
@@ -1639,18 +1638,18 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
cmp= get_ptr_compare(sort_length);
first_cmp_arg= (void*) &sort_length;
}
- if (init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
- (queue_compare) cmp, first_cmp_arg, 0, 0))
+ if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, offsetof(BUFFPEK,key), 0,
+ (queue_compare) cmp, first_cmp_arg, 0, 0)))
DBUG_RETURN(1); /* purecov: inspected */
for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
{
buffpek->base= strpos;
buffpek->max_keys= maxcount;
- strpos+=
- (uint) (error= (int) read_to_buffer(from_file, buffpek, rec_length));
-
- if (unlikely(error == -1))
+ bytes_read= read_to_buffer(from_file, buffpek, rec_length);
+ if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
+
+ strpos+= bytes_read;
buffpek->max_keys= buffpek->mem_count; // If less data in buffers than expected
queue_insert(&queue, (uchar*) buffpek);
}
@@ -1670,13 +1669,13 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
buffpek->key+= rec_length;
if (! --buffpek->mem_count)
{
- if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
- rec_length))))
+ if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek,
+ rec_length))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
}
- else if (unlikely(error == -1))
+ else if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
}
queue_replace_top(&queue); // Top element has been used
@@ -1687,9 +1686,8 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
while (queue.elements > 1)
{
if (killable && unlikely(thd->check_killed()))
- {
- error= 1; goto err; /* purecov: inspected */
- }
+ goto err; /* purecov: inspected */
+
for (;;)
{
buffpek= (BUFFPEK*) queue_top(&queue);
@@ -1726,9 +1724,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
if (!check_dupl_count || dupl_count >= min_dupl_count)
{
if (my_b_write(to_file, src+wr_offset, wr_len))
- {
- error=1; goto err; /* purecov: inspected */
- }
+ goto err; /* purecov: inspected */
}
if (cmp)
{
@@ -1739,7 +1735,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
}
if (!--max_rows)
{
- error= 0; /* purecov: inspected */
+ /* Nothing more to do */
goto end; /* purecov: inspected */
}
@@ -1747,14 +1743,14 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
buffpek->key+= rec_length;
if (! --buffpek->mem_count)
{
- if (unlikely(!(error= (int) read_to_buffer(from_file, buffpek,
- rec_length))))
+ if (unlikely(!(bytes_read= read_to_buffer(from_file, buffpek,
+ rec_length))))
{
(void) queue_remove_top(&queue);
reuse_freed_buff(&queue, buffpek, rec_length);
break; /* One buffer have been removed */
}
- else if (error == -1)
+ else if (unlikely(bytes_read == (ulong) -1))
goto err; /* purecov: inspected */
}
queue_replace_top(&queue); /* Top element has been replaced */
@@ -1790,14 +1786,9 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
{
src= unique_buff;
if (my_b_write(to_file, src+wr_offset, wr_len))
- {
- error=1; goto err; /* purecov: inspected */
- }
+ goto err; /* purecov: inspected */
if (!--max_rows)
- {
- error= 0;
goto end;
- }
}
}
@@ -1813,9 +1804,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
{
if (my_b_write(to_file, (uchar*) buffpek->key,
(size_t)(rec_length*buffpek->mem_count)))
- {
- error= 1; goto err; /* purecov: inspected */
- }
+ goto err; /* purecov: inspected */
}
else
{
@@ -1832,21 +1821,26 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
continue;
}
if (my_b_write(to_file, src, wr_len))
- {
- error=1; goto err;
- }
+ goto err;
}
}
}
- while (likely((error=(int) read_to_buffer(from_file, buffpek, rec_length))
- != -1 && error != 0));
+ while (likely(!(error=
+ (bytes_read= read_to_buffer(from_file, buffpek,
+ rec_length)) == (ulong) -1)) &&
+ bytes_read != 0);
end:
lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows);
lastbuff->file_pos= to_start_filepos;
-err:
+cleanup:
delete_queue(&queue);
DBUG_RETURN(error);
+
+err:
+ error= 1;
+ goto cleanup;
+
} /* merge_buffers */
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index d57239671a8..c29bf1440c9 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -100,12 +100,12 @@ public:
int merge_many_buff(Sort_param *param, uchar *sort_buffer,
BUFFPEK *buffpek,
uint *maxbuffer, IO_CACHE *t_file);
-uint read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
- uint sort_length);
-int merge_buffers(Sort_param *param,IO_CACHE *from_file,
- IO_CACHE *to_file, uchar *sort_buffer,
- BUFFPEK *lastbuff,BUFFPEK *Fb,
- BUFFPEK *Tb,int flag);
+ulong read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
+ uint sort_length);
+bool merge_buffers(Sort_param *param,IO_CACHE *from_file,
+ IO_CACHE *to_file, uchar *sort_buffer,
+ BUFFPEK *lastbuff,BUFFPEK *Fb,
+ BUFFPEK *Tb,int flag);
int merge_index(Sort_param *param, uchar *sort_buffer,
BUFFPEK *buffpek, uint maxbuffer,
IO_CACHE *tempfile, IO_CACHE *outfile);
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 00112d714c7..6bc870133ff 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -509,7 +509,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
key_length);
/* if piece_size is aligned reuse_freed_buffer will always hit */
uint piece_size= max_key_count_per_piece * key_length;
- uint bytes_read; /* to hold return value of read_to_buffer */
+ ulong bytes_read; /* to hold return value of read_to_buffer */
BUFFPEK *top;
int res= 1;
uint cnt_ofs= key_length - (with_counters ? sizeof(element_count) : 0);
@@ -525,7 +525,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
top->base= merge_buffer + (top - begin) * piece_size;
top->max_keys= max_key_count_per_piece;
bytes_read= read_to_buffer(file, top, key_length);
- if (unlikely(bytes_read == (uint) (-1)))
+ if (unlikely(bytes_read == (ulong) -1))
goto end;
DBUG_ASSERT(bytes_read);
queue_insert(&queue, (uchar *) top);
@@ -554,9 +554,9 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
memcpy(save_key_buff, old_key, key_length);
old_key= save_key_buff;
bytes_read= read_to_buffer(file, top, key_length);
- if (unlikely(bytes_read == (uint) (-1)))
+ if (unlikely(bytes_read == (ulong) -1))
goto end;
- else if (bytes_read > 0) /* top->key, top->mem_count are reset */
+ else if (bytes_read) /* top->key, top->mem_count are reset */
queue_replace_top(&queue); /* in read_to_buffer */
else
{
@@ -602,7 +602,7 @@ static bool merge_walk(uchar *merge_buffer, size_t merge_buffer_size,
}
while (--top->mem_count);
bytes_read= read_to_buffer(file, top, key_length);
- if (unlikely(bytes_read == (uint) (-1)))
+ if (unlikely(bytes_read == (ulong) -1))
goto end;
}
while (bytes_read);