Diffstat (limited to 'sql/ha_partition.cc')
-rw-r--r--  sql/ha_partition.cc  |  94
1 file changed, 47 insertions(+), 47 deletions(-)
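
Every hunk below applies the same mechanical change to the partition handler: buffer and key parameters move from the old byte / gptr / void * types to uchar *, and the related length arguments widen from uint to size_t. A minimal sketch of that pattern is shown here, assuming a hypothetical handler_sketch type; it is not the real ha_partition declaration, only an illustration of the signature migration the diff performs.

    #include <cstddef>

    typedef unsigned char uchar;   /* MySQL's own typedef, repeated for this sketch */

    struct handler_sketch
    {
      /* old: int write_row(byte *buf); */
      int write_row(uchar *buf);

      /* old: int change_partitions(..., const void *pack_frm_data, uint pack_frm_len); */
      int change_partitions(const uchar *pack_frm_data, size_t pack_frm_len);
    };
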
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index e741ef7deb6..e9f4bc4c745 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1224,9 +1224,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
- const void *pack_frm_data
+ const uchar *pack_frm_data
__attribute__((unused)),
- uint pack_frm_len
+ size_t pack_frm_len
__attribute__((unused)))
{
List_iterator<partition_element> part_it(m_part_info->partitions);
@@ -1964,8 +1964,8 @@ bool ha_partition::create_handler_file(const char *name)
if ((file= my_create(file_name, CREATE_MODE, O_RDWR | O_TRUNC,
MYF(MY_WME))) >= 0)
{
- result= my_write(file, (byte *) file_buffer, tot_len_byte,
- MYF(MY_WME | MY_NABP));
+ result= my_write(file, (uchar *) file_buffer, tot_len_byte,
+ MYF(MY_WME | MY_NABP)) != 0;
VOID(my_close(file, MYF(0)));
}
else
@@ -2146,14 +2146,14 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
/* Following could be done with my_stat to read in whole file */
if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0)
DBUG_RETURN(TRUE);
- if (my_read(file, (byte *) & buff[0], 8, MYF(MY_NABP)))
+ if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP)))
goto err1;
len_words= uint4korr(buff);
len_bytes= 4 * len_words;
- if (!(file_buffer= my_malloc(len_bytes, MYF(0))))
+ if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0))))
goto err1;
VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0)));
- if (my_read(file, (byte *) file_buffer, len_bytes, MYF(MY_NABP)))
+ if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP)))
goto err2;
chksum= 0;
@@ -2252,7 +2252,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
alloc_len+= table->s->max_key_length;
if (!m_ordered_rec_buffer)
{
- if (!(m_ordered_rec_buffer= (byte*)my_malloc(alloc_len, MYF(MY_WME))))
+ if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME))))
{
DBUG_RETURN(1);
}
@@ -2271,7 +2271,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
int2store(ptr, i);
ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
} while (++i < m_tot_parts);
- m_start_key.key= (const byte*)ptr;
+ m_start_key.key= (const uchar*)ptr;
}
}
@@ -2657,13 +2657,13 @@ void ha_partition::unlock_row()
may be used in determining which partition the row should be written to.
*/
-int ha_partition::write_row(byte * buf)
+int ha_partition::write_row(uchar * buf)
{
uint32 part_id;
int error;
longlong func_value;
#ifdef NOT_NEEDED
- byte *rec0= m_rec0;
+ uchar *rec0= m_rec0;
#endif
DBUG_ENTER("ha_partition::write_row");
DBUG_ASSERT(buf == m_rec0);
@@ -2737,7 +2737,7 @@ int ha_partition::write_row(byte * buf)
old_data is normally record[1] but may be anything
*/
-int ha_partition::update_row(const byte *old_data, byte *new_data)
+int ha_partition::update_row(const uchar *old_data, uchar *new_data)
{
uint32 new_part_id, old_part_id;
int error;
@@ -2809,7 +2809,7 @@ int ha_partition::update_row(const byte *old_data, byte *new_data)
buf is either record[0] or record[1]
*/
-int ha_partition::delete_row(const byte *buf)
+int ha_partition::delete_row(const uchar *buf)
{
uint32 part_id;
int error;
@@ -3080,7 +3080,7 @@ int ha_partition::rnd_end()
sql_table.cc, and sql_update.cc.
*/
-int ha_partition::rnd_next(byte *buf)
+int ha_partition::rnd_next(uchar *buf)
{
handler *file;
int result= HA_ERR_END_OF_FILE;
@@ -3173,7 +3173,7 @@ end:
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
-void ha_partition::position(const byte *record)
+void ha_partition::position(const uchar *record)
{
handler *file= m_file[m_last_part];
DBUG_ENTER("ha_partition::position");
@@ -3213,13 +3213,13 @@ void ha_partition::position(const byte *record)
sql_update.cc.
*/
-int ha_partition::rnd_pos(byte * buf, byte *pos)
+int ha_partition::rnd_pos(uchar * buf, uchar *pos)
{
uint part_id;
handler *file;
DBUG_ENTER("ha_partition::rnd_pos");
- part_id= uint2korr((const byte *) pos);
+ part_id= uint2korr((const uchar *) pos);
DBUG_ASSERT(part_id < m_tot_parts);
file= m_file[part_id];
m_last_part= part_id;
@@ -3351,7 +3351,7 @@ int ha_partition::index_end()
used in conjuntion with multi read ranges.
*/
-int ha_partition::index_read(byte * buf, const byte * key,
+int ha_partition::index_read(uchar * buf, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
@@ -3372,7 +3372,7 @@ int ha_partition::index_read(byte * buf, const byte * key,
see index_read for rest
*/
-int ha_partition::common_index_read(byte *buf, const byte *key,
+int ha_partition::common_index_read(uchar *buf, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
@@ -3449,7 +3449,7 @@ int ha_partition::common_index_read(byte *buf, const byte *key,
and sql_select.cc.
*/
-int ha_partition::index_first(byte * buf)
+int ha_partition::index_first(uchar * buf)
{
DBUG_ENTER("ha_partition::index_first");
@@ -3480,7 +3480,7 @@ int ha_partition::index_first(byte * buf)
and sql_select.cc.
*/
-int ha_partition::index_last(byte * buf)
+int ha_partition::index_last(uchar * buf)
{
DBUG_ENTER("ha_partition::index_last");
@@ -3497,7 +3497,7 @@ int ha_partition::index_last(byte * buf)
see index_first for rest
*/
-int ha_partition::common_first_last(byte *buf)
+int ha_partition::common_first_last(uchar *buf)
{
int error;
@@ -3528,7 +3528,7 @@ int ha_partition::common_first_last(byte *buf)
Can only be used on indexes supporting HA_READ_ORDER
*/
-int ha_partition::index_read_last(byte *buf, const byte *key,
+int ha_partition::index_read_last(uchar *buf, const uchar *key,
key_part_map keypart_map)
{
DBUG_ENTER("ha_partition::index_read_last");
@@ -3555,7 +3555,7 @@ int ha_partition::index_read_last(byte *buf, const byte *key,
Used to read forward through the index.
*/
-int ha_partition::index_next(byte * buf)
+int ha_partition::index_next(uchar * buf)
{
DBUG_ENTER("ha_partition::index_next");
@@ -3591,7 +3591,7 @@ int ha_partition::index_next(byte * buf)
as supplied in the call.
*/
-int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
+int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
{
DBUG_ENTER("ha_partition::index_next_same");
@@ -3618,7 +3618,7 @@ int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
Used to read backwards through the index.
*/
-int ha_partition::index_prev(byte * buf)
+int ha_partition::index_prev(uchar * buf)
{
DBUG_ENTER("ha_partition::index_prev");
@@ -3727,7 +3727,7 @@ int ha_partition::read_range_next()
of them
*/
-int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
+int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
{
DBUG_ENTER("ha_partition::partition_scan_set_up");
@@ -3808,7 +3808,7 @@ int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
perform any sort.
*/
-int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
+int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
{
handler *file= file= m_file[m_part_spec.start_part];
int error;
@@ -3863,7 +3863,7 @@ int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
Both initial start and after completing scan on one partition.
*/
-int ha_partition::handle_unordered_scan_next_partition(byte * buf)
+int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
{
uint i;
DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition");
@@ -3950,7 +3950,7 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf)
entries.
*/
-int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
+int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
{
uint i;
uint j= 0;
@@ -3965,7 +3965,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
{
if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
continue;
- byte *rec_buf_ptr= rec_buf(i);
+ uchar *rec_buf_ptr= rec_buf(i);
int error;
handler *file= m_file[i];
@@ -4000,7 +4000,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
/*
Initialise queue without order first, simply insert
*/
- queue_element(&m_queue, j++)= (byte*)queue_buf(i);
+ queue_element(&m_queue, j++)= (uchar*)queue_buf(i);
}
else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
@@ -4037,11 +4037,11 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
NONE
*/
-void ha_partition::return_top_record(byte *buf)
+void ha_partition::return_top_record(uchar *buf)
{
uint part_id;
- byte *key_buffer= queue_top(&m_queue);
- byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+ uchar *key_buffer= queue_top(&m_queue);
+ uchar *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
part_id= uint2korr(key_buffer);
memcpy(buf, rec_buffer, m_rec_length);
@@ -4064,7 +4064,7 @@ void ha_partition::return_top_record(byte *buf)
other Error code
*/
-int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
+int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
{
int error;
uint part_id= m_top_entry;
@@ -4113,7 +4113,7 @@ int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
other Error code
*/
-int ha_partition::handle_ordered_prev(byte *buf)
+int ha_partition::handle_ordered_prev(uchar *buf)
{
int error;
uint part_id= m_top_entry;
@@ -5358,7 +5358,7 @@ uint ha_partition::min_record_length(uint options) const
they are the same. Sort in partition id order if not equal.
*/
-int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
+int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
{
uint part_id;
my_ptrdiff_t diff1, diff2;
@@ -5567,11 +5567,11 @@ static int partition_init= 0;
Function we use in the creation of our hash to get key.
*/
-static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
+static uchar *partition_get_key(PARTITION_SHARE *share, size_t *length,
my_bool not_used __attribute__ ((unused)))
{
*length= share->table_name_length;
- return (byte *) share->table_name;
+ return (uchar *) share->table_name;
}
/*
@@ -5611,12 +5611,12 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
length= (uint) strlen(table_name);
if (!(share= (PARTITION_SHARE *) hash_search(&partition_open_tables,
- (byte *) table_name, length)))
+ (uchar *) table_name, length)))
{
if (!(share= (PARTITION_SHARE *)
my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length + 1, NullS)))
+ &share, (uint) sizeof(*share),
+ &tmp_name, (uint) length + 1, NullS)))
{
pthread_mutex_unlock(&partition_mutex);
return NULL;
@@ -5626,7 +5626,7 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
share->table_name_length= length;
share->table_name= tmp_name;
strmov(share->table_name, table_name);
- if (my_hash_insert(&partition_open_tables, (byte *) share))
+ if (my_hash_insert(&partition_open_tables, (uchar *) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
@@ -5638,7 +5638,7 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_unlock(&partition_mutex);
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
return NULL;
}
@@ -5655,10 +5655,10 @@ static int free_share(PARTITION_SHARE *share)
pthread_mutex_lock(&partition_mutex);
if (!--share->use_count)
{
- hash_delete(&partition_open_tables, (byte *) share);
+ hash_delete(&partition_open_tables, (uchar *) share);
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
}
pthread_mutex_unlock(&partition_mutex);