author     monty@mysql.com/narttu.mysql.fi <>  2007-05-10 12:59:39 +0300
committer  monty@mysql.com/narttu.mysql.fi <>  2007-05-10 12:59:39 +0300
commit     088e2395f1833f16c2ea3f7405f604165b4aa2cc (patch)
tree       6480cbef09e9dec2fa347b1899963ab3658d692f /sql/ha_partition.cc
parent     9078e630c64a313301cd13ce71d0854fbcf2fd0b (diff)
download   mariadb-git-088e2395f1833f16c2ea3f7405f604165b4aa2cc.tar.gz
WL#3817: Simplify string / memory area types and make things more consistent (first part)
The following type conversions were done:
- Changed byte to uchar
- Changed gptr to uchar*
- Changed my_string to char *
- Changed my_size_t to size_t
- Changed size_s to size_t
Removed the declarations of byte, gptr, my_string, my_size_t and size_s.

The following function parameter changes were done:
- All string functions in mysys/strings were changed to use size_t instead of uint for string lengths.
- All read()/write() functions were changed to use size_t (including vio).
- All protocol functions were changed to use size_t instead of uint.
- Functions that used a pointer to a string length were changed to use size_t*.
- Changed malloc(), free() and related functions from using gptr to void *, as this requires fewer casts in the code and is more in line with how the standard functions work.
- Added an extra length argument to dirname_part() to return the length of the created string.
- Changed (at least) the following functions to take uchar* as argument: db_dump(), my_net_write(), net_write_command(), net_store_data(), DBUG_DUMP(), decimal2bin() & bin2decimal().
- Changed my_compress() and my_uncompress() to use size_t. Changed one argument of my_uncompress() from a pointer to a value, as we only return one value (makes the function easier to use).
- Changed the type of the 'pack_data' argument to packfrm() to avoid casts.
- In readfrm(), writefrm(), ha_discover() and handler::discover(), changed the type of the 'frmdata' argument to uchar** to avoid casts.
- Changed most Field functions to use uchar* instead of char* (reduced a lot of casts).
- Changed field->val_xxx(xxx, new_ptr) to take const pointers.

Other changes:
- Removed a lot of unneeded casts.
- Added a few new casts required by other changes.
- Added some casts to my_multi_malloc() arguments for safety (as string lengths need to be uint, not size_t).
- Fixed all calls to hash-get-key functions to use size_t*. (This needed to be done explicitly, as the conflict was often hidden by casting the function to hash_get_key.)
- Changed some buffers/memory regions to uchar* to avoid casts.
- Changed some string lengths from uint to size_t.
- Changed field->ptr to be uchar* instead of char*. This allowed us to get rid of a lot of casts.
- Some changes from true -> TRUE, false -> FALSE, unsigned char -> uchar.
- Included zlib.h in some files, as we needed the declaration of crc32().
- Changed MY_FILE_ERROR to be (size_t) -1.
- Changed many variables that hold the result of my_read()/my_write() to size_t. This was needed to properly detect errors (which are returned as (size_t) -1).
- Removed some very old VMS code.
- Changed packfrm()/unpackfrm() to not depend on the size of uint (portability fix).
- Removed Windows-specific code that restored the cursor position, as it causes a slowdown on Windows and we should not mix read() and pread() calls anyway (not thread safe). Updated the function comment to reflect this. Changed the function that depended on the original behavior of my_pwrite() to restore the cursor position itself (one such case).
- Added some missing checks of the return value of malloc().
- Changed the definition of MOD_PAD_CHAR_TO_FULL_LENGTH to avoid 'long' overflow.
- Changed the type of table_def::m_size from my_size_t to ulong to reflect that m_size is the number of elements in the array, not a string/memory length.
- Moved THD::max_row_length() to table.cc (as it does not depend on THD). Inlined max_row_length_blob() into this function.
- More function comments.
- Fixed some compiler warnings when compiled without partitions.
- Removed setting of LEX_STRING() arguments in declarations (portability fix).
- Some trivial indentation/variable name changes.
- Some trivial code simplifications:
  - Replaced some alloc_root() + memcpy() calls with strmake_root()/strdup_root().
  - Changed some calls from memdup() to strmake() (safety fix).
  - Simpler loops in client-simple.c.
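As a rough illustration of the conversion pattern described in the commit message above (not part of the patch itself): the sketch below uses hypothetical stand-in names (toy_write, toy_share, toy_get_key) rather than the real mysys or handler APIs, and shows buffers typed as uchar*, lengths carried as size_t, the (size_t) -1 error convention, and a get-key style callback that returns its length through a size_t*.

/*
  Illustrative sketch only -- not part of this changeset. The names are
  hypothetical stand-ins; they only demonstrate the conventions adopted
  by the patch: uchar* buffers, size_t lengths, (size_t) -1 as the error
  value, and key-length output parameters typed as size_t*.
*/
#include <cstddef>
#include <cstdio>
#include <cstring>

typedef unsigned char uchar;            /* the type that replaces 'byte' */

/* Old shape (before the patch), kept only as a comment for contrast:
     uint toy_write(FILE *fd, const byte *buf, uint count);             */

/* New shape: uchar* buffer, size_t length, (size_t) -1 on error. */
static size_t toy_write(std::FILE *fd, const uchar *buf, size_t count)
{
  size_t written= std::fwrite(buf, 1, count, fd);
  return (written == count) ? written : (size_t) -1;
}

/* A share-like struct and a get-key callback in the new style:
   the key length is written through a size_t*, not a uint*. */
struct toy_share
{
  size_t table_name_length;
  char *table_name;
};

static uchar *toy_get_key(toy_share *share, size_t *length)
{
  *length= share->table_name_length;
  return (uchar *) share->table_name;
}

int main()
{
  const char msg[]= "ha_partition";
  /* Result held in a size_t so the (size_t) -1 error value survives. */
  size_t res= toy_write(stdout, (const uchar *) msg, sizeof(msg) - 1);
  if (res == (size_t) -1)
    return 1;

  char name[]= "t1#P#p0";
  toy_share share= { std::strlen(name), name };
  size_t len= 0;
  uchar *key= toy_get_key(&share, &len);
  std::printf("\nkey=%.*s len=%zu\n", (int) len, (const char *) key, len);
  return 0;
}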
Diffstat (limited to 'sql/ha_partition.cc')
-rw-r--r--  sql/ha_partition.cc   92
1 file changed, 46 insertions(+), 46 deletions(-)
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index d3979fa0718..5393e8499b0 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1224,9 +1224,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
const char *path,
ulonglong *copied,
ulonglong *deleted,
- const void *pack_frm_data
+ const uchar *pack_frm_data
__attribute__((unused)),
- uint pack_frm_len
+ size_t pack_frm_len
__attribute__((unused)))
{
List_iterator<partition_element> part_it(m_part_info->partitions);
@@ -1964,7 +1964,7 @@ bool ha_partition::create_handler_file(const char *name)
if ((file= my_create(file_name, CREATE_MODE, O_RDWR | O_TRUNC,
MYF(MY_WME))) >= 0)
{
- result= my_write(file, (byte *) file_buffer, tot_len_byte,
+ result= my_write(file, (uchar *) file_buffer, tot_len_byte,
MYF(MY_WME | MY_NABP));
VOID(my_close(file, MYF(0)));
}
@@ -2141,14 +2141,14 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
/* Following could be done with my_stat to read in whole file */
if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0)
DBUG_RETURN(TRUE);
- if (my_read(file, (byte *) & buff[0], 8, MYF(MY_NABP)))
+ if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP)))
goto err1;
len_words= uint4korr(buff);
len_bytes= 4 * len_words;
- if (!(file_buffer= my_malloc(len_bytes, MYF(0))))
+ if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0))))
goto err1;
VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0)));
- if (my_read(file, (byte *) file_buffer, len_bytes, MYF(MY_NABP)))
+ if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP)))
goto err2;
chksum= 0;
@@ -2237,7 +2237,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
alloc_len+= table->s->max_key_length;
if (!m_ordered_rec_buffer)
{
- if (!(m_ordered_rec_buffer= (byte*)my_malloc(alloc_len, MYF(MY_WME))))
+ if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME))))
{
DBUG_RETURN(1);
}
@@ -2256,7 +2256,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
int2store(ptr, i);
ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
} while (++i < m_tot_parts);
- m_start_key.key= (const byte*)ptr;
+ m_start_key.key= (const uchar*)ptr;
}
}
@@ -2641,13 +2641,13 @@ void ha_partition::unlock_row()
may be used in determining which partition the row should be written to.
*/
-int ha_partition::write_row(byte * buf)
+int ha_partition::write_row(uchar * buf)
{
uint32 part_id;
int error;
longlong func_value;
#ifdef NOT_NEEDED
- byte *rec0= m_rec0;
+ uchar *rec0= m_rec0;
#endif
DBUG_ENTER("ha_partition::write_row");
DBUG_ASSERT(buf == m_rec0);
@@ -2721,7 +2721,7 @@ int ha_partition::write_row(byte * buf)
old_data is normally record[1] but may be anything
*/
-int ha_partition::update_row(const byte *old_data, byte *new_data)
+int ha_partition::update_row(const uchar *old_data, uchar *new_data)
{
uint32 new_part_id, old_part_id;
int error;
@@ -2793,7 +2793,7 @@ int ha_partition::update_row(const byte *old_data, byte *new_data)
buf is either record[0] or record[1]
*/
-int ha_partition::delete_row(const byte *buf)
+int ha_partition::delete_row(const uchar *buf)
{
uint32 part_id;
int error;
@@ -3064,7 +3064,7 @@ int ha_partition::rnd_end()
sql_table.cc, and sql_update.cc.
*/
-int ha_partition::rnd_next(byte *buf)
+int ha_partition::rnd_next(uchar *buf)
{
handler *file;
int result= HA_ERR_END_OF_FILE;
@@ -3157,7 +3157,7 @@ end:
Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
-void ha_partition::position(const byte *record)
+void ha_partition::position(const uchar *record)
{
handler *file= m_file[m_last_part];
DBUG_ENTER("ha_partition::position");
@@ -3197,13 +3197,13 @@ void ha_partition::position(const byte *record)
sql_update.cc.
*/
-int ha_partition::rnd_pos(byte * buf, byte *pos)
+int ha_partition::rnd_pos(uchar * buf, uchar *pos)
{
uint part_id;
handler *file;
DBUG_ENTER("ha_partition::rnd_pos");
- part_id= uint2korr((const byte *) pos);
+ part_id= uint2korr((const uchar *) pos);
DBUG_ASSERT(part_id < m_tot_parts);
file= m_file[part_id];
m_last_part= part_id;
@@ -3335,7 +3335,7 @@ int ha_partition::index_end()
used in conjuntion with multi read ranges.
*/
-int ha_partition::index_read(byte * buf, const byte * key,
+int ha_partition::index_read(uchar * buf, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
@@ -3356,7 +3356,7 @@ int ha_partition::index_read(byte * buf, const byte * key,
see index_read for rest
*/
-int ha_partition::common_index_read(byte *buf, const byte *key,
+int ha_partition::common_index_read(uchar *buf, const uchar *key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
@@ -3433,7 +3433,7 @@ int ha_partition::common_index_read(byte *buf, const byte *key,
and sql_select.cc.
*/
-int ha_partition::index_first(byte * buf)
+int ha_partition::index_first(uchar * buf)
{
DBUG_ENTER("ha_partition::index_first");
@@ -3464,7 +3464,7 @@ int ha_partition::index_first(byte * buf)
and sql_select.cc.
*/
-int ha_partition::index_last(byte * buf)
+int ha_partition::index_last(uchar * buf)
{
DBUG_ENTER("ha_partition::index_last");
@@ -3481,7 +3481,7 @@ int ha_partition::index_last(byte * buf)
see index_first for rest
*/
-int ha_partition::common_first_last(byte *buf)
+int ha_partition::common_first_last(uchar *buf)
{
int error;
@@ -3512,7 +3512,7 @@ int ha_partition::common_first_last(byte *buf)
Can only be used on indexes supporting HA_READ_ORDER
*/
-int ha_partition::index_read_last(byte *buf, const byte *key,
+int ha_partition::index_read_last(uchar *buf, const uchar *key,
key_part_map keypart_map)
{
DBUG_ENTER("ha_partition::index_read_last");
@@ -3539,7 +3539,7 @@ int ha_partition::index_read_last(byte *buf, const byte *key,
Used to read forward through the index.
*/
-int ha_partition::index_next(byte * buf)
+int ha_partition::index_next(uchar * buf)
{
DBUG_ENTER("ha_partition::index_next");
@@ -3575,7 +3575,7 @@ int ha_partition::index_next(byte * buf)
as supplied in the call.
*/
-int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
+int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
{
DBUG_ENTER("ha_partition::index_next_same");
@@ -3602,7 +3602,7 @@ int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
Used to read backwards through the index.
*/
-int ha_partition::index_prev(byte * buf)
+int ha_partition::index_prev(uchar * buf)
{
DBUG_ENTER("ha_partition::index_prev");
@@ -3711,7 +3711,7 @@ int ha_partition::read_range_next()
of them
*/
-int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
+int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
{
DBUG_ENTER("ha_partition::partition_scan_set_up");
@@ -3792,7 +3792,7 @@ int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
perform any sort.
*/
-int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
+int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
{
handler *file= file= m_file[m_part_spec.start_part];
int error;
@@ -3847,7 +3847,7 @@ int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
Both initial start and after completing scan on one partition.
*/
-int ha_partition::handle_unordered_scan_next_partition(byte * buf)
+int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
{
uint i;
DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition");
@@ -3934,7 +3934,7 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf)
entries.
*/
-int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
+int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
{
uint i;
uint j= 0;
@@ -3949,7 +3949,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
{
if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
continue;
- byte *rec_buf_ptr= rec_buf(i);
+ uchar *rec_buf_ptr= rec_buf(i);
int error;
handler *file= m_file[i];
@@ -3984,7 +3984,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
/*
Initialise queue without order first, simply insert
*/
- queue_element(&m_queue, j++)= (byte*)queue_buf(i);
+ queue_element(&m_queue, j++)= (uchar*)queue_buf(i);
}
else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
@@ -4021,11 +4021,11 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order)
NONE
*/
-void ha_partition::return_top_record(byte *buf)
+void ha_partition::return_top_record(uchar *buf)
{
uint part_id;
- byte *key_buffer= queue_top(&m_queue);
- byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+ uchar *key_buffer= queue_top(&m_queue);
+ uchar *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
part_id= uint2korr(key_buffer);
memcpy(buf, rec_buffer, m_rec_length);
@@ -4048,7 +4048,7 @@ void ha_partition::return_top_record(byte *buf)
other Error code
*/
-int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
+int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
{
int error;
uint part_id= m_top_entry;
@@ -4097,7 +4097,7 @@ int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
other Error code
*/
-int ha_partition::handle_ordered_prev(byte *buf)
+int ha_partition::handle_ordered_prev(uchar *buf)
{
int error;
uint part_id= m_top_entry;
@@ -5342,7 +5342,7 @@ uint ha_partition::min_record_length(uint options) const
they are the same. Sort in partition id order if not equal.
*/
-int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
+int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
{
uint part_id;
my_ptrdiff_t diff1, diff2;
@@ -5551,11 +5551,11 @@ static int partition_init= 0;
Function we use in the creation of our hash to get key.
*/
-static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
+static uchar *partition_get_key(PARTITION_SHARE *share, size_t *length,
my_bool not_used __attribute__ ((unused)))
{
*length= share->table_name_length;
- return (byte *) share->table_name;
+ return (uchar *) share->table_name;
}
/*
@@ -5595,12 +5595,12 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
length= (uint) strlen(table_name);
if (!(share= (PARTITION_SHARE *) hash_search(&partition_open_tables,
- (byte *) table_name, length)))
+ (uchar *) table_name, length)))
{
if (!(share= (PARTITION_SHARE *)
my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length + 1, NullS)))
+ &share, (uint) sizeof(*share),
+ &tmp_name, (uint) length + 1, NullS)))
{
pthread_mutex_unlock(&partition_mutex);
return NULL;
@@ -5610,7 +5610,7 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
share->table_name_length= length;
share->table_name= tmp_name;
strmov(share->table_name, table_name);
- if (my_hash_insert(&partition_open_tables, (byte *) share))
+ if (my_hash_insert(&partition_open_tables, (uchar *) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST);
@@ -5622,7 +5622,7 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_unlock(&partition_mutex);
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
return NULL;
}
@@ -5639,10 +5639,10 @@ static int free_share(PARTITION_SHARE *share)
pthread_mutex_lock(&partition_mutex);
if (!--share->use_count)
{
- hash_delete(&partition_open_tables, (byte *) share);
+ hash_delete(&partition_open_tables, (uchar *) share);
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
- my_free((gptr) share, MYF(0));
+ my_free((uchar*) share, MYF(0));
}
pthread_mutex_unlock(&partition_mutex);