Diffstat (limited to 'sql/ha_partition.cc')
-rw-r--r--  sql/ha_partition.cc  258
1 file changed, 157 insertions(+), 101 deletions(-)
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 82bd39220a9..c580ae86439 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -55,6 +55,7 @@
#include "sql_priv.h"
#include "sql_parse.h" // append_file_to_dir
+#include "create_options.h"
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -167,6 +168,7 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
+ init_alloc_root(&m_mem_root, 512, 512);
init_handler_variables();
DBUG_VOID_RETURN;
}
@@ -188,6 +190,7 @@ ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
{
DBUG_ENTER("ha_partition::ha_partition(part_info)");
DBUG_ASSERT(part_info);
+ init_alloc_root(&m_mem_root, 512, 512);
init_handler_variables();
m_part_info= part_info;
m_create_handler= TRUE;
@@ -214,6 +217,7 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(clone)");
+ init_alloc_root(&m_mem_root, 512, 512);
init_handler_variables();
m_part_info= part_info_arg;
m_create_handler= TRUE;
@@ -241,6 +245,7 @@ void ha_partition::init_handler_variables()
m_file_buffer= NULL;
m_name_buffer_ptr= NULL;
m_engine_array= NULL;
+ m_connect_string= NULL;
m_file= NULL;
m_file_tot_parts= 0;
m_reorged_file= NULL;
@@ -264,7 +269,6 @@ void ha_partition::init_handler_variables()
m_extra_prepare_for_update= FALSE;
m_extra_cache_part_id= NO_CURRENT_PART_ID;
m_handler_status= handler_not_initialized;
- m_low_byte_first= 1;
m_part_field_array= NULL;
m_ordered_rec_buffer= NULL;
m_top_entry= NO_CURRENT_PART_ID;
@@ -320,8 +324,12 @@ ha_partition::~ha_partition()
delete m_file[i];
}
my_free(m_ordered_rec_buffer);
+ m_ordered_rec_buffer= NULL;
clear_handler_file();
+
+ free_root(&m_mem_root, MYF(0));
+
DBUG_VOID_RETURN;
}
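
The hunks above replace scattered my_malloc()/my_free() pairs with a handler-owned MEM_ROOT: the constructors call init_alloc_root(), the .par metadata (file buffer, engine array, connect strings) is later carved out of that root with alloc_root(), and the destructor releases everything with a single free_root(). A minimal sketch of the pattern, using the same my_sys calls the patch uses; the function and variable names here are illustrative, not taken from the file:

  #include "my_global.h"
  #include "my_sys.h"                      /* MEM_ROOT, alloc_root, free_root */

  static void mem_root_sketch(size_t len_bytes, uint n_parts)
  {
    MEM_ROOT mem_root;
    init_alloc_root(&mem_root, 512, 512);  /* 512-byte blocks, 512 pre-allocated */

    /* Allocations share the arena; nothing is freed individually. */
    char *file_buffer= (char*) alloc_root(&mem_root, len_bytes);
    char **names= (char**) alloc_root(&mem_root, n_parts * sizeof(char*));
    bzero(file_buffer, len_bytes);
    bzero(names, n_parts * sizeof(char*));

    /* MY_KEEP_PREALLOC (as in clear_handler_file()) would keep the first
       block for reuse; MYF(0) (as in the destructor) frees the whole arena. */
    free_root(&mem_root, MYF(0));
  }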
@@ -366,7 +374,7 @@ ha_partition::~ha_partition()
The flag HA_READ_ORDER will be reset for the time being to indicate no
ordered output is available from partition handler indexes. Later a merge
sort will be performed using the underlying handlers.
- 5) primary_key_is_clustered, has_transactions and low_byte_first is
+ 5) primary_key_is_clustered and has_transactions are
calculated here.
*/
@@ -402,24 +410,17 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
We create all underlying table handlers here. We do it in this special
method to be able to report allocation errors.
- Set up low_byte_first, primary_key_is_clustered and
+ Set up primary_key_is_clustered and
has_transactions since they are called often in all kinds of places,
other parameters are calculated on demand.
Verify that all partitions have the same table_flags.
*/
check_table_flags= m_file[0]->ha_table_flags();
- m_low_byte_first= m_file[0]->low_byte_first();
m_pkey_is_clustered= TRUE;
file_array= m_file;
do
{
file= *file_array;
- if (m_low_byte_first != file->low_byte_first())
- {
- // Cannot have handlers with different endian
- my_error(ER_MIX_HANDLER_ERROR, MYF(0));
- DBUG_RETURN(1);
- }
if (!file->primary_key_is_clustered())
m_pkey_is_clustered= FALSE;
if (check_table_flags != file->ha_table_flags())
@@ -589,6 +590,13 @@ int ha_partition::create(const char *name, TABLE *table_arg,
char t_name[FN_REFLEN];
DBUG_ENTER("ha_partition::create");
+ if (create_info->used_fields & HA_CREATE_USED_CONNECTION)
+ {
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0),
+ "CONNECTION not valid for partition");
+ DBUG_RETURN(1);
+ }
+
strmov(t_name, name);
DBUG_ASSERT(*fn_rext((char*)name) == '\0');
if (del_ren_cre_table(t_name, NULL, table_arg, create_info))
@@ -1079,8 +1087,8 @@ static bool print_admin_msg(THD* thd, const char* msg_type,
va_list args;
Protocol *protocol= thd->protocol;
uint length, msg_length;
- char msgbuf[MI_MAX_MSG_BUF];
- char name[NAME_LEN*2+2];
+ char msgbuf[MYSQL_ERRMSG_SIZE];
+ char name[SAFE_NAME_LEN*2+2];
va_start(args, fmt);
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
@@ -1090,7 +1098,7 @@ static bool print_admin_msg(THD* thd, const char* msg_type,
if (!thd->vio_ok())
{
- sql_print_error("%s", msgbuf);
+ sql_print_error(fmt, args);
return TRUE;
}
@@ -1173,7 +1181,8 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
error != HA_ADMIN_ALREADY_DONE &&
error != HA_ADMIN_TRY_ALTER)
{
- print_admin_msg(thd, "error", table_share->db.str, table->alias,
+ print_admin_msg(thd, "error", table_share->db.str,
+ table->alias.c_ptr(),
opt_op_name[flag],
"Subpartition %s returned error",
sub_elem->partition_name);
@@ -1199,7 +1208,8 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
error != HA_ADMIN_ALREADY_DONE &&
error != HA_ADMIN_TRY_ALTER)
{
- print_admin_msg(thd, "error", table_share->db.str, table->alias,
+ print_admin_msg(thd, "error", table_share->db.str,
+ table->alias.c_ptr(),
opt_op_name[flag], "Partition %s returned error",
part_elem->partition_name);
}
@@ -1308,6 +1318,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
if ((error= set_up_table_before_create(tbl, part_name, create_info,
0, p_elem)))
goto error_create;
+ tbl->s->connect_string = p_elem->connect_string;
if ((error= file->ha_create(part_name, tbl, create_info)))
{
/*
@@ -1337,7 +1348,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
DBUG_RETURN(0);
error_external_lock:
- (void) file->close();
+ (void) file->ha_close();
error_open:
(void) file->ha_delete_table(part_name);
error_create:
@@ -1383,7 +1394,7 @@ void ha_partition::cleanup_new_partition(uint part_count)
while ((part_count > 0) && (*file))
{
(*file)->ha_external_lock(thd, F_UNLCK);
- (*file)->close();
+ (*file)->ha_close();
/* Leave the (*file)->ha_delete_table(part_name) to the ddl-log */
@@ -1748,11 +1759,11 @@ int ha_partition::copy_partitions(ulonglong * const copied,
uint32 new_part;
late_extra_cache(reorg_part);
- if ((result= file->ha_rnd_init(1)))
+ if ((result= file->ha_rnd_init_with_error(1)))
goto error;
while (TRUE)
{
- if ((result= file->rnd_next(m_rec0)))
+ if ((result= file->ha_rnd_next(m_rec0)))
{
if (result == HA_ERR_RECORD_DELETED)
continue; //Probably MyISAM
@@ -1828,6 +1839,8 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
create_info->auto_increment_value= stats.auto_increment_value;
create_info->data_file_name= create_info->index_file_name = NULL;
+ create_info->connect_string.str= NULL;
+ create_info->connect_string.length= 0;
return;
}
@@ -1974,6 +1987,8 @@ uint ha_partition::del_ren_cre_table(const char *from,
{
if ((error= set_up_table_before_create(table_arg, from_buff,
create_info, i, NULL)) ||
+ parse_engine_table_options(ha_thd(), (*file)->ht,
+ (*file)->table_share) ||
((error= (*file)->ha_create(from_buff, table_arg, create_info))))
goto create_error;
}
@@ -2114,6 +2129,10 @@ int ha_partition::set_up_table_before_create(TABLE *tbl,
}
info->index_file_name= part_elem->index_file_name;
info->data_file_name= part_elem->data_file_name;
+ info->connect_string= part_elem->connect_string;
+ if (info->connect_string.length)
+ info->used_fields|= HA_CREATE_USED_CONNECTION;
+ tbl->s->connect_string= part_elem->connect_string;
DBUG_RETURN(0);
}
@@ -2228,8 +2247,10 @@ bool ha_partition::create_handler_file(const char *name)
/* 4 static words (tot words, checksum, tot partitions, name length) */
tot_len_words= 4 + tot_partition_words + tot_name_words;
tot_len_byte= PAR_WORD_SIZE * tot_len_words;
- if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL))))
+ file_buffer= (uchar *) my_alloca(tot_len_byte);
+ if (!file_buffer)
DBUG_RETURN(TRUE);
+ bzero(file_buffer, tot_len_byte);
engine_array= (file_buffer + PAR_ENGINES_OFFSET);
name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE
+ PAR_WORD_SIZE);
@@ -2289,11 +2310,28 @@ bool ha_partition::create_handler_file(const char *name)
{
result= mysql_file_write(file, (uchar *) file_buffer, tot_len_byte,
MYF(MY_WME | MY_NABP)) != 0;
+
+ /* Write connection information (for federatedx engine) */
+ part_it.rewind();
+ for (i= 0; i < num_parts && !result; i++)
+ {
+ uchar buffer[4];
+ part_elem= part_it++;
+ uint length = part_elem->connect_string.length;
+ int4store(buffer, length);
+ if (my_write(file, buffer, 4, MYF(MY_WME | MY_NABP)) ||
+ my_write(file, (uchar *) part_elem->connect_string.str, length,
+ MYF(MY_WME | MY_NABP)))
+ {
+ result= TRUE;
+ break;
+ }
+ }
(void) mysql_file_close(file, MYF(0));
}
else
result= TRUE;
- my_free(file_buffer);
+ my_afree((char*) file_buffer);
DBUG_RETURN(result);
}
@@ -2306,10 +2344,10 @@ void ha_partition::clear_handler_file()
{
if (m_engine_array)
plugin_unlock_list(NULL, m_engine_array, m_tot_parts);
- my_free(m_file_buffer);
- my_free(m_engine_array);
+ free_root(&m_mem_root, MYF(MY_KEEP_PREALLOC));
m_file_buffer= NULL;
m_engine_array= NULL;
+ m_connect_string= NULL;
}
@@ -2466,7 +2504,7 @@ bool ha_partition::read_par_file(const char *name)
len_bytes= PAR_WORD_SIZE * len_words;
if (mysql_file_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
goto err1;
- if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0))))
+ if (!(file_buffer= (char*) alloc_root(&m_mem_root, len_bytes)))
goto err1;
if (mysql_file_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP)))
goto err2;
@@ -2490,14 +2528,37 @@ bool ha_partition::read_par_file(const char *name)
*/
if (len_words != (tot_partition_words + tot_name_words + 4))
goto err2;
- (void) mysql_file_close(file, MYF(0));
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE;
+ if (!(m_connect_string= (LEX_STRING*)
+ alloc_root(&m_mem_root, m_tot_parts * sizeof(LEX_STRING))))
+ goto err2;
+ bzero(m_connect_string, m_tot_parts * sizeof(LEX_STRING));
+
+ /* Read connection arguments (for federated X engine) */
+ for (i= 0; i < m_tot_parts; i++)
+ {
+ LEX_STRING connect_string;
+ uchar buffer[4];
+ if (my_read(file, buffer, 4, MYF(MY_NABP)))
+ {
+ /* No extra options; Probably not a federatedx engine */
+ break;
+ }
+ connect_string.length= uint4korr(buffer);
+ connect_string.str= (char*) alloc_root(&m_mem_root, connect_string.length+1);
+ if (my_read(file, (uchar*) connect_string.str, connect_string.length,
+ MYF(MY_NABP)))
+ break;
+ connect_string.str[connect_string.length]= 0;
+ m_connect_string[i]= connect_string;
+ }
+
+ (void) mysql_file_close(file, MYF(0));
DBUG_RETURN(false);
err2:
- my_free(file_buffer);
err1:
(void) mysql_file_close(file, MYF(0));
DBUG_RETURN(true);
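
The two hunks above and the create_handler_file() hunk earlier define a small extension to the .par file: after the original fixed layout, one length-prefixed record per partition is appended, a 4-byte little-endian length written with int4store() followed by the connect string bytes. The reader decodes the length with uint4korr() and treats an EOF on the first 4-byte read as an older .par file with no connection info. A sketch of just that framing, kept in memory to stay self-contained; the helper names are hypothetical:

  #include "my_global.h"                   /* uchar, int4store, uint4korr */
  #include <string.h>

  /* Append one <4-byte length><bytes> record; returns the next write position. */
  static uchar *pack_connect_string(uchar *pos, const char *str, uint length)
  {
    int4store(pos, length);
    memcpy(pos + 4, str, length);
    return pos + 4 + length;
  }

  /* Decode one record; returns the position just after it. */
  static const uchar *unpack_connect_string(const uchar *pos,
                                            const char **str, uint *length)
  {
    *length= uint4korr(pos);
    *str= (const char*) pos + 4;
    return pos + 4 + *length;
  }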
@@ -2536,13 +2597,13 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
goto err;
}
if (!(m_engine_array= (plugin_ref*)
- my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME))))
+ alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref))))
goto err;
for (i= 0; i < m_tot_parts; i++)
m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]);
- my_afree((gptr) engine_array);
+ my_afree(engine_array);
if (create_handlers(mem_root))
{
@@ -2553,7 +2614,7 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
DBUG_RETURN(false);
err:
- my_afree((gptr) engine_array);
+ my_afree(engine_array);
DBUG_RETURN(true);
}
@@ -2653,7 +2714,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
name_buffer_ptr= m_name_buffer_ptr;
m_start_key.length= 0;
m_rec0= table->record[0];
- m_rec_length= table_share->reclength;
+ m_rec_length= table_share->stored_rec_length;
alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
alloc_len+= table_share->max_key_length;
if (!m_ordered_rec_buffer)
@@ -2731,8 +2792,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
+ table->s->connect_string = m_connect_string[(uint)(file-m_file)];
if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked)))
goto err_handler;
+ bzero(&table->s->connect_string, sizeof(LEX_STRING));
m_num_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
} while (*(++file));
@@ -2779,7 +2842,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
Initialize priority queue, initialized to reading forward.
*/
if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
- 0, key_rec_cmp, (void*)this)))
+ 0, key_rec_cmp, (void*)this, 0, 0)))
goto err_handler;
/*
@@ -2828,7 +2891,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
err_handler:
DEBUG_SYNC(ha_thd(), "partition_open_error");
while (file-- != m_file)
- (*file)->close();
+ (*file)->ha_close();
err_alloc:
bitmap_free(&m_bulk_insert_started);
if (!m_is_clone_of)
@@ -2914,7 +2977,7 @@ int ha_partition::close(void)
repeat:
do
{
- (*file)->close();
+ (*file)->ha_close();
} while (*(++file));
if (first && m_added_file && m_added_file[0])
@@ -3793,7 +3856,7 @@ ha_rows ha_partition::guess_bulk_insert_rows()
0 Success
Note: end_bulk_insert can be called without start_bulk_insert
- being called, see bug¤44108.
+ being called, see bug#44108.
*/
@@ -4006,6 +4069,7 @@ int ha_partition::rnd_next(uchar *buf)
int result= HA_ERR_END_OF_FILE;
uint part_id= m_part_spec.start_part;
DBUG_ENTER("ha_partition::rnd_next");
+ decrement_statistics(&SSV::ha_read_rnd_next_count);
if (NO_CURRENT_PART_ID == part_id)
{
@@ -4021,7 +4085,7 @@ int ha_partition::rnd_next(uchar *buf)
while (TRUE)
{
- result= file->rnd_next(buf);
+ result= file->ha_rnd_next(buf);
if (!result)
{
m_last_part= part_id;
@@ -4147,6 +4211,7 @@ int ha_partition::rnd_pos(uchar * buf, uchar *pos)
uint part_id;
handler *file;
DBUG_ENTER("ha_partition::rnd_pos");
+ decrement_statistics(&SSV::ha_read_rnd_count);
part_id= uint2korr((const uchar *) pos);
DBUG_ASSERT(part_id < m_tot_parts);
@@ -4233,6 +4298,7 @@ int ha_partition::index_init(uint inx, bool sorted)
m_part_spec.start_part= NO_CURRENT_PART_ID;
m_start_key.length= 0;
m_ordered= sorted;
+ m_ordered_scan_ongoing= FALSE;
m_curr_key_info[0]= table->key_info+inx;
if (m_pkey_is_clustered && table->s->primary_key != MAX_KEY)
{
@@ -4359,6 +4425,7 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
enum ha_rkey_function find_flag)
{
DBUG_ENTER("ha_partition::index_read_map");
+ decrement_statistics(&SSV::ha_read_key_count);
end_range= 0;
m_index_scan_type= partition_index_read;
m_start_key.key= key;
@@ -4486,6 +4553,7 @@ int ha_partition::common_index_read(uchar *buf, bool have_start_key)
int ha_partition::index_first(uchar * buf)
{
DBUG_ENTER("ha_partition::index_first");
+ decrement_statistics(&SSV::ha_read_first_count);
end_range= 0;
m_index_scan_type= partition_index_first;
@@ -4517,6 +4585,7 @@ int ha_partition::index_first(uchar * buf)
int ha_partition::index_last(uchar * buf)
{
DBUG_ENTER("ha_partition::index_last");
+ decrement_statistics(&SSV::ha_read_last_count);
m_index_scan_type= partition_index_last;
DBUG_RETURN(common_first_last(buf));
@@ -4545,39 +4614,6 @@ int ha_partition::common_first_last(uchar *buf)
/*
- Read last using key
-
- SYNOPSIS
- index_read_last_map()
- buf Read row in MySQL Row Format
- key Key
- keypart_map Which part of key is used
-
- RETURN VALUE
- >0 Error code
- 0 Success
-
- DESCRIPTION
- This is used in join_read_last_key to optimise away an ORDER BY.
- Can only be used on indexes supporting HA_READ_ORDER
-*/
-
-int ha_partition::index_read_last_map(uchar *buf, const uchar *key,
- key_part_map keypart_map)
-{
- DBUG_ENTER("ha_partition::index_read_last");
-
- m_ordered= TRUE; // Safety measure
- end_range= 0;
- m_index_scan_type= partition_index_read_last;
- m_start_key.key= key;
- m_start_key.keypart_map= keypart_map;
- m_start_key.flag= HA_READ_PREFIX_LAST;
- DBUG_RETURN(common_index_read(buf, TRUE));
-}
-
-
-/*
Optimization of the default implementation to take advantage of dynamic
partition pruning.
*/
@@ -4653,6 +4689,7 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index,
int ha_partition::index_next(uchar * buf)
{
DBUG_ENTER("ha_partition::index_next");
+ decrement_statistics(&SSV::ha_read_next_count);
/*
TODO(low priority):
@@ -4689,6 +4726,7 @@ int ha_partition::index_next(uchar * buf)
int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
{
DBUG_ENTER("ha_partition::index_next_same");
+ decrement_statistics(&SSV::ha_read_next_count);
DBUG_ASSERT(keylen == m_start_key.length);
DBUG_ASSERT(m_index_scan_type != partition_index_last);
@@ -4716,6 +4754,7 @@ int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
int ha_partition::index_prev(uchar * buf)
{
DBUG_ENTER("ha_partition::index_prev");
+ decrement_statistics(&SSV::ha_read_prev_count);
/* TODO: read comment in index_next */
DBUG_ASSERT(m_index_scan_type != partition_index_first);
@@ -4928,8 +4967,8 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
}
else if (is_next_same)
{
- if (!(error= file->index_next_same(buf, m_start_key.key,
- m_start_key.length)))
+ if (!(error= file->ha_index_next_same(buf, m_start_key.key,
+ m_start_key.length)))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0);
@@ -4937,7 +4976,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
}
else
{
- if (!(error= file->index_next(buf)))
+ if (!(error= file->ha_index_next(buf)))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0); // Row was in range
@@ -4992,13 +5031,13 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
break;
case partition_index_read:
DBUG_PRINT("info", ("index_read on partition %d", i));
- error= file->index_read_map(buf, m_start_key.key,
- m_start_key.keypart_map,
- m_start_key.flag);
+ error= file->ha_index_read_map(buf, m_start_key.key,
+ m_start_key.keypart_map,
+ m_start_key.flag);
break;
case partition_index_first:
DBUG_PRINT("info", ("index_first on partition %d", i));
- error= file->index_first(buf);
+ error= file->ha_index_first(buf);
break;
case partition_index_first_unordered:
/*
@@ -5061,7 +5100,7 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
{
uint i;
- uint j= 0;
+ uint j= queue_first_element(&m_queue);
bool found= FALSE;
DBUG_ENTER("ha_partition::handle_ordered_index_scan");
@@ -5077,25 +5116,25 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
int error;
handler *file= m_file[i];
+ /*
+ Reset null bits (to avoid valgrind warnings) and to give a default
+ value for not read null fields.
+ */
+ bfill(rec_buf_ptr, table->s->null_bytes, 255);
+
switch (m_index_scan_type) {
case partition_index_read:
- error= file->index_read_map(rec_buf_ptr,
- m_start_key.key,
- m_start_key.keypart_map,
- m_start_key.flag);
+ error= file->ha_index_read_map(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.keypart_map,
+ m_start_key.flag);
break;
case partition_index_first:
- error= file->index_first(rec_buf_ptr);
+ error= file->ha_index_first(rec_buf_ptr);
reverse_order= FALSE;
break;
case partition_index_last:
- error= file->index_last(rec_buf_ptr);
- reverse_order= TRUE;
- break;
- case partition_index_read_last:
- error= file->index_read_last_map(rec_buf_ptr,
- m_start_key.key,
- m_start_key.keypart_map);
+ error= file->ha_index_last(rec_buf_ptr);
reverse_order= TRUE;
break;
case partition_read_range:
@@ -5135,7 +5174,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
*/
queue_set_max_at_top(&m_queue, reverse_order);
queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info);
- m_queue.elements= j;
+ m_queue.elements= j - queue_first_element(&m_queue);
queue_fix(&m_queue);
return_top_record(buf);
table->status= 0;
@@ -5197,16 +5236,16 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
memcpy(rec_buf(part_id), table->record[0], m_rec_length);
}
else if (!is_next_same)
- error= file->index_next(rec_buf(part_id));
+ error= file->ha_index_next(rec_buf(part_id));
else
- error= file->index_next_same(rec_buf(part_id), m_start_key.key,
- m_start_key.length);
+ error= file->ha_index_next_same(rec_buf(part_id), m_start_key.key,
+ m_start_key.length);
if (error)
{
if (error == HA_ERR_END_OF_FILE)
{
/* Return next buffered row */
- queue_remove(&m_queue, (uint) 0);
+ queue_remove_top(&m_queue);
if (m_queue.elements)
{
DBUG_PRINT("info", ("Record returned from partition %u (2)",
@@ -5218,7 +5257,7 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
}
DBUG_RETURN(error);
}
- queue_replaced(&m_queue);
+ queue_replace_top(&m_queue);
return_top_record(buf);
DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry));
DBUG_RETURN(0);
@@ -5245,11 +5284,11 @@ int ha_partition::handle_ordered_prev(uchar *buf)
handler *file= m_file[part_id];
DBUG_ENTER("ha_partition::handle_ordered_prev");
- if ((error= file->index_prev(rec_buf(part_id))))
+ if ((error= file->ha_index_prev(rec_buf(part_id))))
{
if (error == HA_ERR_END_OF_FILE)
{
- queue_remove(&m_queue, (uint) 0);
+ queue_remove_top(&m_queue);
if (m_queue.elements)
{
return_top_record(buf);
@@ -5261,7 +5300,7 @@ int ha_partition::handle_ordered_prev(uchar *buf)
}
DBUG_RETURN(error);
}
- queue_replaced(&m_queue);
+ queue_replace_top(&m_queue);
return_top_record(buf);
DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
DBUG_RETURN(0);
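
The ordered-scan hunks above move to the updated queue API (queue_first_element(), queue_replace_top(), queue_remove_top()), but the underlying algorithm is unchanged: each partition contributes its current index row to a priority queue, the top row is handed to the caller, and that partition is then advanced, either replacing the queue top or dropping out at end-of-file. A conceptual sketch of that k-way merge; std::priority_queue and the callbacks stand in for the server's QUEUE structure and ha_index_next(), so everything here is illustrative:

  #include <queue>
  #include <vector>
  #include <functional>

  struct PartRow { int key; unsigned part_id; };
  struct ByKey {                            // invert the comparison => min-heap
    bool operator()(const PartRow &a, const PartRow &b) const
    { return a.key > b.key; }
  };

  // next_row(part, out) stands in for ha_index_next() on one partition and
  // returns false at end-of-file; emit() receives rows in global key order.
  static void ordered_merge(unsigned parts,
                            const std::function<bool(unsigned, PartRow&)> &next_row,
                            const std::function<void(const PartRow&)> &emit)
  {
    std::priority_queue<PartRow, std::vector<PartRow>, ByKey> q;
    for (unsigned p= 0; p < parts; p++)     // seed with each partition's first row
    {
      PartRow r= { 0, p };
      if (next_row(p, r))
        q.push(r);
    }
    while (!q.empty())
    {
      PartRow top= q.top();                 // smallest key across all partitions
      emit(top);
      q.pop();                              // "queue_remove_top" ...
      PartRow r= { 0, top.part_id };
      if (next_row(top.part_id, r))         // ... or "queue_replace_top" when the
        q.push(r);                          // same partition still has rows
    }
  }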
@@ -5582,7 +5621,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
stat_info->update_time= file->stats.update_time;
stat_info->check_time= file->stats.check_time;
stat_info->check_sum= 0;
- if (file->ha_table_flags() & HA_HAS_CHECKSUM)
+ if (file->ha_table_flags() & (HA_HAS_OLD_CHECKSUM | HA_HAS_NEW_CHECKSUM))
stat_info->check_sum= file->checksum();
return;
}
@@ -5909,6 +5948,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_KEYREAD:
case HA_EXTRA_NO_KEYREAD:
case HA_EXTRA_FLUSH:
+ case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
DBUG_RETURN(loop_extra(operation));
/* Category 2), used by non-MyISAM handlers */
@@ -5945,9 +5985,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_PREPARE_FOR_DROP:
case HA_EXTRA_FLUSH_CACHE:
{
- if (m_myisam)
- DBUG_RETURN(loop_extra(operation));
- break;
+ DBUG_RETURN(loop_extra(operation));
}
case HA_EXTRA_NO_READCHECK:
{
@@ -6222,6 +6260,7 @@ void ha_partition::late_extra_cache(uint partition_id)
}
if (m_extra_prepare_for_update)
{
+ DBUG_ASSERT(m_extra_cache);
(void) file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
}
m_extra_cache_part_id= partition_id;
@@ -7187,5 +7226,22 @@ mysql_declare_plugin(partition)
0, /* flags */
}
mysql_declare_plugin_end;
+maria_declare_plugin(partition)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &partition_storage_engine,
+ "partition",
+ "Mikael Ronstrom, MySQL AB",
+ "Partition Storage Engine Helper",
+ PLUGIN_LICENSE_GPL,
+ partition_initialize, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100, /* 1.0 */
+ NULL, /* status variables */
+ NULL, /* system variables */
+ "1.0", /* string version */
+ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
+}
+maria_declare_plugin_end;
#endif