Diffstat (limited to 'sql/sql_partition.cc')
-rw-r--r--  sql/sql_partition.cc  3059
1 file changed, 1897 insertions(+), 1162 deletions(-)
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index d10f695f535..d388333abe8 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -18,28 +18,64 @@
to partitioning introduced in MySQL version 5.1. It contains functionality
used by all handlers that support partitioning, such as
the partitioning handler itself and the NDB handler.
+ (Much of the code in this file has been split into partition_info.cc and
+ the header files partition_info.h + partition_element.h + sql_partition.h)
- The first version was written by Mikael Ronstrom.
+ The first version was written by Mikael Ronstrom 2004-2006.
+  Various parts of the optimizer code were written by Sergey Petrunia.
+  The code has been maintained by Mattias Jonsson.
+ The second version was written by Mikael Ronstrom 2006-2007 with some
+ final fixes for partition pruning in 2008-2009 with assistance from Sergey
+ Petrunia and Mattias Jonsson.
- This version supports RANGE partitioning, LIST partitioning, HASH
+ The first version supports RANGE partitioning, LIST partitioning, HASH
partitioning and composite partitioning (hereafter called subpartitioning)
where each RANGE/LIST partitioning is HASH partitioned. The hash function
can either be supplied by the user or by only a list of fields (also
called KEY partitioning), where the MySQL server will use an internal
hash function.
There are quite a few defaults that can be used as well.
+
+ The second version introduces a new variant of RANGE and LIST partitioning
+ which is often referred to as column lists in the code variables. This
+ enables a user to specify a set of columns and their concatenated value
+  as the partition value. By comparing the concatenation of these values,
+  the proper partition can be chosen.
*/
/* Some general useful functions */
#define MYSQL_LEX 1
-#include "mysql_priv.h"
+#include "sql_priv.h"
+#include "unireg.h" // REQUIRED: for other includes
+#include "sql_partition.h"
+#include "key.h" // key_restore
+#include "sql_parse.h" // parse_sql
+#include "sql_cache.h" // query_cache_invalidate3
+#include "lock.h" // mysql_lock_remove
+#include "sql_show.h" // append_identifier
#include <errno.h>
#include <m_ctype.h>
#include "my_md5.h"
+#include "transaction.h"
+
+#include "sql_base.h" // close_all_tables_for_name
+#include "sql_table.h" // build_table_filename,
+ // build_table_shadow_filename,
+ // table_to_filename
+ // mysql_*_alter_copy_data
+#include "opt_range.h" // store_key_image_to_rec
+#include "sql_analyse.h" // append_escaped
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
+
+/* TODO: Change abort() to DBUG_SUICIDE() when bug#52002 is pushed */
+#define ERROR_INJECT_CRASH(code) \
+ DBUG_EVALUATE_IF(code, (abort(), 0), 0)
+#define ERROR_INJECT_ERROR(code) \
+ DBUG_EVALUATE_IF(code, (my_error(ER_UNKNOWN_ERROR, MYF(0)), TRUE), 0)
+
/*
Partition related functions declarations and some static constants;
*/
@@ -50,7 +86,8 @@ const LEX_STRING partition_keywords[]=
{ C_STRING_WITH_LEN("LIST") },
{ C_STRING_WITH_LEN("KEY") },
{ C_STRING_WITH_LEN("MAXVALUE") },
- { C_STRING_WITH_LEN("LINEAR ") }
+ { C_STRING_WITH_LEN("LINEAR ") },
+ { C_STRING_WITH_LEN(" COLUMNS") }
};
static const char *part_str= "PARTITION";
static const char *sub_str= "SUB";
@@ -61,26 +98,23 @@ static const char *end_paren_str= ")";
static const char *begin_paren_str= "(";
static const char *comma_str= ",";
-static int get_part_id_charset_func_all(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_id_charset_func_part(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_id_charset_func_subpart(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_part_id_charset_func(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_subpart_id_charset_func(partition_info *part_info,
- uint32 *part_id);
+int get_partition_id_list_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
int get_partition_id_list(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
+int get_partition_id_range_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
int get_partition_id_range(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
+static int get_part_id_charset_func_part(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+static int get_part_id_charset_func_subpart(partition_info *part_info,
+ uint32 *part_id);
int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
@@ -93,30 +127,9 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info,
int get_partition_id_linear_key_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
-int get_partition_id_range_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
+int get_partition_id_with_sub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
int get_partition_id_hash_sub(partition_info *part_info,
uint32 *part_id);
int get_partition_id_key_sub(partition_info *part_info,
@@ -134,16 +147,64 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter);
uint32 get_next_partition_id_list(PARTITION_ITERATOR* part_iter);
int get_part_iter_for_interval_via_mapping(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array,
uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
uint flags,
PARTITION_ITERATOR *part_iter);
+int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
+ bool is_subpart,
+ uint32 *store_length_array,
+ uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
+ uint flags,
+ PARTITION_ITERATOR *part_iter);
int get_part_iter_for_interval_via_walking(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array,
uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
uint flags,
PARTITION_ITERATOR *part_iter);
#ifdef WITH_PARTITION_STORAGE_ENGINE
+static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec);
+static int cmp_rec_and_tuple_prune(part_column_list_val *val,
+ uint32 n_vals_in_rec,
+ bool tail_is_min);
+
+/*
+ Convert constants in VALUES definition to the character set the
+ corresponding field uses.
+
+ SYNOPSIS
+ convert_charset_partition_constant()
+ item Item to convert
+ cs Character set to convert to
+
+ RETURN VALUE
+ NULL Error
+ item New converted item
+*/
+
+Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
+{
+ THD *thd= current_thd;
+ Name_resolution_context *context= &thd->lex->current_select->context;
+ TABLE_LIST *save_list= context->table_list;
+ const char *save_where= thd->where;
+
+ item= item->safe_charset_converter(cs);
+ context->table_list= NULL;
+ thd->where= "convert character set partition constant";
+ if (!item || item->fix_fields(thd, (Item**)NULL))
+ item= NULL;
+ thd->where= save_where;
+ context->table_list= save_list;
+ return item;
+}
+
+
/*
A support function to check if a name is in a list of strings
@@ -161,7 +222,7 @@ bool is_name_in_list(char *name,
List<char> list_names)
{
List_iterator<char> names_it(list_names);
- uint no_names= list_names.elements;
+ uint num_names= list_names.elements;
uint i= 0;
do
@@ -169,7 +230,7 @@ bool is_name_in_list(char *name,
char *list_name= names_it++;
if (!(my_strcasecmp(system_charset_info, name, list_name)))
return TRUE;
- } while (++i < no_names);
+ } while (++i < num_names);
return FALSE;
}
@@ -198,24 +259,24 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
if (!is_create_table_ind)
{
- if (part_info->use_default_no_partitions)
+ if (part_info->use_default_num_partitions)
{
- if (table->file->get_no_parts(normalized_path, &part_info->no_parts))
+ if (table->file->get_no_parts(normalized_path, &part_info->num_parts))
{
DBUG_RETURN(TRUE);
}
}
else if (part_info->is_sub_partitioned() &&
- part_info->use_default_no_subpartitions)
+ part_info->use_default_num_subpartitions)
{
- uint no_parts;
- if (table->file->get_no_parts(normalized_path, &no_parts))
+ uint num_parts;
+ if (table->file->get_no_parts(normalized_path, &num_parts))
{
DBUG_RETURN(TRUE);
}
- DBUG_ASSERT(part_info->no_parts > 0);
- DBUG_ASSERT((no_parts % part_info->no_parts) == 0);
- part_info->no_subparts= no_parts / part_info->no_parts;
+ DBUG_ASSERT(part_info->num_parts > 0);
+ DBUG_ASSERT((num_parts % part_info->num_parts) == 0);
+ part_info->num_subparts= num_parts / part_info->num_parts;
}
}
part_info->set_up_defaults_for_partitioning(table->file,
@@ -249,8 +310,8 @@ bool check_reorganise_list(partition_info *new_part_info,
List<char> list_part_names)
{
uint new_count, old_count;
- uint no_new_parts= new_part_info->partitions.elements;
- uint no_old_parts= old_part_info->partitions.elements;
+ uint num_new_parts= new_part_info->partitions.elements;
+ uint num_old_parts= old_part_info->partitions.elements;
List_iterator<partition_element> new_parts_it(new_part_info->partitions);
bool same_part_info= (new_part_info == old_part_info);
DBUG_ENTER("check_reorganise_list");
@@ -273,8 +334,8 @@ bool check_reorganise_list(partition_info *new_part_info,
if (!is_name_in_list(old_name, list_part_names))
DBUG_RETURN(TRUE);
}
- } while (old_count < no_old_parts);
- } while (new_count < no_new_parts);
+ } while (old_count < num_old_parts);
+ } while (new_count < num_new_parts);
DBUG_RETURN(FALSE);
}
@@ -449,9 +510,10 @@ static bool set_up_field_array(TABLE *table,
bool is_sub_part)
{
Field **ptr, *field, **field_array;
- uint no_fields= 0;
+ uint num_fields= 0;
uint size_field_array;
uint i= 0;
+ uint inx;
partition_info *part_info= table->part_info;
int result= FALSE;
DBUG_ENTER("set_up_field_array");
@@ -460,9 +522,19 @@ static bool set_up_field_array(TABLE *table,
while ((field= *(ptr++)))
{
if (field->flags & GET_FIXED_FIELDS_FLAG)
- no_fields++;
+ num_fields++;
}
- if (no_fields == 0)
+ if (num_fields > MAX_REF_PARTS)
+ {
+ char *ptr;
+ if (is_sub_part)
+ ptr= (char*)"subpartition function";
+ else
+ ptr= (char*)"partition function";
+ my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), ptr);
+ DBUG_RETURN(TRUE);
+ }
+ if (num_fields == 0)
{
/*
We are using hidden key as partitioning field
@@ -470,8 +542,8 @@ static bool set_up_field_array(TABLE *table,
DBUG_ASSERT(!is_sub_part);
DBUG_RETURN(result);
}
- size_field_array= (no_fields+1)*sizeof(Field*);
- field_array= (Field**)sql_alloc(size_field_array);
+ size_field_array= (num_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_calloc(size_field_array);
if (unlikely(!field_array))
{
mem_alloc_error(size_field_array);
@@ -486,7 +558,32 @@ static bool set_up_field_array(TABLE *table,
field->flags|= FIELD_IN_PART_FUNC_FLAG;
if (likely(!result))
{
- field_array[i++]= field;
+ if (!is_sub_part && part_info->column_list)
+ {
+ List_iterator<char> it(part_info->part_field_list);
+ char *field_name;
+
+ DBUG_ASSERT(num_fields == part_info->part_field_list.elements);
+ inx= 0;
+ do
+ {
+ field_name= it++;
+ if (!my_strcasecmp(system_charset_info,
+ field_name,
+ field->field_name))
+ break;
+ } while (++inx < num_fields);
+ if (inx == num_fields)
+ {
+ mem_alloc_error(1);
+ result= TRUE;
+ continue;
+ }
+ }
+ else
+ inx= i;
+ field_array[inx]= field;
+ i++;
/*
We check that the fields are proper. It is required for each
@@ -504,16 +601,16 @@ static bool set_up_field_array(TABLE *table,
}
}
}
- field_array[no_fields]= 0;
+ field_array[num_fields]= 0;
if (!is_sub_part)
{
part_info->part_field_array= field_array;
- part_info->no_part_fields= no_fields;
+ part_info->num_part_fields= num_fields;
}
else
{
part_info->subpart_field_array= field_array;
- part_info->no_subpart_fields= no_fields;
+ part_info->num_subpart_fields= num_fields;
}
DBUG_RETURN(result);
}
@@ -552,36 +649,36 @@ static bool create_full_part_field_array(THD *thd, TABLE *table,
if (!part_info->is_sub_partitioned())
{
part_info->full_part_field_array= part_info->part_field_array;
- part_info->no_full_part_fields= part_info->no_part_fields;
+ part_info->num_full_part_fields= part_info->num_part_fields;
}
else
{
Field *field, **field_array;
- uint no_part_fields=0, size_field_array;
+ uint num_part_fields=0, size_field_array;
ptr= table->field;
while ((field= *(ptr++)))
{
if (field->flags & FIELD_IN_PART_FUNC_FLAG)
- no_part_fields++;
+ num_part_fields++;
}
- size_field_array= (no_part_fields+1)*sizeof(Field*);
- field_array= (Field**)sql_alloc(size_field_array);
+ size_field_array= (num_part_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_calloc(size_field_array);
if (unlikely(!field_array))
{
mem_alloc_error(size_field_array);
result= TRUE;
goto end;
}
- no_part_fields= 0;
+ num_part_fields= 0;
ptr= table->field;
while ((field= *(ptr++)))
{
if (field->flags & FIELD_IN_PART_FUNC_FLAG)
- field_array[no_part_fields++]= field;
+ field_array[num_part_fields++]= field;
}
- field_array[no_part_fields]=0;
+ field_array[num_part_fields]=0;
part_info->full_part_field_array= field_array;
- part_info->no_full_part_fields= no_part_fields;
+ part_info->num_full_part_fields= num_part_fields;
}
/*
@@ -761,9 +858,6 @@ static bool handle_list_of_fields(List_iterator<char> it,
bool result;
char *field_name;
bool is_list_empty= TRUE;
- int fields_handled = 0;
- char* field_name_array[MAX_KEY];
-
DBUG_ENTER("handle_list_of_fields");
while ((field_name= it++))
@@ -779,36 +873,17 @@ static bool handle_list_of_fields(List_iterator<char> it,
result= TRUE;
goto end;
}
-
- /*
- Check for duplicate fields in the list.
- Assuming that there are not many fields in the partition key list.
- If there were, it would be better to replace the for-loop
- with a more efficient algorithm.
- */
-
- field_name_array[fields_handled] = field_name;
- for (int i = 0; i < fields_handled; ++i)
- {
- if (my_strcasecmp(system_charset_info,
- field_name_array[i], field_name) == 0)
- {
- my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- }
- fields_handled++;
}
- if (is_list_empty)
+ if (is_list_empty && part_info->part_type == HASH_PARTITION)
{
uint primary_key= table->s->primary_key;
if (primary_key != MAX_KEY)
{
- uint no_key_parts= table->key_info[primary_key].key_parts, i;
+ uint num_key_parts= table->key_info[primary_key].key_parts, i;
/*
In the case of an empty list we use primary key as partition key.
*/
- for (i= 0; i < no_key_parts; i++)
+ for (i= 0; i < num_key_parts; i++)
{
Field *field= table->key_info[primary_key].key_part[i].field;
field->flags|= GET_FIXED_FIELDS_FLAG;
@@ -872,11 +947,90 @@ int check_signed_flag(partition_info *part_info)
error= ER_PARTITION_CONST_DOMAIN_ERROR;
break;
}
- } while (++i < part_info->no_parts);
+ } while (++i < part_info->num_parts);
}
return error;
}
+/**
+ Initialize lex object for use in fix_fields and parsing.
+
+ SYNOPSIS
+ init_lex_with_single_table()
+ @param thd The thread object
+ @param table The table object
+ @return Operation status
+ @retval TRUE An error occurred, memory allocation error
+ @retval FALSE Ok
+
+ DESCRIPTION
+ This function is used to initialize a lex object on the
+ stack for use by fix_fields and for parsing. In order to
+ work properly it also needs to initialize the
+ Name_resolution_context object of the lexer.
+ Finally it needs to set a couple of variables to ensure
+ proper functioning of fix_fields.
+*/
+
+static int
+init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex)
+{
+ TABLE_LIST *table_list;
+ Table_ident *table_ident;
+ SELECT_LEX *select_lex= &lex->select_lex;
+ Name_resolution_context *context= &select_lex->context;
+ /*
+ We will call the parser to create a part_info struct based on the
+ partition string stored in the frm file.
+ We will use a local lex object for this purpose. However we also
+ need to set the Name_resolution_object for this lex object. We
+ do this by using add_table_to_list where we add the table that
+ we're working with to the Name_resolution_context.
+ */
+ thd->lex= lex;
+ lex_start(thd);
+ context->init();
+ if ((!(table_ident= new Table_ident(thd,
+ table->s->table_name,
+ table->s->db, TRUE))) ||
+ (!(table_list= select_lex->add_table_to_list(thd,
+ table_ident,
+ NULL,
+ 0))))
+ return TRUE;
+ context->resolve_in_table_list_only(table_list);
+ lex->use_only_table_context= TRUE;
+ select_lex->cur_pos_in_select_list= UNDEF_POS;
+ table->map= 1; //To ensure correct calculation of const item
+ table->get_fields_in_item_tree= TRUE;
+ table_list->table= table;
+ return FALSE;
+}
+
+/**
+ End use of local lex with single table
+
+ SYNOPSIS
+ end_lex_with_single_table()
+ @param thd The thread object
+ @param table The table object
+ @param old_lex The real lex object connected to THD
+
+ DESCRIPTION
+ This function restores the real lex object after calling
+ init_lex_with_single_table and also restores some table
+ variables temporarily set.
+*/
+
+static void
+end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex)
+{
+ LEX *lex= thd->lex;
+ table->map= 0;
+ table->get_fields_in_item_tree= FALSE;
+ lex_end(lex);
+ thd->lex= old_lex;
+}
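
The init/end pair above is essentially a save-swap-restore of thd->lex (plus a few table flags) around parsing and fix_fields. A generic sketch of that pattern in plain C++ (assumed names, not the server API); the constructor plays the init_lex_with_single_table() role and the destructor the end_lex_with_single_table() role, whereas the server keeps them as explicit calls so the error path decides when to restore:

// Installs a temporary value into a slot and restores the old one on exit.
template <class T>
class Scoped_swap
{
  T &slot_;
  T saved_;
public:
  Scoped_swap(T &slot, T temporary) : slot_(slot), saved_(slot)
  { slot_= temporary; }
  ~Scoped_swap() { slot_= saved_; }
};

int main()
{
  int current_lex= 1;                       // stands in for thd->lex
  {
    Scoped_swap<int> guard(current_lex, 2); // "init_lex_with_single_table"
    // ... parse or fix_fields with the temporary lex installed ...
  }                                         // "end_lex_with_single_table"
  return current_lex == 1 ? 0 : 1;
}
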
/*
The function uses a new feature in fix_fields where the flag
@@ -891,7 +1045,6 @@ int check_signed_flag(partition_info *part_info)
table The table object
part_info Reference to partitioning data structure
is_sub_part Is the table subpartitioned as well
- is_field_to_be_setup Flag if we are to set-up field arrays
is_create_table_ind Indicator of whether openfrm was called as part of
CREATE or ALTER TABLE
@@ -917,69 +1070,22 @@ int check_signed_flag(partition_info *part_info)
*/
static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
- bool is_sub_part, bool is_field_to_be_setup,
- bool is_create_table_ind)
+ bool is_sub_part, bool is_create_table_ind)
{
partition_info *part_info= table->part_info;
- uint dir_length, home_dir_length;
bool result= TRUE;
- TABLE_LIST tables;
- TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
int error;
- Name_resolution_context *context;
- const char *save_where;
- char* db_name;
- char db_name_string[FN_REFLEN];
- bool save_use_only_table_context;
+ LEX *old_lex= thd->lex;
+ LEX lex;
uint8 saved_full_group_by_flag;
nesting_map saved_allow_sum_func;
DBUG_ENTER("fix_fields_part_func");
- if (part_info->fixed)
- {
- if (!(is_sub_part || (error= check_signed_flag(part_info))))
- result= FALSE;
+ if (init_lex_with_single_table(thd, table, &lex))
goto end;
- }
-
- /*
- Set-up the TABLE_LIST object to be a list with a single table
- Set the object to zero to create NULL pointers and set alias
- and real name to table name and get database name from file name.
- TODO: Consider generalizing or refactoring Lex::add_table_to_list() so
- it can be used in all places where we create TABLE_LIST objects.
- Also consider creating appropriate constructors for TABLE_LIST.
- */
- bzero((void*)&tables, sizeof(TABLE_LIST));
- tables.alias= tables.table_name= (char*) table->s->table_name.str;
- tables.table= table;
- tables.next_local= 0;
- tables.next_name_resolution_table= 0;
- /*
- Cache the table in Item_fields. All the tables can be cached except
- the trigger pseudo table.
- */
- tables.cacheable_table= TRUE;
- context= thd->lex->current_context();
- tables.select_lex= context->select_lex;
- strmov(db_name_string, table->s->normalized_path.str);
- dir_length= dirname_length(db_name_string);
- db_name_string[dir_length - 1]= 0;
- home_dir_length= dirname_length(db_name_string);
- db_name= &db_name_string[home_dir_length];
- tables.db= db_name;
-
- table->map= 1; //To ensure correct calculation of const item
- table->get_fields_in_item_tree= TRUE;
- save_table_list= context->table_list;
- save_first_table= context->first_name_resolution_table;
- save_last_table= context->last_name_resolution_table;
- context->table_list= &tables;
- context->first_name_resolution_table= &tables;
- context->last_name_resolution_table= NULL;
- func_expr->walk(&Item::change_context_processor, 0, (uchar*) context);
- save_where= thd->where;
+ func_expr->walk(&Item::change_context_processor, 0,
+ (uchar*) &lex.select_lex.context);
thd->where= "partition function";
/*
In execution we must avoid the use of thd->change_item_tree since
@@ -993,19 +1099,14 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
that does this during val_int must be disallowed as partition
function.
SEE Bug #21658
- */
- /*
+
This is a tricky call to prepare for since it can have a large number
of interesting side effects, both desirable and undesirable.
*/
-
- save_use_only_table_context= thd->lex->use_only_table_context;
- thd->lex->use_only_table_context= TRUE;
- thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS;
saved_full_group_by_flag= thd->lex->current_select->full_group_by_flag;
saved_allow_sum_func= thd->lex->allow_sum_func;
thd->lex->allow_sum_func= 0;
-
+
error= func_expr->fix_fields(thd, (Item**)&func_expr);
/*
@@ -1015,19 +1116,12 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
thd->lex->current_select->full_group_by_flag= saved_full_group_by_flag;
thd->lex->allow_sum_func= saved_allow_sum_func;
- thd->lex->use_only_table_context= save_use_only_table_context;
-
- context->table_list= save_table_list;
- context->first_name_resolution_table= save_first_table;
- context->last_name_resolution_table= save_last_table;
if (unlikely(error))
{
DBUG_PRINT("info", ("Field in partition function not part of table"));
- if (is_field_to_be_setup)
- clear_field_flag(table);
+ clear_field_flag(table);
goto end;
}
- thd->where= save_where;
if (unlikely(func_expr->const_item()))
{
my_error(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0));
@@ -1058,14 +1152,13 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
if ((!is_sub_part) && (error= check_signed_flag(part_info)))
goto end;
- result= FALSE;
- if (is_field_to_be_setup)
- result= set_up_field_array(table, is_sub_part);
- if (!is_sub_part)
- part_info->fixed= TRUE;
+ result= set_up_field_array(table, is_sub_part);
end:
- table->get_fields_in_item_tree= FALSE;
- table->map= 0; //Restore old value
+ end_lex_with_single_table(thd, table, old_lex);
+#if !defined(DBUG_OFF)
+ func_expr->walk(&Item::change_context_processor, 0,
+ (uchar*) 0);
+#endif
DBUG_RETURN(result);
}
@@ -1241,9 +1334,9 @@ void check_range_capable_PF(TABLE *table)
static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
{
uint32 *bitmap_buf;
- uint bitmap_bits= part_info->no_subparts?
- (part_info->no_subparts* part_info->no_parts):
- part_info->no_parts;
+ uint bitmap_bits= part_info->num_subparts?
+ (part_info->num_subparts* part_info->num_parts):
+ part_info->num_parts;
uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
DBUG_ENTER("set_up_partition_bitmap");
@@ -1343,64 +1436,47 @@ static void set_up_partition_func_pointers(partition_info *part_info)
if (part_info->is_sub_partitioned())
{
+ part_info->get_partition_id= get_partition_id_with_sub;
if (part_info->part_type == RANGE_PARTITION)
{
- part_info->get_part_partition_id= get_partition_id_range;
+ if (part_info->column_list)
+ part_info->get_part_partition_id= get_partition_id_range_col;
+ else
+ part_info->get_part_partition_id= get_partition_id_range;
if (part_info->list_of_subpart_fields)
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_range_sub_linear_key;
part_info->get_subpartition_id= get_partition_id_linear_key_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_range_sub_key;
part_info->get_subpartition_id= get_partition_id_key_sub;
- }
}
else
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_range_sub_linear_hash;
part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_range_sub_hash;
part_info->get_subpartition_id= get_partition_id_hash_sub;
- }
}
}
else /* LIST Partitioning */
{
- part_info->get_part_partition_id= get_partition_id_list;
+ if (part_info->column_list)
+ part_info->get_part_partition_id= get_partition_id_list_col;
+ else
+ part_info->get_part_partition_id= get_partition_id_list;
if (part_info->list_of_subpart_fields)
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_list_sub_linear_key;
part_info->get_subpartition_id= get_partition_id_linear_key_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_list_sub_key;
part_info->get_subpartition_id= get_partition_id_key_sub;
- }
}
else
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_list_sub_linear_hash;
part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_list_sub_hash;
part_info->get_subpartition_id= get_partition_id_hash_sub;
- }
}
}
}
@@ -1409,9 +1485,19 @@ static void set_up_partition_func_pointers(partition_info *part_info)
part_info->get_part_partition_id= NULL;
part_info->get_subpartition_id= NULL;
if (part_info->part_type == RANGE_PARTITION)
- part_info->get_partition_id= get_partition_id_range;
+ {
+ if (part_info->column_list)
+ part_info->get_partition_id= get_partition_id_range_col;
+ else
+ part_info->get_partition_id= get_partition_id_range;
+ }
else if (part_info->part_type == LIST_PARTITION)
- part_info->get_partition_id= get_partition_id_list;
+ {
+ if (part_info->column_list)
+ part_info->get_partition_id= get_partition_id_list_col;
+ else
+ part_info->get_partition_id= get_partition_id_list;
+ }
else /* HASH partitioning */
{
if (part_info->list_of_part_fields)
@@ -1430,32 +1516,43 @@ static void set_up_partition_func_pointers(partition_info *part_info)
}
}
}
- if (part_info->full_part_charset_field_array)
- {
- DBUG_ASSERT(part_info->get_partition_id);
- part_info->get_partition_id_charset= part_info->get_partition_id;
- if (part_info->part_charset_field_array &&
- part_info->subpart_charset_field_array)
- part_info->get_partition_id= get_part_id_charset_func_all;
- else if (part_info->part_charset_field_array)
- part_info->get_partition_id= get_part_id_charset_func_part;
- else
- part_info->get_partition_id= get_part_id_charset_func_subpart;
- }
- if (part_info->part_charset_field_array &&
- part_info->is_sub_partitioned())
+ /*
+    We need special functions to handle character sets since they require
+    copying the field values into special buffers and restoring the field
+    pointers afterwards. For subpartitioned tables we do the copy and restore
+    individually for the partition and subpartition parts. For
+    non-subpartitioned tables we use the same functions as are used for the
+    partition part of subpartitioning.
+    Thus for subpartitioned tables get_partition_id is always
+    get_partition_id_with_sub, even when character sets exist.
+ */
+ if (part_info->part_charset_field_array)
{
- DBUG_ASSERT(part_info->get_part_partition_id);
- part_info->get_part_partition_id_charset=
+ if (part_info->is_sub_partitioned())
+ {
+ DBUG_ASSERT(part_info->get_part_partition_id);
+ if (!part_info->column_list)
+ {
+ part_info->get_part_partition_id_charset=
part_info->get_part_partition_id;
- part_info->get_part_partition_id= get_part_part_id_charset_func;
+ part_info->get_part_partition_id= get_part_id_charset_func_part;
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(part_info->get_partition_id);
+ if (!part_info->column_list)
+ {
+ part_info->get_part_partition_id_charset= part_info->get_partition_id;
+ part_info->get_part_partition_id= get_part_id_charset_func_part;
+ }
+ }
}
if (part_info->subpart_charset_field_array)
{
DBUG_ASSERT(part_info->get_subpartition_id);
part_info->get_subpartition_id_charset=
part_info->get_subpartition_id;
- part_info->get_subpartition_id= get_subpart_id_charset_func;
+ part_info->get_subpartition_id= get_part_id_charset_func_subpart;
}
DBUG_VOID_RETURN;
}
@@ -1463,22 +1560,22 @@ static void set_up_partition_func_pointers(partition_info *part_info)
/*
For linear hashing we need a mask which is on the form 2**n - 1 where
- 2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
+ 2**n >= num_parts. Thus if num_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
SYNOPSIS
set_linear_hash_mask()
part_info Reference to partitioning data structure
- no_parts Number of parts in linear hash partitioning
+ num_parts Number of parts in linear hash partitioning
RETURN VALUE
NONE
*/
-void set_linear_hash_mask(partition_info *part_info, uint no_parts)
+void set_linear_hash_mask(partition_info *part_info, uint num_parts)
{
uint mask;
- for (mask= 1; mask < no_parts; mask<<=1)
+ for (mask= 1; mask < num_parts; mask<<=1)
;
part_info->linear_hash_mask= mask - 1;
}
@@ -1492,7 +1589,7 @@ void set_linear_hash_mask(partition_info *part_info, uint no_parts)
get_part_id_from_linear_hash()
hash_value Hash value calculated by HASH function or KEY function
mask Mask calculated previously by set_linear_hash_mask
- no_parts Number of partitions in HASH partitioned part
+ num_parts Number of partitions in HASH partitioned part
RETURN VALUE
part_id The calculated partition identity (starting at 0)
@@ -1505,11 +1602,11 @@ void set_linear_hash_mask(partition_info *part_info, uint no_parts)
*/
static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
- uint no_parts)
+ uint num_parts)
{
uint32 part_id= (uint32)(hash_value & mask);
- if (part_id >= no_parts)
+ if (part_id >= num_parts)
{
uint new_mask= ((mask + 1) >> 1) - 1;
part_id= (uint32)(hash_value & new_mask);
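
A standalone sketch of the two routines above in plain C++ (no server types), including the num_parts = 6 example from the comment:

#include <cassert>
#include <cstdint>

static uint32_t linear_hash_mask(unsigned num_parts)
{
  uint32_t mask;
  for (mask= 1; mask < num_parts; mask<<= 1)
    ;
  return mask - 1;                          // num_parts = 6 -> mask = 7
}

static uint32_t linear_hash_part_id(long long hash_value, uint32_t mask,
                                    unsigned num_parts)
{
  uint32_t part_id= (uint32_t) (hash_value & mask);
  if (part_id >= num_parts)                 // fold back into the valid range
    part_id= (uint32_t) (hash_value & (((mask + 1) >> 1) - 1));
  return part_id;
}

int main()
{
  assert(linear_hash_mask(6) == 7);              // 2**3 - 1
  assert(linear_hash_part_id(6, 7, 6) == 2);     // 6 & 7 = 6 >= 6 -> 6 & 3 = 2
  return 0;
}
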
@@ -1653,7 +1750,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
function is correct.
*/
if (part_info->linear_hash_ind)
- set_linear_hash_mask(part_info, part_info->no_subparts);
+ set_linear_hash_mask(part_info, part_info->num_subparts);
if (part_info->list_of_subpart_fields)
{
List_iterator<char> it(part_info->subpart_field_list);
@@ -1663,13 +1760,11 @@ bool fix_partition_func(THD *thd, TABLE *table,
else
{
if (unlikely(fix_fields_part_func(thd, part_info->subpart_expr,
- table, TRUE, TRUE,
- is_create_table_ind)))
+ table, TRUE, is_create_table_ind)))
goto end;
if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
{
- my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0),
- "SUBPARTITION");
+ part_info->report_part_expr_error(TRUE);
goto end;
}
}
@@ -1682,7 +1777,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
if (part_info->part_type == HASH_PARTITION)
{
if (part_info->linear_hash_ind)
- set_linear_hash_mask(part_info, part_info->no_parts);
+ set_linear_hash_mask(part_info, part_info->num_parts);
if (part_info->list_of_part_fields)
{
List_iterator<char> it(part_info->part_field_list);
@@ -1692,34 +1787,42 @@ bool fix_partition_func(THD *thd, TABLE *table,
else
{
if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
- table, FALSE, TRUE,
- is_create_table_ind)))
+ table, FALSE, is_create_table_ind)))
goto end;
if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
{
- my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ part_info->report_part_expr_error(FALSE);
goto end;
}
- part_info->part_result_type= INT_RESULT;
}
+ part_info->fixed= TRUE;
}
else
{
const char *error_str;
- if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
- table, FALSE, TRUE,
- is_create_table_ind)))
- goto end;
+ if (part_info->column_list)
+ {
+ List_iterator<char> it(part_info->part_field_list);
+ if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
+ goto end;
+ }
+ else
+ {
+ if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
+ table, FALSE, is_create_table_ind)))
+ goto end;
+ }
+ part_info->fixed= TRUE;
if (part_info->part_type == RANGE_PARTITION)
{
error_str= partition_keywords[PKW_RANGE].str;
- if (unlikely(part_info->check_range_constants()))
+ if (unlikely(part_info->check_range_constants(thd)))
goto end;
}
else if (part_info->part_type == LIST_PARTITION)
{
error_str= partition_keywords[PKW_LIST].str;
- if (unlikely(part_info->check_list_constants()))
+ if (unlikely(part_info->check_list_constants(thd)))
goto end;
}
else
@@ -1728,24 +1831,30 @@ bool fix_partition_func(THD *thd, TABLE *table,
my_error(ER_INCONSISTENT_PARTITION_INFO_ERROR, MYF(0));
goto end;
}
- if (unlikely(part_info->no_parts < 1))
+ if (unlikely(part_info->num_parts < 1))
{
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str);
goto end;
}
- if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
+ if (unlikely(!part_info->column_list &&
+ part_info->part_expr->result_type() != INT_RESULT))
{
- my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ part_info->report_part_expr_error(FALSE);
goto end;
}
}
if (((part_info->part_type != HASH_PARTITION ||
- part_info->list_of_part_fields == FALSE) &&
- check_part_func_fields(part_info->part_field_array, TRUE)) ||
+ part_info->list_of_part_fields == FALSE) &&
+ !part_info->column_list &&
+ check_part_func_fields(part_info->part_field_array, TRUE)) ||
(part_info->list_of_subpart_fields == FALSE &&
part_info->is_sub_partitioned() &&
check_part_func_fields(part_info->subpart_field_array, TRUE)))
{
+ /*
+ Range/List/HASH (but not KEY) and not COLUMNS or HASH subpartitioning
+      with columns in the partitioning expression using a disallowed
+      character set.
+ */
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
@@ -1764,6 +1873,11 @@ bool fix_partition_func(THD *thd, TABLE *table,
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
+ if (unlikely(part_info->check_partition_field_length()))
+ {
+ my_error(ER_PARTITION_FIELDS_TOO_LONG, MYF(0));
+ goto end;
+ }
check_range_capable_PF(table);
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
@@ -1786,9 +1900,9 @@ end:
static int add_write(File fptr, const char *buf, uint len)
{
- uint len_written= my_write(fptr, (const uchar*)buf, len, MYF(0));
+ uint ret_code= mysql_file_write(fptr, (const uchar*)buf, len, MYF(MY_FNABP));
- if (likely(len == len_written))
+ if (likely(ret_code == 0))
return 0;
else
return 1;
@@ -1837,14 +1951,8 @@ static int add_begin_parenthesis(File fptr)
static int add_part_key_word(File fptr, const char *key_string)
{
int err= add_string(fptr, key_string);
-
err+= add_space(fptr);
- return err + add_begin_parenthesis(fptr);
-}
-
-static int add_hash(File fptr)
-{
- return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
+ return err;
}
static int add_partition(File fptr)
@@ -1875,30 +1983,31 @@ static int add_subpartition_by(File fptr)
return err + add_partition_by(fptr);
}
-static int add_key_partition(File fptr, List<char> field_list)
+static int add_part_field_list(File fptr, List<char> field_list)
{
- uint i, no_fields;
- int err;
+ uint i, num_fields;
+ int err= 0;
List_iterator<char> part_it(field_list);
- err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
- no_fields= field_list.elements;
+ num_fields= field_list.elements;
i= 0;
- while (i < no_fields)
+ err+= add_begin_parenthesis(fptr);
+ while (i < num_fields)
{
const char *field_str= part_it++;
String field_string("", 0, system_charset_info);
THD *thd= current_thd;
- ulonglong save_options= thd->options;
- thd->options= 0;
+ ulonglong save_options= thd->variables.option_bits;
+ thd->variables.option_bits&= ~OPTION_QUOTE_SHOW_CREATE;
append_identifier(thd, &field_string, field_str,
strlen(field_str));
- thd->options= save_options;
+ thd->variables.option_bits= save_options;
err+= add_string_object(fptr, &field_string);
- if (i != (no_fields-1))
+ if (i != (num_fields-1))
err+= add_comma(fptr);
i++;
}
+ err+= add_end_parenthesis(fptr);
return err;
}
@@ -1907,12 +2016,11 @@ static int add_name_string(File fptr, const char *name)
int err;
String name_string("", 0, system_charset_info);
THD *thd= current_thd;
- ulonglong save_options= thd->options;
-
- thd->options= 0;
+ ulonglong save_options= thd->variables.option_bits;
+ thd->variables.option_bits&= ~OPTION_QUOTE_SHOW_CREATE;
append_identifier(thd, &name_string, name,
strlen(name));
- thd->options= save_options;
+ thd->variables.option_bits= save_options;
err= add_string_object(fptr, &name_string);
return err;
}
@@ -2008,37 +2116,269 @@ static int add_partition_options(File fptr, partition_element *p_elem)
return err + add_engine(fptr,p_elem->engine_type);
}
-static int add_partition_values(File fptr, partition_info *part_info, partition_element *p_elem)
+
+/*
+ Check partition fields for result type and if they need
+ to check the character set.
+
+ SYNOPSIS
+ check_part_field()
+ sql_type Type provided by user
+ field_name Name of field, used for error handling
+ result_type Out value: Result type of field
+ need_cs_check Out value: Do we need character set check
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Ok
+*/
+
+static int check_part_field(enum_field_types sql_type,
+ const char *field_name,
+ Item_result *result_type,
+ bool *need_cs_check)
+{
+ if (sql_type >= MYSQL_TYPE_TINY_BLOB &&
+ sql_type <= MYSQL_TYPE_BLOB)
+ {
+ my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
+ return TRUE;
+ }
+ switch (sql_type)
+ {
+ case MYSQL_TYPE_TINY:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_LONGLONG:
+ case MYSQL_TYPE_INT24:
+ *result_type= INT_RESULT;
+ *need_cs_check= FALSE;
+ return FALSE;
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_DATETIME:
+ *result_type= STRING_RESULT;
+ *need_cs_check= TRUE;
+ return FALSE;
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_STRING:
+ case MYSQL_TYPE_VAR_STRING:
+ *result_type= STRING_RESULT;
+ *need_cs_check= TRUE;
+ return FALSE;
+ case MYSQL_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_DECIMAL:
+ case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_NULL:
+ case MYSQL_TYPE_FLOAT:
+ case MYSQL_TYPE_DOUBLE:
+ case MYSQL_TYPE_BIT:
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ case MYSQL_TYPE_GEOMETRY:
+ goto error;
+ default:
+ goto error;
+ }
+error:
+ my_error(ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD, MYF(0),
+ field_name);
+ return TRUE;
+}
+
+
+/*
+ Find the given field's Create_field object using name of field
+
+ SYNOPSIS
+ get_sql_field()
+ field_name Field name
+ alter_info Info from ALTER TABLE/CREATE TABLE
+
+ RETURN VALUE
+ sql_field Object filled in by parser about field
+ NULL No field found
+*/
+
+static Create_field* get_sql_field(char *field_name,
+ Alter_info *alter_info)
+{
+ List_iterator<Create_field> it(alter_info->create_list);
+ Create_field *sql_field;
+ DBUG_ENTER("get_sql_field");
+
+ while ((sql_field= it++))
+ {
+ if (!(my_strcasecmp(system_charset_info,
+ sql_field->field_name,
+ field_name)))
+ {
+ DBUG_RETURN(sql_field);
+ }
+ }
+ DBUG_RETURN(NULL);
+}
+
+
+static int add_column_list_values(File fptr, partition_info *part_info,
+ part_elem_value *list_value,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
+{
+ int err= 0;
+ uint i;
+ List_iterator<char> it(part_info->part_field_list);
+ uint num_elements= part_info->part_field_list.elements;
+ bool use_parenthesis= (part_info->part_type == LIST_PARTITION &&
+ part_info->num_columns > 1U);
+
+ if (use_parenthesis)
+ err+= add_begin_parenthesis(fptr);
+ for (i= 0; i < num_elements; i++)
+ {
+ part_column_list_val *col_val= &list_value->col_val_array[i];
+ char *field_name= it++;
+ if (col_val->max_value)
+ err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ else if (col_val->null_value)
+ err+= add_string(fptr, "NULL");
+ else
+ {
+ char buffer[MAX_KEY_LENGTH];
+ String str(buffer, sizeof(buffer), &my_charset_bin);
+ Item *item_expr= col_val->item_expression;
+ if (item_expr->null_value)
+ err+= add_string(fptr, "NULL");
+ else
+ {
+ String *res;
+ CHARSET_INFO *field_cs;
+ bool need_cs_check= FALSE;
+ Item_result result_type= STRING_RESULT;
+
+ /*
+ This function is called at a very early stage, even before
+ we have prepared the sql_field objects. Thus we have to
+ find the proper sql_field object and get the character set
+ from that object.
+ */
+ if (create_info)
+ {
+ Create_field *sql_field;
+
+ if (!(sql_field= get_sql_field(field_name,
+ alter_info)))
+ {
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+ return 1;
+ }
+ if (check_part_field(sql_field->sql_type,
+ sql_field->field_name,
+ &result_type,
+ &need_cs_check))
+ return 1;
+ if (need_cs_check)
+ field_cs= get_sql_field_charset(sql_field, create_info);
+ else
+ field_cs= NULL;
+ }
+ else
+ {
+ Field *field= part_info->part_field_array[i];
+ result_type= field->result_type();
+ if (check_part_field(field->real_type(),
+ field->field_name,
+ &result_type,
+ &need_cs_check))
+ return 1;
+ DBUG_ASSERT(result_type == field->result_type());
+ if (need_cs_check)
+ field_cs= field->charset();
+ else
+ field_cs= NULL;
+ }
+ if (result_type != item_expr->result_type())
+ {
+ my_error(ER_WRONG_TYPE_COLUMN_VALUE_ERROR, MYF(0));
+ return 1;
+ }
+ if (field_cs && field_cs != item_expr->collation.collation)
+ {
+ if (!(item_expr= convert_charset_partition_constant(item_expr,
+ field_cs)))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ return 1;
+ }
+ }
+ {
+ String val_conv;
+ val_conv.set_charset(system_charset_info);
+ res= item_expr->val_str(&str);
+ if (get_cs_converted_part_value_from_string(current_thd,
+ item_expr, res,
+ &val_conv, field_cs,
+ (bool)(alter_info != NULL)))
+ return 1;
+ err+= add_string_object(fptr, &val_conv);
+ }
+ }
+ }
+ if (i != (num_elements - 1))
+ err+= add_string(fptr, comma_str);
+ }
+ if (use_parenthesis)
+ err+= add_end_parenthesis(fptr);
+ return err;
+}
+
+static int add_partition_values(File fptr, partition_info *part_info,
+ partition_element *p_elem,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
{
int err= 0;
if (part_info->part_type == RANGE_PARTITION)
{
err+= add_string(fptr, " VALUES LESS THAN ");
- if (!p_elem->max_value)
+ if (part_info->column_list)
{
+ List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
+ part_elem_value *list_value= list_val_it++;
err+= add_begin_parenthesis(fptr);
- if (p_elem->signed_flag)
- err+= add_int(fptr, p_elem->range_value);
- else
- err+= add_uint(fptr, p_elem->range_value);
+ err+= add_column_list_values(fptr, part_info, list_value,
+ create_info, alter_info);
err+= add_end_parenthesis(fptr);
}
else
- err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ {
+ if (!p_elem->max_value)
+ {
+ err+= add_begin_parenthesis(fptr);
+ if (p_elem->signed_flag)
+ err+= add_int(fptr, p_elem->range_value);
+ else
+ err+= add_uint(fptr, p_elem->range_value);
+ err+= add_end_parenthesis(fptr);
+ }
+ else
+ err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ }
}
else if (part_info->part_type == LIST_PARTITION)
{
uint i;
List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
err+= add_string(fptr, " VALUES IN ");
- uint no_items= p_elem->list_val_list.elements;
+ uint num_items= p_elem->list_val_list.elements;
err+= add_begin_parenthesis(fptr);
if (p_elem->has_null_value)
{
err+= add_string(fptr, "NULL");
- if (no_items == 0)
+ if (num_items == 0)
{
err+= add_end_parenthesis(fptr);
goto end;
@@ -2050,13 +2390,19 @@ static int add_partition_values(File fptr, partition_info *part_info, partition_
{
part_elem_value *list_value= list_val_it++;
- if (!list_value->unsigned_flag)
- err+= add_int(fptr, list_value->value);
+ if (part_info->column_list)
+ err+= add_column_list_values(fptr, part_info, list_value,
+ create_info, alter_info);
else
- err+= add_uint(fptr, list_value->value);
- if (i != (no_items-1))
+ {
+ if (!list_value->unsigned_flag)
+ err+= add_int(fptr, list_value->value);
+ else
+ err+= add_uint(fptr, list_value->value);
+ }
+ if (i != (num_items-1))
err+= add_comma(fptr);
- } while (++i < no_items);
+ } while (++i < num_items);
err+= add_end_parenthesis(fptr);
}
end:
@@ -2075,6 +2421,8 @@ end:
use_sql_alloc Allocate buffer from sql_alloc if true
otherwise use my_malloc
show_partition_options Should we display partition options
+ create_info Info generated by parser
+ alter_info Info generated by parser
RETURN VALUES
NULL error
@@ -2103,9 +2451,11 @@ end:
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length,
bool use_sql_alloc,
- bool show_partition_options)
+ bool show_partition_options,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
{
- uint i,j, tot_no_parts, no_subparts;
+ uint i,j, tot_num_parts, num_subparts;
partition_element *part_elem;
ulonglong buffer_length;
char path[FN_REFLEN];
@@ -2136,27 +2486,37 @@ char *generate_partition_syntax(partition_info *part_info,
if (part_info->linear_hash_ind)
err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_part_fields)
- err+= add_key_partition(fptr, part_info->part_field_list);
+ {
+ err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ err+= add_part_field_list(fptr, part_info->part_field_list);
+ }
else
- err+= add_hash(fptr);
+ err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
break;
default:
DBUG_ASSERT(0);
/* We really shouldn't get here, no use in continuing from here */
- my_error(ER_OUT_OF_RESOURCES, MYF(0));
- current_thd->fatal_error();
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
DBUG_RETURN(NULL);
}
if (part_info->part_expr)
+ {
+ err+= add_begin_parenthesis(fptr);
err+= add_string_len(fptr, part_info->part_func_string,
part_info->part_func_len);
- err+= add_end_parenthesis(fptr);
- if ((!part_info->use_default_no_partitions) &&
+ err+= add_end_parenthesis(fptr);
+ }
+ else if (part_info->column_list)
+ {
+ err+= add_string(fptr, partition_keywords[PKW_COLUMNS].str);
+ err+= add_part_field_list(fptr, part_info->part_field_list);
+ }
+ if ((!part_info->use_default_num_partitions) &&
part_info->use_default_partitions)
{
err+= add_string(fptr, "\n");
err+= add_string(fptr, "PARTITIONS ");
- err+= add_int(fptr, part_info->no_parts);
+ err+= add_int(fptr, part_info->num_parts);
}
if (part_info->is_sub_partitioned())
{
@@ -2166,23 +2526,29 @@ char *generate_partition_syntax(partition_info *part_info,
if (part_info->linear_hash_ind)
err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_subpart_fields)
- err+= add_key_partition(fptr, part_info->subpart_field_list);
+ {
+ add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ add_part_field_list(fptr, part_info->subpart_field_list);
+ }
else
- err+= add_hash(fptr);
+ err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
if (part_info->subpart_expr)
+ {
+ err+= add_begin_parenthesis(fptr);
err+= add_string_len(fptr, part_info->subpart_func_string,
part_info->subpart_func_len);
- err+= add_end_parenthesis(fptr);
- if ((!part_info->use_default_no_subpartitions) &&
+ err+= add_end_parenthesis(fptr);
+ }
+ if ((!part_info->use_default_num_subpartitions) &&
part_info->use_default_subpartitions)
{
err+= add_string(fptr, "\n");
err+= add_string(fptr, "SUBPARTITIONS ");
- err+= add_int(fptr, part_info->no_subparts);
+ err+= add_int(fptr, part_info->num_subparts);
}
}
- tot_no_parts= part_info->partitions.elements;
- no_subparts= part_info->no_subparts;
+ tot_num_parts= part_info->partitions.elements;
+ num_subparts= part_info->num_subparts;
if (!part_info->use_default_partitions)
{
@@ -2205,7 +2571,8 @@ char *generate_partition_syntax(partition_info *part_info,
first= FALSE;
err+= add_partition(fptr);
err+= add_name_string(fptr, part_elem->partition_name);
- err+= add_partition_values(fptr, part_info, part_elem);
+ err+= add_partition_values(fptr, part_info, part_elem,
+ create_info, alter_info);
if (!part_info->is_sub_partitioned() ||
part_info->use_default_subpartitions)
{
@@ -2226,7 +2593,7 @@ char *generate_partition_syntax(partition_info *part_info,
err+= add_name_string(fptr, part_elem->partition_name);
if (show_partition_options)
err+= add_partition_options(fptr, part_elem);
- if (j != (no_subparts-1))
+ if (j != (num_subparts-1))
{
err+= add_comma(fptr);
err+= add_string(fptr, "\n");
@@ -2235,19 +2602,20 @@ char *generate_partition_syntax(partition_info *part_info,
}
else
err+= add_end_parenthesis(fptr);
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
}
- if (i == (tot_no_parts-1))
+ if (i == (tot_num_parts-1))
err+= add_end_parenthesis(fptr);
- } while (++i < tot_no_parts);
+ } while (++i < tot_num_parts);
}
if (err)
goto close_file;
- buffer_length= my_seek(fptr, 0L,MY_SEEK_END,MYF(0));
+ buffer_length= mysql_file_seek(fptr, 0L, MY_SEEK_END, MYF(0));
if (unlikely(buffer_length == MY_FILEPOS_ERROR))
goto close_file;
- if (unlikely(my_seek(fptr, 0L, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR))
+ if (unlikely(mysql_file_seek(fptr, 0L, MY_SEEK_SET, MYF(0))
+ == MY_FILEPOS_ERROR))
goto close_file;
*buf_length= (uint)buffer_length;
if (use_sql_alloc)
@@ -2257,10 +2625,10 @@ char *generate_partition_syntax(partition_info *part_info,
if (!buf)
goto close_file;
- if (unlikely(my_read(fptr, (uchar*)buf, *buf_length, MYF(MY_FNABP))))
+ if (unlikely(mysql_file_read(fptr, (uchar*)buf, *buf_length, MYF(MY_FNABP))))
{
if (!use_sql_alloc)
- my_free(buf, MYF(0));
+ my_free(buf);
else
buf= NULL;
}
@@ -2268,7 +2636,7 @@ char *generate_partition_syntax(partition_info *part_info,
buf[*buf_length]= 0;
close_file:
- my_close(fptr, MYF(0));
+ mysql_file_close(fptr, MYF(0));
DBUG_RETURN(buf);
}
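
generate_partition_syntax() above writes the syntax to a temporary file, seeks to the end to learn how large a buffer to allocate, then seeks back and reads the whole text in. A hedged standalone sketch of that pattern using the standard library (std::tmpfile and friends standing in for the mysql_file_* wrappers):

#include <cstddef>
#include <cstdio>
#include <string>

int main()
{
  std::FILE *f= std::tmpfile();
  if (!f)
    return 1;
  std::fputs("PARTITION BY HASH (a)\nPARTITIONS 4", f);
  std::fseek(f, 0L, SEEK_END);
  long len= std::ftell(f);                  // role of mysql_file_seek(..., MY_SEEK_END)
  std::string buf((std::size_t) len, '\0');
  std::fseek(f, 0L, SEEK_SET);
  std::size_t got= std::fread(&buf[0], 1, buf.size(), f);
  std::fclose(f);
  return got == buf.size() ? 0 : 1;
}
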
@@ -2387,14 +2755,14 @@ static uint32 calculate_key_value(Field **field_array)
get_part_id_for_sub()
loc_part_id Local partition id
sub_part_id Subpartition id
- no_subparts Number of subparts
+ num_subparts Number of subparts
*/
inline
static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
- uint no_subparts)
+ uint num_subparts)
{
- return (uint32)((loc_part_id * no_subparts) + sub_part_id);
+ return (uint32)((loc_part_id * num_subparts) + sub_part_id);
}
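
get_part_id_for_sub() above is plain row-major indexing of the (partition, subpartition) grid. A tiny worked instance (the counts are made up):

#include <cassert>
#include <cstdint>

static uint32_t part_id_for_sub(uint32_t loc_part_id, uint32_t sub_part_id,
                                unsigned num_subparts)
{
  return loc_part_id * num_subparts + sub_part_id;
}

int main()
{
  // 4 partitions x 3 subpartitions: partition 2, subpartition 1 -> slot 7.
  assert(part_id_for_sub(2, 1, 3) == 7);
  return 0;
}
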
@@ -2403,7 +2771,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
SYNOPSIS
get_part_id_hash()
- no_parts Number of hash partitions
+ num_parts Number of hash partitions
part_expr Item tree of hash function
out:part_id The returned partition id
out:func_value Value of hash function
@@ -2413,7 +2781,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
FALSE Success
*/
-static int get_part_id_hash(uint no_parts,
+static int get_part_id_hash(uint num_parts,
Item *part_expr,
uint32 *part_id,
longlong *func_value)
@@ -2424,7 +2792,7 @@ static int get_part_id_hash(uint no_parts,
if (part_val_int(part_expr, func_value))
DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
- int_hash_id= *func_value % no_parts;
+ int_hash_id= *func_value % num_parts;
*part_id= int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id;
DBUG_RETURN(FALSE);
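
A minimal standalone version of the modulo step above; the explicit sign handling matters because the partition function value is a signed longlong:

#include <cassert>
#include <cstdint>

static uint32_t hash_part_id(long long func_value, unsigned num_parts)
{
  long long int_hash_id= func_value % num_parts;
  // A negative remainder is mapped to its absolute value, as in the code above.
  return int_hash_id < 0 ? (uint32_t) -int_hash_id : (uint32_t) int_hash_id;
}

int main()
{
  assert(hash_part_id(11, 4) == 3);
  assert(hash_part_id(-7, 4) == 3);   // -7 % 4 == -3 in C++, sign flip -> 3
  return 0;
}
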
@@ -2438,7 +2806,7 @@ static int get_part_id_hash(uint no_parts,
get_part_id_linear_hash()
part_info A reference to the partition_info struct where all the
desired information is given
- no_parts Number of hash partitions
+ num_parts Number of hash partitions
part_expr Item tree of hash function
out:part_id The returned partition id
out:func_value Value of hash function
@@ -2449,7 +2817,7 @@ static int get_part_id_hash(uint no_parts,
*/
static int get_part_id_linear_hash(partition_info *part_info,
- uint no_parts,
+ uint num_parts,
Item *part_expr,
uint32 *part_id,
longlong *func_value)
@@ -2461,7 +2829,7 @@ static int get_part_id_linear_hash(partition_info *part_info,
*part_id= get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
- no_parts);
+ num_parts);
DBUG_RETURN(FALSE);
}
@@ -2472,7 +2840,7 @@ static int get_part_id_linear_hash(partition_info *part_info,
SYNOPSIS
get_part_id_key()
    field_array         Array of fields for PARTITION KEY
- no_parts Number of KEY partitions
+ num_parts Number of KEY partitions
RETURN VALUE
Calculated partition id
@@ -2480,12 +2848,12 @@ static int get_part_id_linear_hash(partition_info *part_info,
inline
static uint32 get_part_id_key(Field **field_array,
- uint no_parts,
+ uint num_parts,
longlong *func_value)
{
DBUG_ENTER("get_part_id_key");
*func_value= calculate_key_value(field_array);
- DBUG_RETURN((uint32) (*func_value % no_parts));
+ DBUG_RETURN((uint32) (*func_value % num_parts));
}
@@ -2497,7 +2865,7 @@ static uint32 get_part_id_key(Field **field_array,
part_info A reference to the partition_info struct where all the
desired information is given
    field_array         Array of fields for PARTITION KEY
- no_parts Number of KEY partitions
+ num_parts Number of KEY partitions
RETURN VALUE
Calculated partition id
@@ -2506,15 +2874,15 @@ static uint32 get_part_id_key(Field **field_array,
inline
static uint32 get_part_id_linear_key(partition_info *part_info,
Field **field_array,
- uint no_parts,
+ uint num_parts,
longlong *func_value)
{
- DBUG_ENTER("get_partition_id_linear_key");
+ DBUG_ENTER("get_part_id_linear_key");
*func_value= calculate_key_value(field_array);
DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
- no_parts));
+ num_parts));
}
/*
@@ -2548,7 +2916,8 @@ static void copy_to_part_field_buffers(Field **ptr,
if (!field->maybe_null() || !field->is_null())
{
CHARSET_INFO *cs= ((Field_str*)field)->charset();
- uint len= field->pack_length();
+ uint max_len= field->pack_length();
+ uint data_len= field->data_length();
uchar *field_buf= *field_bufs;
/*
We only use the field buffer for VARCHAR and CHAR strings
@@ -2560,17 +2929,17 @@ static void copy_to_part_field_buffers(Field **ptr,
if (field->type() == MYSQL_TYPE_VARCHAR)
{
uint len_bytes= ((Field_varstring*)field)->length_bytes;
- my_strnxfrm(cs, field_buf + len_bytes, (len - len_bytes),
- field->ptr + len_bytes, field->field_length);
+ my_strnxfrm(cs, field_buf + len_bytes, max_len,
+ field->ptr + len_bytes, data_len);
if (len_bytes == 1)
- *field_buf= (uchar) field->field_length;
+ *field_buf= (uchar) data_len;
else
- int2store(field_buf, field->field_length);
+ int2store(field_buf, data_len);
}
else
{
- my_strnxfrm(cs, field_buf, len,
- field->ptr, field->field_length);
+ my_strnxfrm(cs, field_buf, max_len,
+ field->ptr, max_len);
}
field->ptr= field_buf;
}
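
The copy above runs each charset-aware field value through my_strnxfrm so that a plain byte-wise comparison of the buffer follows the field's collation. A hedged analogue using the standard library's strxfrm (same idea, not the server's charset API):

#include <cassert>
#include <clocale>
#include <cstring>

int main()
{
  char key_a[64], key_b[64];
  std::setlocale(LC_COLLATE, "C");   // assume the "C" locale for a deterministic order
  std::strxfrm(key_a, "abc", sizeof(key_a));
  std::strxfrm(key_b, "abd", sizeof(key_b));
  // Comparing the transformed keys byte-wise now reflects the collation
  // order, which is what the field-buffer copy above arranges for
  // partition fields with a character set.
  assert(std::strcmp(key_a, key_b) < 0);
  return 0;
}
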
@@ -2600,6 +2969,44 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
return;
}
+/*
+ This function is used to calculate the partition id where all partition
+ fields have been prepared to point to a record where the partition field
+ values are bound.
+
+ SYNOPSIS
+ get_partition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ out:part_id The partition id is returned through this pointer
+ out:func_value Value of partition function (longlong)
+
+ RETURN VALUE
+ part_id Partition id of partition that would contain
+ row with given values of PF-fields
+ HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't
+ fit into any partition and thus the values of
+ the PF-fields are not allowed.
+
+ DESCRIPTION
+ A routine used from write_row, update_row and delete_row from any
+ handler supporting partitioning. It is also a support routine for
+ get_partition_set used to find the set of partitions needed to scan
+ for a certain index scan or full table scan.
+
+    There are actually 9 different variants of this function, which are called
+    through a function pointer.
+
+ get_partition_id_list
+ get_partition_id_list_col
+ get_partition_id_range
+ get_partition_id_range_col
+ get_partition_id_hash_nosub
+ get_partition_id_key_nosub
+ get_partition_id_linear_hash_nosub
+ get_partition_id_linear_key_nosub
+ get_partition_id_with_sub
+*/
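
An editorial sketch (made-up type and function names) of the dispatch the comment above describes: one of the listed variants is installed once, and every caller then goes through the function pointer without caring which variant it is:

#include <cstdint>

struct partition_info_sketch;

typedef int (*get_part_id_func)(partition_info_sketch *part_info,
                                uint32_t *part_id, long long *func_value);

struct partition_info_sketch
{
  get_part_id_func get_partition_id;   // chosen once during set-up
};

static int everything_in_part_0(partition_info_sketch *, uint32_t *part_id,
                                long long *func_value)
{
  *part_id= 0;
  *func_value= 0;
  return 0;
}

int main()
{
  partition_info_sketch info= { everything_in_part_0 };
  uint32_t part_id;
  long long func_value;
  // Analogous to the write_row/update_row/delete_row call sites.
  return info.get_partition_id(&info, &part_id, &func_value);
}
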
/*
This function is used to calculate the main partition to use in the case of
@@ -2621,67 +3028,26 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
DESCRIPTION
- It is actually 6 different variants of this function which are called
+    There are actually 8 different variants of this function, which are called
through a function pointer.
get_partition_id_list
+ get_partition_id_list_col
get_partition_id_range
+ get_partition_id_range_col
get_partition_id_hash_nosub
get_partition_id_key_nosub
get_partition_id_linear_hash_nosub
get_partition_id_linear_key_nosub
*/
-static int get_part_id_charset_func_subpart(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- int res;
- copy_to_part_field_buffers(part_info->subpart_charset_field_array,
- part_info->subpart_field_buffers,
- part_info->restore_subpart_field_ptrs);
- res= part_info->get_partition_id_charset(part_info, part_id, func_value);
- restore_part_field_pointers(part_info->subpart_charset_field_array,
- part_info->restore_subpart_field_ptrs);
- return res;
-}
-
-
static int get_part_id_charset_func_part(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
int res;
- copy_to_part_field_buffers(part_info->part_charset_field_array,
- part_info->part_field_buffers,
- part_info->restore_part_field_ptrs);
- res= part_info->get_partition_id_charset(part_info, part_id, func_value);
- restore_part_field_pointers(part_info->part_charset_field_array,
- part_info->restore_part_field_ptrs);
- return res;
-}
-
+ DBUG_ENTER("get_part_id_charset_func_part");
-static int get_part_id_charset_func_all(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- int res;
- copy_to_part_field_buffers(part_info->full_part_field_array,
- part_info->full_part_field_buffers,
- part_info->restore_full_part_field_ptrs);
- res= part_info->get_partition_id_charset(part_info, part_id, func_value);
- restore_part_field_pointers(part_info->full_part_field_array,
- part_info->restore_full_part_field_ptrs);
- return res;
-}
-
-
-static int get_part_part_id_charset_func(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- int res;
copy_to_part_field_buffers(part_info->part_charset_field_array,
part_info->part_field_buffers,
part_info->restore_part_field_ptrs);
@@ -2689,21 +3055,58 @@ static int get_part_part_id_charset_func(partition_info *part_info,
part_id, func_value);
restore_part_field_pointers(part_info->part_charset_field_array,
part_info->restore_part_field_ptrs);
- return res;
+ DBUG_RETURN(res);
}
-static int get_subpart_id_charset_func(partition_info *part_info,
- uint32 *part_id)
+static int get_part_id_charset_func_subpart(partition_info *part_info,
+ uint32 *part_id)
{
int res;
+ DBUG_ENTER("get_part_id_charset_func_subpart");
+
copy_to_part_field_buffers(part_info->subpart_charset_field_array,
part_info->subpart_field_buffers,
part_info->restore_subpart_field_ptrs);
res= part_info->get_subpartition_id_charset(part_info, part_id);
restore_part_field_pointers(part_info->subpart_charset_field_array,
part_info->restore_subpart_field_ptrs);
- return res;
+ DBUG_RETURN(res);
+}
+
+int get_partition_id_list_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ part_column_list_val *list_col_array= part_info->list_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ int list_index, cmp;
+ int min_list_index= 0;
+ int max_list_index= part_info->num_list_values - 1;
+ DBUG_ENTER("get_partition_id_list_col");
+
+ while (max_list_index >= min_list_index)
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ cmp= cmp_rec_and_tuple(list_col_array + list_index*num_columns,
+ num_columns);
+ if (cmp > 0)
+ min_list_index= list_index + 1;
+ else if (cmp < 0)
+ {
+ if (!list_index)
+ goto notfound;
+ max_list_index= list_index - 1;
+ }
+ else
+ {
+ *part_id= (uint32)list_col_array[list_index*num_columns].partition_id;
+ DBUG_RETURN(0);
+ }
+ }
+notfound:
+ *part_id= 0;
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
}
@@ -2714,7 +3117,7 @@ int get_partition_id_list(partition_info *part_info,
LIST_PART_ENTRY *list_array= part_info->list_array;
int list_index;
int min_list_index= 0;
- int max_list_index= part_info->no_list_values - 1;
+ int max_list_index= part_info->num_list_values - 1;
longlong part_func_value;
int error= part_val_int(part_info->part_expr, &part_func_value);
longlong list_value;
@@ -2784,7 +3187,7 @@ notfound:
index idx.
The function returns first number idx, such that
list_array[idx].list_value is NOT contained within the passed interval.
- If all array elements are contained, part_info->no_list_values is
+ If all array elements are contained, part_info->num_list_values is
returned.
NOTE
@@ -2798,6 +3201,44 @@ notfound:
The edge of corresponding sub-array of part_info->list_array
*/
+uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint,
+ uint32 nparts)
+{
+ part_column_list_val *list_col_array= part_info->list_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ int list_index, cmp;
+ uint min_list_index= 0;
+ uint max_list_index= part_info->num_list_values - 1;
+ bool tailf= !(left_endpoint ^ include_endpoint);
+ DBUG_ENTER("get_partition_id_cols_list_for_endpoint");
+
+ do
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ cmp= cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns,
+ nparts, tailf);
+ if (cmp > 0)
+ min_list_index= list_index + 1;
+ else if (cmp < 0)
+ {
+ if (!list_index)
+ goto notfound;
+ max_list_index= list_index - 1;
+ }
+ else
+ {
+ DBUG_RETURN(list_index + test(!tailf));
+ }
+ } while (max_list_index >= min_list_index);
+ if (cmp > 0)
+ list_index++;
+notfound:
+ DBUG_RETURN(list_index);
+}
+
+
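The endpoint-to-index mapping used by these *_for_endpoint functions can be pictured on a plain sorted array: a left endpoint maps to the first value inside the interval, a right endpoint to the first value outside it. A sketch of that idea (ignoring the actual LIST_PART_ENTRY and column-list layouts):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    /* Sketch: translate an interval endpoint into an edge index of a
       sorted value array, so the values inside the interval form one
       contiguous sub-array. */
    static size_t endpoint_to_index(const std::vector<long long> &values,
                                    long long endpoint,
                                    bool left_endpoint,
                                    bool include_endpoint)
    {
      /* Skip values equal to the endpoint when they lie outside the
         interval (open left end) or inside it (closed right end). */
      bool skip_equal= left_endpoint ? !include_endpoint : include_endpoint;
      std::vector<long long>::const_iterator it= skip_equal ?
        std::upper_bound(values.begin(), values.end(), endpoint) :
        std::lower_bound(values.begin(), values.end(), endpoint);
      return (size_t) (it - values.begin());
    }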
uint32 get_list_array_idx_for_endpoint_charset(partition_info *part_info,
bool left_endpoint,
bool include_endpoint)
@@ -2819,7 +3260,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
{
LIST_PART_ENTRY *list_array= part_info->list_array;
uint list_index;
- uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+ uint min_list_index= 0, max_list_index= part_info->num_list_values - 1;
longlong list_value;
/* Get the partitioning function value for the endpoint */
longlong part_func_value=
@@ -2849,7 +3290,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
- DBUG_ASSERT(part_info->no_list_values);
+ DBUG_ASSERT(part_info->num_list_values);
do
{
list_index= (max_list_index + min_list_index) >> 1;
@@ -2874,12 +3315,49 @@ notfound:
}
+int get_partition_id_range_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ part_column_list_val *range_col_array= part_info->range_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ uint max_partition= part_info->num_parts - 1;
+ uint min_part_id= 0;
+ uint max_part_id= max_partition;
+ uint loc_part_id;
+ DBUG_ENTER("get_partition_id_range_col");
+
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns,
+ num_columns) >= 0)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (loc_part_id != max_partition)
+ if (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns,
+ num_columns) >= 0)
+ loc_part_id++;
+ *part_id= (uint32)loc_part_id;
+ if (loc_part_id == max_partition &&
+ (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns,
+ num_columns) >= 0))
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ DBUG_PRINT("exit",("partition: %d", *part_id));
+ DBUG_RETURN(0);
+}
+
+
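get_partition_id_range_col above is a binary search over the per-partition upper bounds ("VALUES LESS THAN"), generalised to column tuples via cmp_rec_and_tuple. On a plain integer array the same lookup can be written with std::upper_bound, as in this sketch (which ignores MAXVALUE handling):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    /* Sketch: partition i holds values v with bounds[i-1] <= v < bounds[i],
       so the partition id is the index of the first upper bound strictly
       greater than the value; if no bound is greater, no partition fits. */
    static bool range_part_id_sketch(const std::vector<long long> &bounds,
                                     long long value, uint32_t *part_id)
    {
      std::vector<long long>::const_iterator it=
        std::upper_bound(bounds.begin(), bounds.end(), value);
      if (it == bounds.end())
        return false;   /* corresponds to HA_ERR_NO_PARTITION_FOUND */
      *part_id= (uint32_t) (it - bounds.begin());
      return true;
    }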
int get_partition_id_range(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
longlong *range_array= part_info->range_int_array;
- uint max_partition= part_info->no_parts - 1;
+ uint max_partition= part_info->num_parts - 1;
uint min_part_id= 0;
uint max_part_id= max_partition;
uint loc_part_id;
@@ -2954,7 +3432,7 @@ int get_partition_id_range(partition_info *part_info,
represented by range_int_array[idx] has EMPTY intersection with the
passed interval.
If the interval represented by the last array element has non-empty
- intersection with the passed interval, part_info->no_parts is
+ intersection with the passed interval, part_info->num_parts is
returned.
RETURN
@@ -2983,7 +3461,7 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
{
longlong *range_array= part_info->range_int_array;
longlong part_end_val;
- uint max_partition= part_info->no_parts - 1;
+ uint max_partition= part_info->num_parts - 1;
uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
/* Get the partitioning function value for the endpoint */
longlong part_func_value=
@@ -3071,7 +3549,7 @@ int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- return get_part_id_hash(part_info->no_parts, part_info->part_expr,
+ return get_part_id_hash(part_info->num_parts, part_info->part_expr,
part_id, func_value);
}
@@ -3080,7 +3558,7 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- return get_part_id_linear_hash(part_info, part_info->no_parts,
+ return get_part_id_linear_hash(part_info, part_info->num_parts,
part_info->part_expr, part_id, func_value);
}
@@ -3090,232 +3568,44 @@ int get_partition_id_key_nosub(partition_info *part_info,
longlong *func_value)
{
*part_id= get_part_id_key(part_info->part_field_array,
- part_info->no_parts, func_value);
+ part_info->num_parts, func_value);
return 0;
}
int get_partition_id_linear_key_nosub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
+ uint32 *part_id,
+ longlong *func_value)
{
*part_id= get_part_id_linear_key(part_info,
part_info->part_field_array,
- part_info->no_parts, func_value);
+ part_info->num_parts, func_value);
return 0;
}
-int get_partition_id_range_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_hash");
- LINT_INIT(loc_part_id);
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
- &sub_part_id, &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_range_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_linear_hash");
- LINT_INIT(loc_part_id);
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
- part_info->subpart_expr,
- &sub_part_id,
- &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_range_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_key");
- LINT_INIT(loc_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_key(part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_range_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_linear_key");
- LINT_INIT(loc_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_linear_key(part_info,
- part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_list_sub_hash");
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
- &sub_part_id, &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_list_sub_linear_hash");
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
- part_info->subpart_expr,
- &sub_part_id,
- &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
+int get_partition_id_with_sub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
+ uint num_subparts;
int error;
- DBUG_ENTER("get_partition_id_range_sub_key");
+ DBUG_ENTER("get_partition_id_with_sub");
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
+ if (unlikely((error= part_info->get_part_partition_id(part_info,
+ &loc_part_id,
+ func_value))))
{
DBUG_RETURN(error);
}
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_key(part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_list_sub_linear_key");
-
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
+ num_subparts= part_info->num_subparts;
+ if (unlikely((error= part_info->get_subpartition_id(part_info,
+ &sub_part_id))))
{
DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_linear_key(part_info,
- part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ }
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, num_subparts);
DBUG_RETURN(0);
}
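get_part_id_for_sub() is not part of this hunk; the sketch below shows the flattening it is assumed to perform, with the subpartitions of top-level partition p occupying physical ids p*num_subparts .. p*num_subparts + num_subparts - 1:

    #include <cstdint>

    /* Sketch only: assumed row-major flattening of (partition,
       subpartition) into a single physical partition id.  E.g. top-level
       partition 2, subpartition 1, 4 subpartitions per partition -> id 9. */
    static inline uint32_t part_id_for_sub_sketch(uint32_t loc_part_id,
                                                  uint32_t sub_part_id,
                                                  uint32_t num_subparts)
    {
      return loc_part_id * num_subparts + sub_part_id;
    }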
@@ -3348,7 +3638,7 @@ int get_partition_id_hash_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
- return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
+ return get_part_id_hash(part_info->num_subparts, part_info->subpart_expr,
part_id, &func_value);
}
@@ -3357,7 +3647,7 @@ int get_partition_id_linear_hash_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
- return get_part_id_linear_hash(part_info, part_info->no_subparts,
+ return get_part_id_linear_hash(part_info, part_info->num_subparts,
part_info->subpart_expr, part_id,
&func_value);
}
@@ -3368,7 +3658,7 @@ int get_partition_id_key_sub(partition_info *part_info,
{
longlong func_value;
*part_id= get_part_id_key(part_info->subpart_field_array,
- part_info->no_subparts, &func_value);
+ part_info->num_subparts, &func_value);
return FALSE;
}
@@ -3379,7 +3669,7 @@ int get_partition_id_linear_key_sub(partition_info *part_info,
longlong func_value;
*part_id= get_part_id_linear_key(part_info,
part_info->subpart_field_array,
- part_info->no_subparts, &func_value);
+ part_info->num_subparts, &func_value);
return FALSE;
}
@@ -3678,18 +3968,18 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
const key_range *key_spec, part_id_range *part_spec)
{
partition_info *part_info= table->part_info;
- uint no_parts= part_info->get_tot_partitions();
+ uint num_parts= part_info->get_tot_partitions();
uint i, part_id;
- uint sub_part= no_parts;
- uint32 part_part= no_parts;
+ uint sub_part= num_parts;
+ uint32 part_part= num_parts;
KEY *key_info= NULL;
bool found_part_field= FALSE;
DBUG_ENTER("get_partition_set");
part_spec->start_part= 0;
- part_spec->end_part= no_parts - 1;
+ part_spec->end_part= num_parts - 1;
if ((index < MAX_KEY) &&
- key_spec->flag == (uint)HA_READ_KEY_EXACT &&
+ key_spec && key_spec->flag == (uint)HA_READ_KEY_EXACT &&
part_info->some_fields_in_PF.is_set(index))
{
key_info= table->key_info+index;
@@ -3724,7 +4014,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
{
if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
{
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
DBUG_VOID_RETURN;
}
}
@@ -3738,7 +4028,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
allowed values. Thus it is certain that the result of this
scan will be empty.
*/
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
DBUG_VOID_RETURN;
}
}
@@ -3776,7 +4066,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
{
if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
{
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
@@ -3785,7 +4075,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
{
if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
{
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
@@ -3806,29 +4096,29 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
nothing or we have discovered a range of partitions with possible holes
in it. We need a bitvector to further the work here.
*/
- if (!(part_part == no_parts && sub_part == no_parts))
+ if (!(part_part == num_parts && sub_part == num_parts))
{
/*
We can only arrive here if we are using subpartitioning.
*/
- if (part_part != no_parts)
+ if (part_part != num_parts)
{
/*
We know the top partition and need to scan all underlying
subpartitions. This is a range without holes.
*/
- DBUG_ASSERT(sub_part == no_parts);
- part_spec->start_part= part_part * part_info->no_subparts;
- part_spec->end_part= part_spec->start_part+part_info->no_subparts - 1;
+ DBUG_ASSERT(sub_part == num_parts);
+ part_spec->start_part= part_part * part_info->num_subparts;
+ part_spec->end_part= part_spec->start_part+part_info->num_subparts - 1;
}
else
{
- DBUG_ASSERT(sub_part != no_parts);
+ DBUG_ASSERT(sub_part != num_parts);
part_spec->start_part= sub_part;
part_spec->end_part=sub_part+
- (part_info->no_subparts*(part_info->no_parts-1));
- for (i= 0, part_id= sub_part; i < part_info->no_parts;
- i++, part_id+= part_info->no_subparts)
+ (part_info->num_subparts*(part_info->num_parts-1));
+ for (i= 0, part_id= sub_part; i < part_info->num_parts;
+ i++, part_id+= part_info->num_subparts)
; //Set bit part_id in bit array
}
}
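A small numeric illustration of the two cases handled above, assuming 3 top-level partitions with 4 subpartitions each (physical ids 0..11); the first case yields a contiguous range, the second a strided set, which is why a bitmap is needed there:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      const uint32_t num_parts= 3, num_subparts= 4;

      /* Top-level partition known (say p1), subpartition unknown:
         scan the contiguous range of ids 4..7. */
      uint32_t part_part= 1;
      uint32_t start= part_part * num_subparts;
      uint32_t end= start + num_subparts - 1;
      printf("top partition known: scan %u..%u\n", start, end);

      /* Subpartition known (say sp2), top-level partition unknown:
         candidates are ids 2, 6 and 10 -- one per top-level partition. */
      uint32_t sub_part= 2;
      for (uint32_t i= 0, id= sub_part; i < num_parts; i++, id+= num_subparts)
        printf("subpartition known: candidate id %u\n", id);
      return 0;
    }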
@@ -3900,7 +4190,6 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
bool mysql_unpack_partition(THD *thd,
char *part_buf, uint part_info_len,
- const char *part_state, uint part_state_len,
TABLE* table, bool is_create_table_ind,
handlerton *default_db_type,
bool *work_part_info_used)
@@ -3912,28 +4201,15 @@ bool mysql_unpack_partition(THD *thd,
LEX lex;
DBUG_ENTER("mysql_unpack_partition");
- thd->lex= &lex;
thd->variables.character_set_client= system_charset_info;
Parser_state parser_state;
if (parser_state.init(thd, part_buf, part_info_len))
goto end;
- lex_start(thd);
- *work_part_info_used= false;
- /*
- We need to use the current SELECT_LEX since I need to keep the
- Name_resolution_context object which is referenced from the
- Item_field objects.
- This is not a nice solution since if the parser uses current_select
- for anything else it will corrupt the current LEX object.
- Also, we need to make sure there even is a select -- if the statement
- was a "USE ...", current_select will be NULL, but we may still end up
- here if we try to log to a partitioned table. This is currently
- unsupported, but should still fail rather than crash!
- */
- if (!(thd->lex->current_select= old_lex->current_select))
+ if (init_lex_with_single_table(thd, table, &lex))
goto end;
+
/*
All Items created is put into a free list on the THD object. This list
is used to free all Item objects after completing a query. We don't
@@ -3943,16 +4219,17 @@ bool mysql_unpack_partition(THD *thd,
Thus we move away the current list temporarily and start a new list that
we then save in the partition info structure.
*/
+ *work_part_info_used= FALSE;
lex.part_info= new partition_info();/* Indicates MYSQLparse from this place */
if (!lex.part_info)
{
mem_alloc_error(sizeof(partition_info));
goto end;
}
- lex.part_info->part_state= part_state;
- lex.part_info->part_state_len= part_state_len;
+ part_info= lex.part_info;
DBUG_PRINT("info", ("Parse: %s", part_buf));
- if (parse_sql(thd, & parser_state, NULL))
+ if (parse_sql(thd, & parser_state, NULL) ||
+ part_info->fix_parser_data(thd))
{
thd->free_items();
goto end;
@@ -3973,45 +4250,27 @@ bool mysql_unpack_partition(THD *thd,
*/
DBUG_PRINT("info", ("Successful parse"));
- part_info= lex.part_info;
DBUG_PRINT("info", ("default engine = %s, default_db_type = %s",
ha_resolve_storage_engine_name(part_info->default_engine_type),
ha_resolve_storage_engine_name(default_db_type)));
if (is_create_table_ind && old_lex->sql_command == SQLCOM_CREATE_TABLE)
{
- if (old_lex->create_info.options & HA_LEX_CREATE_TABLE_LIKE)
- {
- /*
- This code is executed when we create table in CREATE TABLE t1 LIKE t2.
- old_lex->query_tables contains table list element for t2 and the table
- we are opening has name t1.
- */
- if (partition_default_handling(table, part_info, FALSE,
- old_lex->query_tables->table->s->path.str))
- {
- result= TRUE;
- goto end;
- }
- }
- else
- {
- /*
- When we come here we are doing a create table. In this case we
- have already done some preparatory work on the old part_info
- object. We don't really need this new partition_info object.
- Thus we go back to the old partition info object.
- We need to free any memory objects allocated on item_free_list
- by the parser since we are keeping the old info from the first
- parser call in CREATE TABLE.
- We'll ensure that this object isn't put into table cache also
- just to ensure we don't get into strange situations with the
- item objects.
- */
- thd->free_items();
- part_info= thd->work_part_info;
- table->s->version= 0UL;
- *work_part_info_used= true;
- }
+ /*
+ When we come here we are doing a create table. In this case we
+ have already done some preparatory work on the old part_info
+ object. We don't really need this new partition_info object.
+ Thus we go back to the old partition info object.
+ We need to free any memory objects allocated on item_free_list
+ by the parser since we are keeping the old info from the first
+ parser call in CREATE TABLE.
+
+ This table object can not be used any more. However, since
+ this is CREATE TABLE, we know that it will be destroyed by the
+ caller, and rely on that.
+ */
+ thd->free_items();
+ part_info= thd->work_part_info;
+ *work_part_info_used= true;
}
table->part_info= part_info;
table->file->set_part_info(part_info);
@@ -4055,8 +4314,7 @@ bool mysql_unpack_partition(THD *thd,
result= FALSE;
end:
- lex_end(thd->lex);
- thd->lex= old_lex;
+ end_lex_with_single_table(thd, table, old_lex);
thd->variables.character_set_client= old_character_set_client;
DBUG_RETURN(result);
}
@@ -4094,60 +4352,35 @@ set_engine_all_partitions(partition_info *part_info,
partition_element *sub_elem= sub_it++;
sub_elem->engine_type= engine_type;
- } while (++j < part_info->no_subparts);
+ } while (++j < part_info->num_subparts);
}
- } while (++i < part_info->no_parts);
+ } while (++i < part_info->num_parts);
}
-/*
- SYNOPSIS
- fast_end_partition()
- thd Thread object
- out:copied Number of records copied
- out:deleted Number of records deleted
- table_list Table list with the one table in it
- empty Has nothing been done
- lpt Struct to be used by error handler
- RETURN VALUES
- FALSE Success
- TRUE Failure
- DESCRIPTION
- Support routine to handle the successful cases for partition
- management.
+/**
+ Support routine to handle the successful cases for partition management.
+
+ @param thd Thread object
+ @param copied Number of records copied
+ @param deleted Number of records deleted
+ @param table_list Table list with the one table in it
+
+ @return Operation status
+ @retval FALSE Success
+ @retval TRUE Failure
*/
static int fast_end_partition(THD *thd, ulonglong copied,
ulonglong deleted,
- TABLE *table,
- TABLE_LIST *table_list, bool is_empty,
- ALTER_PARTITION_PARAM_TYPE *lpt,
- bool written_bin_log)
+ TABLE_LIST *table_list)
{
- int error;
char tmp_name[80];
DBUG_ENTER("fast_end_partition");
thd->proc_info="end";
- if (!is_empty)
- query_cache_invalidate3(thd, table_list, 0);
-
- error= ha_autocommit_or_rollback(thd, 0);
- if (end_active_trans(thd))
- error= 1;
-
- if (error)
- {
- /* If error during commit, no need to rollback, it's done. */
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(TRUE);
- }
-
- if ((!is_empty) && (!written_bin_log) &&
- (!thd->lex->no_write_to_binlog) &&
- write_bin_log(thd, FALSE, thd->query(), thd->query_length()))
- DBUG_RETURN(TRUE);
+ query_cache_invalidate3(thd, table_list, 0);
my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
(ulong) (copied + deleted),
@@ -4242,7 +4475,7 @@ uint set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
enum partition_state part_state)
{
uint part_count= 0;
- uint no_parts_found= 0;
+ uint num_parts_found= 0;
List_iterator<partition_element> part_it(tab_part_info->partitions);
do
@@ -4255,41 +4488,39 @@ uint set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
/*
Mark the partition.
I.e mark the partition as a partition to be "changed" by
- analyzing/optimizing/rebuilding/checking/repairing
+ analyzing/optimizing/rebuilding/checking/repairing/...
*/
- no_parts_found++;
+ num_parts_found++;
part_elem->part_state= part_state;
DBUG_PRINT("info", ("Setting part_state to %u for partition %s",
part_state, part_elem->partition_name));
}
- } while (++part_count < tab_part_info->no_parts);
- return no_parts_found;
+ else
+ part_elem->part_state= PART_NORMAL;
+ } while (++part_count < tab_part_info->num_parts);
+ return num_parts_found;
}
-/*
+/**
Prepare for ALTER TABLE of partition structure
- SYNOPSIS
- prep_alter_part_table()
- thd Thread object
- table Table object
- inout:alter_info Alter information
- inout:create_info Create info for CREATE TABLE
- old_db_type Old engine type
- out:partition_changed Boolean indicating whether partition changed
- out:fast_alter_partition Boolean indicating whether fast partition
- change is requested
+ @param[in] thd Thread object
+ @param[in] table Table object
+ @param[in,out] alter_info Alter information
+ @param[in,out] create_info Create info for CREATE TABLE
+ @param[in] old_db_type Old engine type
+ @param[out] partition_changed Boolean indicating whether partition changed
+ @param[out] fast_alter_table Internal temporary table allowing fast
+ partition change or NULL if not possible
- RETURN VALUES
- TRUE Error
- FALSE Success
- partition_changed
- fast_alter_partition
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
- DESCRIPTION
+ @note
This method handles all preparations for ALTER TABLE for partitioned
- tables
+ tables.
We need to handle both partition management command such as Add Partition
and others here as well as an ALTER TABLE that completely changes the
partitioning and yet others that don't change anything at all. We start
@@ -4301,8 +4532,12 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
HA_CREATE_INFO *create_info,
handlerton *old_db_type,
bool *partition_changed,
- uint *fast_alter_partition)
+ char *db,
+ const char *table_name,
+ const char *path,
+ TABLE **fast_alter_table)
{
+ TABLE *new_table= NULL;
DBUG_ENTER("prep_alter_part_table");
/* Foreign keys on partitioned tables are not supported, waits for WL#148 */
@@ -4311,16 +4546,9 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
}
- /*
- We are going to manipulate the partition info on the table object
- so we need to ensure that the data structure of the table object
- is freed by setting version to 0. table->s->version= 0 forces a
- flush of the table object in close_thread_tables().
- */
- if (table->part_info)
- table->s->version= 0L;
thd->work_part_info= thd->lex->part_info;
+
if (thd->work_part_info &&
!(thd->work_part_info= thd->lex->part_info->get_clone()))
DBUG_RETURN(TRUE);
@@ -4333,25 +4561,51 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
ALTER_TABLE_REORG | ALTER_REBUILD_PARTITION))
{
- partition_info *tab_part_info= table->part_info;
+ partition_info *tab_part_info;
partition_info *alt_part_info= thd->work_part_info;
uint flags= 0;
- if (!tab_part_info)
+ bool is_last_partition_reorged= FALSE;
+ part_elem_value *tab_max_elem_val= NULL;
+ part_elem_value *alt_max_elem_val= NULL;
+ longlong tab_max_range= 0, alt_max_range= 0;
+
+ if (!table->part_info)
{
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
}
+
+ /*
+      Open our intermediate table; we will operate on a temporary instance
+      of the original table so that we can skip copying all partitions.
+      Open it as a copy of the original table and modify its partition_info
+      object to allow fast_alter_partition_table to perform the changes.
+ */
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
+ MDL_INTENTION_EXCLUSIVE));
+ new_table= open_table_uncached(thd, path, db, table_name, 0);
+ if (!new_table)
+ DBUG_RETURN(TRUE);
+
+ /*
+      This table may be used for copying rows between partitions and
+      also for reading/writing columns when fixing the partition_info struct.
+ */
+ new_table->use_all_columns();
+
+ tab_part_info= new_table->part_info;
+
if (alter_info->flags & ALTER_TABLE_REORG)
{
uint new_part_no, curr_part_no;
if (tab_part_info->part_type != HASH_PARTITION ||
- tab_part_info->use_default_no_partitions)
+ tab_part_info->use_default_num_partitions)
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- new_part_no= table->file->get_default_no_partitions(create_info);
- curr_part_no= tab_part_info->no_parts;
+ new_part_no= new_table->file->get_default_no_partitions(create_info);
+ curr_part_no= tab_part_info->num_parts;
if (new_part_no == curr_part_no)
{
/*
@@ -4359,7 +4613,8 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
after the change as before. Thus we can reply ok immediately
without any changes at all.
*/
- *fast_alter_partition= TRUE;
+ *fast_alter_table= new_table;
+ thd->work_part_info= tab_part_info;
DBUG_RETURN(FALSE);
}
else if (new_part_no > curr_part_no)
@@ -4369,7 +4624,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
setting the flag for no default number of partitions
*/
alter_info->flags|= ALTER_ADD_PARTITION;
- thd->work_part_info->no_parts= new_part_no - curr_part_no;
+ thd->work_part_info->num_parts= new_part_no - curr_part_no;
}
else
{
@@ -4378,46 +4633,85 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
without setting the flag for no default number of partitions
*/
alter_info->flags|= ALTER_COALESCE_PARTITION;
- alter_info->no_parts= curr_part_no - new_part_no;
+ alter_info->num_parts= curr_part_no - new_part_no;
}
}
- if (!(flags= table->file->alter_table_flags(alter_info->flags)))
+ if (!(flags= new_table->file->alter_table_flags(alter_info->flags)))
{
my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
- DBUG_RETURN(1);
+ goto err;
}
- *fast_alter_partition=
- ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0);
- DBUG_PRINT("info", ("*fast_alter_partition: %d flags: 0x%x",
- *fast_alter_partition, flags));
- if (((alter_info->flags & ALTER_ADD_PARTITION) ||
- (alter_info->flags & ALTER_REORGANIZE_PARTITION)) &&
- (thd->work_part_info->part_type != tab_part_info->part_type) &&
- (thd->work_part_info->part_type != NOT_A_PARTITION))
+ if ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0)
+ *fast_alter_table= new_table;
+ DBUG_PRINT("info", ("*fast_alter_table: %p flags: 0x%x",
+ *fast_alter_table, flags));
+ if ((alter_info->flags & ALTER_ADD_PARTITION) ||
+ (alter_info->flags & ALTER_REORGANIZE_PARTITION))
{
- if (thd->work_part_info->part_type == RANGE_PARTITION)
+ if (thd->work_part_info->part_type != tab_part_info->part_type)
{
- my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
- "RANGE", "LESS THAN");
- }
- else if (thd->work_part_info->part_type == LIST_PARTITION)
- {
- DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION);
- my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
- "LIST", "IN");
+ if (thd->work_part_info->part_type == NOT_A_PARTITION)
+ {
+ if (tab_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "RANGE");
+ goto err;
+ }
+ else if (tab_part_info->part_type == LIST_PARTITION)
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "LIST");
+ goto err;
+ }
+ /*
+            Hash partitions can be altered without the parser finding out
+            that the table is HASH partitioned, so no error here.
+ */
+ }
+ else
+ {
+ if (thd->work_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ }
+ else if (thd->work_part_info->part_type == LIST_PARTITION)
+ {
+ DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION);
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ }
+ else if (tab_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ }
+ else
+ {
+ DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ }
+ goto err;
+ }
}
- else if (tab_part_info->part_type == RANGE_PARTITION)
+ if ((tab_part_info->column_list &&
+ alt_part_info->num_columns != tab_part_info->num_columns) ||
+ (!tab_part_info->column_list &&
+ (tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION) &&
+ alt_part_info->num_columns != 1U) ||
+ (!tab_part_info->column_list &&
+ tab_part_info->part_type == HASH_PARTITION &&
+ alt_part_info->num_columns != 0))
{
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "RANGE", "LESS THAN");
+ my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ goto err;
}
- else
+ alt_part_info->column_list= tab_part_info->column_list;
+ if (alt_part_info->fix_parser_data(thd))
{
- DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "LIST", "IN");
+ goto err;
}
- DBUG_RETURN(TRUE);
}
if (alter_info->flags & ALTER_ADD_PARTITION)
{
@@ -4427,9 +4721,9 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
partitioning scheme as currently set-up.
Partitions are always added at the end in ADD PARTITION.
*/
- uint no_new_partitions= alt_part_info->no_parts;
- uint no_orig_partitions= tab_part_info->no_parts;
- uint check_total_partitions= no_new_partitions + no_orig_partitions;
+ uint num_new_partitions= alt_part_info->num_parts;
+ uint num_orig_partitions= tab_part_info->num_parts;
+ uint check_total_partitions= num_new_partitions + num_orig_partitions;
uint new_total_partitions= check_total_partitions;
/*
We allow quite a lot of values to be supplied by defaults, however we
@@ -4439,42 +4733,42 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
tab_part_info->part_type != HASH_PARTITION)
{
my_error(ER_NO_BINLOG_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
if (tab_part_info->defined_max_value)
{
my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (no_new_partitions == 0)
+ if (num_new_partitions == 0)
{
my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
if (tab_part_info->is_sub_partitioned())
{
- if (alt_part_info->no_subparts == 0)
- alt_part_info->no_subparts= tab_part_info->no_subparts;
- else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
+ if (alt_part_info->num_subparts == 0)
+ alt_part_info->num_subparts= tab_part_info->num_subparts;
+ else if (alt_part_info->num_subparts != tab_part_info->num_subparts)
{
my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
check_total_partitions= new_total_partitions*
- alt_part_info->no_subparts;
+ alt_part_info->num_subparts;
}
if (check_total_partitions > MAX_PARTITIONS)
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
- if (alt_part_info->set_up_defaults_for_partitioning(table->file,
- ULL(0),
- tab_part_info->no_parts))
+ if (alt_part_info->set_up_defaults_for_partitioning(new_table->file,
+ ULL(0),
+ tab_part_info->num_parts))
{
- DBUG_RETURN(TRUE);
+ goto err;
}
/*
Handling of on-line cases:
@@ -4538,7 +4832,7 @@ adding and copying partitions, the second after completing the adding
and copying and finally the third line after also dropping the partitions
that are reorganised.
*/
- if (*fast_alter_partition &&
+ if (*fast_alter_table &&
tab_part_info->part_type == HASH_PARTITION)
{
uint part_no= 0, start_part= 1, start_sec_part= 1;
@@ -4547,7 +4841,7 @@ that are reorganised.
uint lower_2n= upper_2n >> 1;
bool all_parts= TRUE;
if (tab_part_info->linear_hash_ind &&
- no_new_partitions < upper_2n)
+ num_new_partitions < upper_2n)
{
/*
An analysis of which parts needs reorganisation shows that it is
@@ -4556,7 +4850,7 @@ that are reorganised.
onwards it starts again from partition 0 and goes on until
it reaches p(upper_2n - 1). If the last new partition reaches
beyond upper_2n - 1 then the first interval will end with
- p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n).
+ p(lower_2n - 1) and start with p(num_orig_partitions - lower_2n).
If lower_2n partitions are added then p0 to p(lower_2n - 1) will
be reorganised which means that the two interval becomes one
interval at this point. Thus only when adding less than
@@ -4584,7 +4878,7 @@ that are reorganised.
to TRUE. In this case we don't get into this if-part at all.
*/
all_parts= FALSE;
- if (no_new_partitions >= lower_2n)
+ if (num_new_partitions >= lower_2n)
{
/*
In this case there is only one interval since the two intervals
@@ -4600,8 +4894,8 @@ that are reorganised.
Also in this case there is only one interval since we are not
going over a 2**n boundary
*/
- start_part= no_orig_partitions - lower_2n;
- end_part= start_part + (no_new_partitions - 1);
+ start_part= num_orig_partitions - lower_2n;
+ end_part= start_part + (num_new_partitions - 1);
}
else
{
@@ -4610,7 +4904,7 @@ that are reorganised.
new parts that would ensure that the intervals become
overlapping.
*/
- start_part= no_orig_partitions - lower_2n;
+ start_part= num_orig_partitions - lower_2n;
end_part= upper_2n - 1;
start_sec_part= 0;
end_sec_part= new_total_partitions - (upper_2n + 1);
@@ -4627,7 +4921,7 @@ that are reorganised.
{
p_elem->part_state= PART_CHANGED;
}
- } while (++part_no < no_orig_partitions);
+ } while (++part_no < num_orig_partitions);
}
/*
Need to concatenate the lists here to make it possible to check the
@@ -4643,15 +4937,15 @@ that are reorganised.
do
{
partition_element *part_elem= alt_it++;
- if (*fast_alter_partition)
+ if (*fast_alter_table)
part_elem->part_state= PART_TO_BE_ADDED;
if (tab_part_info->partitions.push_back(part_elem))
{
mem_alloc_error(1);
- DBUG_RETURN(TRUE);
+ goto err;
}
- } while (++part_count < no_new_partitions);
- tab_part_info->no_parts+= no_new_partitions;
+ } while (++part_count < num_new_partitions);
+ tab_part_info->num_parts+= num_new_partitions;
}
/*
If we specify partitions explicitly we don't use defaults anymore.
@@ -4666,7 +4960,7 @@ that are reorganised.
DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
tab_part_info->use_default_partitions= FALSE;
}
- tab_part_info->use_default_no_partitions= FALSE;
+ tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
@@ -4680,8 +4974,8 @@ that are reorganised.
command to drop the partition failed in the middle.
*/
uint part_count= 0;
- uint no_parts_dropped= alter_info->partition_names.elements;
- uint no_parts_found= 0;
+ uint num_parts_dropped= alter_info->partition_names.elements;
+ uint num_parts_found= 0;
List_iterator<partition_element> part_it(tab_part_info->partitions);
tab_part_info->is_auto_partitioned= FALSE;
@@ -4689,12 +4983,12 @@ that are reorganised.
tab_part_info->part_type == LIST_PARTITION))
{
my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (no_parts_dropped >= tab_part_info->no_parts)
+ if (num_parts_dropped >= tab_part_info->num_parts)
{
my_error(ER_DROP_LAST_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
do
{
@@ -4705,58 +4999,58 @@ that are reorganised.
/*
Set state to indicate that the partition is to be dropped.
*/
- no_parts_found++;
+ num_parts_found++;
part_elem->part_state= PART_TO_BE_DROPPED;
}
- } while (++part_count < tab_part_info->no_parts);
- if (no_parts_found != no_parts_dropped)
+ } while (++part_count < tab_part_info->num_parts);
+ if (num_parts_found != num_parts_dropped)
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
+ if (new_table->file->is_fk_defined_on_table_or_index(MAX_KEY))
{
my_error(ER_ROW_IS_REFERENCED, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- tab_part_info->no_parts-= no_parts_dropped;
+ tab_part_info->num_parts-= num_parts_dropped;
}
else if (alter_info->flags & ALTER_REBUILD_PARTITION)
{
- uint no_parts_found;
- uint no_parts_opt= alter_info->partition_names.elements;
- no_parts_found= set_part_state(alter_info, tab_part_info, PART_CHANGED);
- if (no_parts_found != no_parts_opt &&
+ uint num_parts_found;
+ uint num_parts_opt= alter_info->partition_names.elements;
+ num_parts_found= set_part_state(alter_info, tab_part_info, PART_CHANGED);
+ if (num_parts_found != num_parts_opt &&
(!(alter_info->flags & ALTER_ALL_PARTITION)))
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REBUILD");
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (!(*fast_alter_partition))
+ if (!(*fast_alter_table))
{
- table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
- DBUG_RETURN(TRUE);
+ new_table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
+ goto err;
}
}
else if (alter_info->flags & ALTER_COALESCE_PARTITION)
{
- uint no_parts_coalesced= alter_info->no_parts;
- uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
+ uint num_parts_coalesced= alter_info->num_parts;
+ uint num_parts_remain= tab_part_info->num_parts - num_parts_coalesced;
List_iterator<partition_element> part_it(tab_part_info->partitions);
if (tab_part_info->part_type != HASH_PARTITION)
{
my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (no_parts_coalesced == 0)
+ if (num_parts_coalesced == 0)
{
my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (no_parts_coalesced >= tab_part_info->no_parts)
+ if (num_parts_coalesced >= tab_part_info->num_parts)
{
my_error(ER_DROP_LAST_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
/*
Online handling:
@@ -4795,50 +5089,50 @@ state of p1.
uint part_count= 0, start_part= 1, start_sec_part= 1;
uint end_part= 0, end_sec_part= 0;
bool all_parts= TRUE;
- if (*fast_alter_partition &&
+ if (*fast_alter_table &&
tab_part_info->linear_hash_ind)
{
uint upper_2n= tab_part_info->linear_hash_mask + 1;
uint lower_2n= upper_2n >> 1;
all_parts= FALSE;
- if (no_parts_coalesced >= lower_2n)
+ if (num_parts_coalesced >= lower_2n)
{
all_parts= TRUE;
}
- else if (no_parts_remain >= lower_2n)
+ else if (num_parts_remain >= lower_2n)
{
- end_part= tab_part_info->no_parts - (lower_2n + 1);
- start_part= no_parts_remain - lower_2n;
+ end_part= tab_part_info->num_parts - (lower_2n + 1);
+ start_part= num_parts_remain - lower_2n;
}
else
{
start_part= 0;
- end_part= tab_part_info->no_parts - (lower_2n + 1);
+ end_part= tab_part_info->num_parts - (lower_2n + 1);
end_sec_part= (lower_2n >> 1) - 1;
- start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1));
+ start_sec_part= end_sec_part - (lower_2n - (num_parts_remain + 1));
}
}
do
{
partition_element *p_elem= part_it++;
- if (*fast_alter_partition &&
+ if (*fast_alter_table &&
(all_parts ||
(part_count >= start_part && part_count <= end_part) ||
(part_count >= start_sec_part && part_count <= end_sec_part)))
p_elem->part_state= PART_CHANGED;
- if (++part_count > no_parts_remain)
+ if (++part_count > num_parts_remain)
{
- if (*fast_alter_partition)
+ if (*fast_alter_table)
p_elem->part_state= PART_REORGED_DROPPED;
else
part_it.remove();
}
- } while (part_count < tab_part_info->no_parts);
- tab_part_info->no_parts= no_parts_remain;
+ } while (part_count < tab_part_info->num_parts);
+ tab_part_info->num_parts= num_parts_remain;
}
if (!(alter_info->flags & ALTER_TABLE_REORG))
{
- tab_part_info->use_default_no_partitions= FALSE;
+ tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
@@ -4855,47 +5149,46 @@ state of p1.
range as those changed from.
This command can be used on RANGE and LIST partitions.
*/
- uint no_parts_reorged= alter_info->partition_names.elements;
- uint no_parts_new= thd->work_part_info->partitions.elements;
- partition_info *alt_part_info= thd->work_part_info;
+ uint num_parts_reorged= alter_info->partition_names.elements;
+ uint num_parts_new= thd->work_part_info->partitions.elements;
uint check_total_partitions;
tab_part_info->is_auto_partitioned= FALSE;
- if (no_parts_reorged > tab_part_info->no_parts)
+ if (num_parts_reorged > tab_part_info->num_parts)
{
my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
if (!(tab_part_info->part_type == RANGE_PARTITION ||
tab_part_info->part_type == LIST_PARTITION) &&
- (no_parts_new != no_parts_reorged))
+ (num_parts_new != num_parts_reorged))
{
my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
if (tab_part_info->is_sub_partitioned() &&
- alt_part_info->no_subparts &&
- alt_part_info->no_subparts != tab_part_info->no_subparts)
+ alt_part_info->num_subparts &&
+ alt_part_info->num_subparts != tab_part_info->num_subparts)
{
my_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- check_total_partitions= tab_part_info->no_parts + no_parts_new;
- check_total_partitions-= no_parts_reorged;
+ check_total_partitions= tab_part_info->num_parts + num_parts_new;
+ check_total_partitions-= num_parts_reorged;
if (check_total_partitions > MAX_PARTITIONS)
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
- alt_part_info->no_subparts= tab_part_info->no_subparts;
+ alt_part_info->num_subparts= tab_part_info->num_subparts;
DBUG_ASSERT(!alt_part_info->use_default_partitions);
- if (alt_part_info->set_up_defaults_for_partitioning(table->file,
+ if (alt_part_info->set_up_defaults_for_partitioning(new_table->file,
ULL(0),
0))
{
- DBUG_RETURN(TRUE);
+ goto err;
}
/*
Online handling:
@@ -4937,9 +5230,7 @@ the generated partition syntax in a correct manner.
uint part_count= 0;
bool found_first= FALSE;
bool found_last= FALSE;
- bool is_last_partition_reorged;
uint drop_count= 0;
- longlong tab_max_range= 0, alt_max_range= 0;
do
{
partition_element *part_elem= tab_it++;
@@ -4949,37 +5240,51 @@ the generated partition syntax in a correct manner.
{
is_last_partition_reorged= TRUE;
drop_count++;
- tab_max_range= part_elem->range_value;
- if (*fast_alter_partition &&
+ if (tab_part_info->column_list)
+ {
+ List_iterator<part_elem_value> p(part_elem->list_val_list);
+ tab_max_elem_val= p++;
+ }
+ else
+ tab_max_range= part_elem->range_value;
+ if (*fast_alter_table &&
tab_part_info->temp_partitions.push_back(part_elem))
{
mem_alloc_error(1);
- DBUG_RETURN(TRUE);
+ goto err;
}
- if (*fast_alter_partition)
+ if (*fast_alter_table)
part_elem->part_state= PART_TO_BE_REORGED;
if (!found_first)
{
uint alt_part_count= 0;
- found_first= TRUE;
+ partition_element *alt_part_elem;
List_iterator<partition_element>
alt_it(alt_part_info->partitions);
+ found_first= TRUE;
do
{
- partition_element *alt_part_elem= alt_it++;
- alt_max_range= alt_part_elem->range_value;
- if (*fast_alter_partition)
+ alt_part_elem= alt_it++;
+ if (tab_part_info->column_list)
+ {
+ List_iterator<part_elem_value> p(alt_part_elem->list_val_list);
+ alt_max_elem_val= p++;
+ }
+ else
+ alt_max_range= alt_part_elem->range_value;
+
+ if (*fast_alter_table)
alt_part_elem->part_state= PART_TO_BE_ADDED;
if (alt_part_count == 0)
tab_it.replace(alt_part_elem);
else
tab_it.after(alt_part_elem);
- } while (++alt_part_count < no_parts_new);
+ } while (++alt_part_count < num_parts_new);
}
else if (found_last)
{
my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
else
tab_it.remove();
@@ -4989,32 +5294,13 @@ the generated partition syntax in a correct manner.
if (found_first)
found_last= TRUE;
}
- } while (++part_count < tab_part_info->no_parts);
- if (drop_count != no_parts_reorged)
+ } while (++part_count < tab_part_info->num_parts);
+ if (drop_count != num_parts_reorged)
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
- DBUG_RETURN(TRUE);
- }
- if (tab_part_info->part_type == RANGE_PARTITION &&
- ((is_last_partition_reorged &&
- alt_max_range < tab_max_range) ||
- (!is_last_partition_reorged &&
- alt_max_range != tab_max_range)))
- {
- /*
- For range partitioning the total resulting range before and
- after the change must be the same except in one case. This is
- when the last partition is reorganised, in this case it is
- acceptable to increase the total range.
- The reason is that it is not allowed to have "holes" in the
- middle of the ranges and thus we should not allow to reorganise
- to create "holes". Also we should not allow using REORGANIZE
- to drop data.
- */
- my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
- DBUG_RETURN(TRUE);
+ goto err;
}
- tab_part_info->no_parts= check_total_partitions;
+ tab_part_info->num_parts= check_total_partitions;
}
}
else
@@ -5030,12 +5316,44 @@ the generated partition syntax in a correct manner.
!alt_part_info->use_default_subpartitions)
{
tab_part_info->use_default_subpartitions= FALSE;
- tab_part_info->use_default_no_subpartitions= FALSE;
+ tab_part_info->use_default_num_subpartitions= FALSE;
}
if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
- table->file, ULL(0), FALSE))
+ new_table->file, ULL(0), TRUE))
{
- DBUG_RETURN(TRUE);
+ goto err;
+ }
+ /*
+    The check below needs to be performed after check_partition_info,
+    since that function "fixes" the item trees of the new partitions
+    that we reorganize into.
+ */
+ if (alter_info->flags == ALTER_REORGANIZE_PARTITION &&
+ tab_part_info->part_type == RANGE_PARTITION &&
+ ((is_last_partition_reorged &&
+ (tab_part_info->column_list ?
+ (tab_part_info->compare_column_values(
+ alt_max_elem_val->col_val_array,
+ tab_max_elem_val->col_val_array) < 0) :
+ alt_max_range < tab_max_range)) ||
+ (!is_last_partition_reorged &&
+ (tab_part_info->column_list ?
+ (tab_part_info->compare_column_values(
+ alt_max_elem_val->col_val_array,
+ tab_max_elem_val->col_val_array) != 0) :
+ alt_max_range != tab_max_range))))
+ {
+ /*
+ For range partitioning the total resulting range before and
+ after the change must be the same except in one case. This is
+ when the last partition is reorganised, in this case it is
+ acceptable to increase the total range.
+ The reason is that it is not allowed to have "holes" in the
+ middle of the ranges and thus we should not allow to reorganise
+ to create "holes".
+ */
+ my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
+ goto err;
}
}
}
@@ -5150,6 +5468,10 @@ the generated partition syntax in a correct manner.
{
DBUG_PRINT("info", ("partition changed"));
*partition_changed= TRUE;
+ if (thd->work_part_info->fix_parser_data(thd))
+ {
+ goto err;
+ }
}
/*
Set up partition default_engine_type either from the create_info
@@ -5169,7 +5491,7 @@ the generated partition syntax in a correct manner.
if (check_native_partitioned(create_info, &is_native_partitioned,
part_info, thd))
{
- DBUG_RETURN(TRUE);
+ goto err;
}
if (!is_native_partitioned)
{
@@ -5179,6 +5501,17 @@ the generated partition syntax in a correct manner.
}
}
DBUG_RETURN(FALSE);
+err:
+ if (new_table)
+ {
+ /*
+ Only remove the intermediate table object and its share object,
+ do not remove the .frm file, since it is the original one.
+ */
+ close_temporary(new_table, 1, 0);
+ }
+ *fast_alter_table= NULL;
+ DBUG_RETURN(TRUE);
}
@@ -5213,20 +5546,33 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
char path[FN_REFLEN+1];
int error;
handler *file= lpt->table->file;
+ THD *thd= lpt->thd;
DBUG_ENTER("mysql_change_partitions");
build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0);
+
+  if (mysql_trans_prepare_alter_copy_data(thd))
+ DBUG_RETURN(TRUE);
+
+ if (file->ha_external_lock(thd, F_WRLCK))
+ DBUG_RETURN(TRUE);
+
+ /* TODO: test if bulk_insert would increase the performance */
+
if ((error= file->ha_change_partitions(lpt->create_info, path, &lpt->copied,
&lpt->deleted, lpt->pack_frm_data,
lpt->pack_frm_len)))
{
- if (error != ER_OUTOFMEMORY)
- file->print_error(error, MYF(0));
- else
- lpt->thd->fatal_error();
- DBUG_RETURN(TRUE);
+ file->print_error(error, MYF(error != ER_OUTOFMEMORY ? 0 : ME_FATALERROR));
}
- DBUG_RETURN(FALSE);
+
+ if (mysql_trans_commit_alter_copy_data(thd))
+ error= 1; /* The error has been reported */
+
+ if (file->ha_external_lock(thd, F_UNLCK))
+ error= 1;
+
+ DBUG_RETURN(test(error));
}
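The rewritten mysql_change_partitions brackets the engine call between transaction preparation/commit and an external write lock, and folds every later failure into one status so the unlock always runs. A generic sketch of that shape, with hypothetical stand-in functions rather than the server API:

    #include <cstdio>

    /* Hypothetical stand-ins for the prepare/lock/work/commit/unlock steps. */
    static bool prepare_copy_phase()       { return false; }
    static bool take_external_write_lock() { return false; }
    static int  do_change_partitions()     { return 0; }
    static bool commit_copy_phase()        { return false; }
    static bool release_external_lock()    { return false; }

    /* Failing before the lock returns immediately; after it, errors are
       accumulated so commit and unlock are always attempted. */
    static bool change_partitions_sketch()
    {
      if (prepare_copy_phase())
        return true;
      if (take_external_write_lock())
        return true;

      int error= do_change_partitions();  /* already reported; keep going */

      if (commit_copy_phase())
        error= 1;
      if (release_external_lock())
        error= 1;
      return error != 0;
    }

    int main()
    {
      printf("changed partitions, failure=%d\n", (int) change_partitions_sketch());
      return 0;
    }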
@@ -5310,8 +5656,8 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
part_it.remove();
remove_count++;
}
- } while (++i < part_info->no_parts);
- part_info->no_parts-= remove_count;
+ } while (++i < part_info->num_parts);
+ part_info->num_parts-= remove_count;
DBUG_RETURN(FALSE);
}
@@ -5433,7 +5779,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
char normal_path[FN_REFLEN];
List_iterator<partition_element> part_it(part_info->partitions);
uint temp_partitions= part_info->temp_partitions.elements;
- uint no_elements= part_info->partitions.elements;
+ uint num_elements= part_info->partitions.elements;
uint i= 0;
DBUG_ENTER("write_log_changed_partitions");
@@ -5446,7 +5792,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
if (part_info->is_sub_partitioned())
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
- uint no_subparts= part_info->no_subparts;
+ uint num_subparts= part_info->num_subparts;
uint j= 0;
do
{
@@ -5475,7 +5821,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
*next_entry= log_entry->entry_pos;
sub_elem->log_entry= log_entry;
insert_part_info_log_entry_list(part_info, log_entry);
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -5503,7 +5849,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
insert_part_info_log_entry_list(part_info, log_entry);
}
}
- } while (++i < no_elements);
+ } while (++i < num_elements);
DBUG_RETURN(FALSE);
}
@@ -5529,14 +5875,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
char tmp_path[FN_LEN];
List_iterator<partition_element> part_it(part_info->partitions);
List_iterator<partition_element> temp_it(part_info->temp_partitions);
- uint no_temp_partitions= part_info->temp_partitions.elements;
- uint no_elements= part_info->partitions.elements;
+ uint num_temp_partitions= part_info->temp_partitions.elements;
+ uint num_elements= part_info->partitions.elements;
DBUG_ENTER("write_log_dropped_partitions");
ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
if (temp_list)
- no_elements= no_temp_partitions;
- while (no_elements--)
+ num_elements= num_temp_partitions;
+ while (num_elements--)
{
partition_element *part_elem;
if (temp_list)
@@ -5550,14 +5896,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
uint name_variant;
if (part_elem->part_state == PART_CHANGED ||
(part_elem->part_state == PART_TO_BE_ADDED &&
- no_temp_partitions))
+ num_temp_partitions))
name_variant= TEMP_PART_NAME;
else
name_variant= NORMAL_PART_NAME;
if (part_info->is_sub_partitioned())
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
- uint no_subparts= part_info->no_subparts;
+ uint num_subparts= part_info->num_subparts;
uint j= 0;
do
{
@@ -5577,7 +5923,7 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
*next_entry= log_entry->entry_pos;
sub_elem->log_entry= log_entry;
insert_part_info_log_entry_list(part_info, log_entry);
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -5645,7 +5991,7 @@ static bool write_log_drop_shadow_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
DBUG_ENTER("write_log_drop_shadow_frm");
build_table_shadow_filename(shadow_path, sizeof(shadow_path) - 1, lpt);
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
if (write_log_replace_delete_frm(lpt, 0UL, NULL,
(const char*)shadow_path, FALSE))
goto error;
@@ -5653,13 +5999,13 @@ static bool write_log_drop_shadow_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
if (write_execute_ddl_log_entry(log_entry->entry_pos,
FALSE, &exec_log_entry))
goto error;
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
set_part_info_exec_log_entry(part_info, exec_log_entry);
DBUG_RETURN(FALSE);
error:
release_part_info_log_entries(part_info->first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
part_info->first_log_entry= NULL;
my_error(ER_DDL_LOG_ERROR, MYF(0));
DBUG_RETURN(TRUE);
@@ -5693,7 +6039,7 @@ static bool write_log_rename_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
build_table_filename(path, sizeof(path) - 1, lpt->db,
lpt->table_name, "", 0);
build_table_shadow_filename(shadow_path, sizeof(shadow_path) - 1, lpt);
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
if (write_log_replace_delete_frm(lpt, 0UL, shadow_path, path, TRUE))
goto error;
log_entry= part_info->first_log_entry;
@@ -5702,12 +6048,12 @@ static bool write_log_rename_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
FALSE, &exec_log_entry))
goto error;
release_part_info_log_entries(old_first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
DBUG_RETURN(FALSE);
error:
release_part_info_log_entries(part_info->first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
part_info->first_log_entry= old_first_log_entry;
part_info->frm_log_entry= NULL;
my_error(ER_DDL_LOG_ERROR, MYF(0));
@@ -5745,7 +6091,7 @@ static bool write_log_drop_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
build_table_filename(path, sizeof(path) - 1, lpt->db,
lpt->table_name, "", 0);
build_table_shadow_filename(tmp_path, sizeof(tmp_path) - 1, lpt);
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
FALSE))
goto error;
@@ -5758,12 +6104,12 @@ static bool write_log_drop_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
FALSE, &exec_log_entry))
goto error;
release_part_info_log_entries(old_first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
DBUG_RETURN(FALSE);
error:
release_part_info_log_entries(part_info->first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
part_info->first_log_entry= old_first_log_entry;
part_info->frm_log_entry= NULL;
my_error(ER_DDL_LOG_ERROR, MYF(0));
@@ -5792,34 +6138,41 @@ static bool write_log_add_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
{
partition_info *part_info= lpt->part_info;
DDL_LOG_MEMORY_ENTRY *log_entry;
- DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
char tmp_path[FN_REFLEN + 1];
char path[FN_REFLEN + 1];
uint next_entry= 0;
+ DDL_LOG_MEMORY_ENTRY *old_first_log_entry= part_info->first_log_entry;
+ /* write_log_drop_shadow_frm(lpt) must have been run first */
+ DBUG_ASSERT(old_first_log_entry);
DBUG_ENTER("write_log_add_change_partition");
build_table_filename(path, sizeof(path) - 1, lpt->db,
lpt->table_name, "", 0);
build_table_shadow_filename(tmp_path, sizeof(tmp_path) - 1, lpt);
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
+
+ /* Relink the previous drop shadow frm entry */
+ if (old_first_log_entry)
+ next_entry= old_first_log_entry->entry_pos;
if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
FALSE))
goto error;
- if (write_log_replace_delete_frm(lpt, next_entry, NULL, tmp_path,
- FALSE))
- goto error;
log_entry= part_info->first_log_entry;
+
if (write_execute_ddl_log_entry(log_entry->entry_pos,
- FALSE, &exec_log_entry))
+ FALSE,
+ /* Reuse the old execute ddl_log_entry */
+ &exec_log_entry))
goto error;
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
set_part_info_exec_log_entry(part_info, exec_log_entry);
DBUG_RETURN(FALSE);
error:
release_part_info_log_entries(part_info->first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
- part_info->first_log_entry= NULL;
+ mysql_mutex_unlock(&LOCK_gdl);
+ part_info->first_log_entry= old_first_log_entry;
my_error(ER_DDL_LOG_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
@@ -5836,9 +6189,15 @@ error:
TRUE Error
FALSE Success
DESCRIPTION
- We will write log entries that specify to remove all partitions reorganised,
- to rename others to reflect the new naming scheme and to install the shadow
- frm file.
+ We will write log entries that specify to
+ 1) Install the shadow frm file.
+ 2) Remove all reorganized partitions. (This makes it possible to reorganize
+ a partition into the same name, as in REORGANIZE p0 INTO (p0, p1),
+ so that the later rename from the temporary p0 name to p0 does not
+ fail because the partition already exists.)
+ 3) Rename others to reflect the new naming scheme.
+
+ Note that the entries are written to the ddl log in reverse order.
*/
static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
@@ -5852,30 +6211,35 @@ static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
uint next_entry= 0;
DBUG_ENTER("write_log_final_change_partition");
+ /*
+ Do not link any previous log entry.
+ Replace the revert operations with forced retry operations.
+ */
part_info->first_log_entry= NULL;
build_table_filename(path, sizeof(path) - 1, lpt->db,
lpt->table_name, "", 0);
build_table_shadow_filename(shadow_path, sizeof(shadow_path) - 1, lpt);
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
+ if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
+ goto error;
if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
lpt->alter_info->flags & ALTER_REORGANIZE_PARTITION))
goto error;
- if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
- goto error;
- if (write_log_replace_delete_frm(lpt, 0UL, shadow_path, path, TRUE))
+ if (write_log_replace_delete_frm(lpt, next_entry, shadow_path, path, TRUE))
goto error;
log_entry= part_info->first_log_entry;
part_info->frm_log_entry= log_entry;
+ /* Overwrite the revert execute log entry with this retry execute entry */
if (write_execute_ddl_log_entry(log_entry->entry_pos,
FALSE, &exec_log_entry))
goto error;
release_part_info_log_entries(old_first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
DBUG_RETURN(FALSE);
error:
release_part_info_log_entries(part_info->first_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
part_info->first_log_entry= old_first_log_entry;
part_info->frm_log_entry= NULL;
my_error(ER_DDL_LOG_ERROR, MYF(0));
@@ -5902,7 +6266,7 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
DBUG_ENTER("write_log_completed");
DBUG_ASSERT(log_entry);
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
if (write_execute_ddl_log_entry(0UL, TRUE, &log_entry))
{
/*
@@ -5916,7 +6280,7 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
}
release_part_info_log_entries(part_info->first_log_entry);
release_part_info_log_entries(part_info->exec_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
part_info->exec_log_entry= NULL;
part_info->first_log_entry= NULL;
DBUG_VOID_RETURN;
@@ -5934,10 +6298,10 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
static void release_log_entries(partition_info *part_info)
{
- pthread_mutex_lock(&LOCK_gdl);
+ mysql_mutex_lock(&LOCK_gdl);
release_part_info_log_entries(part_info->first_log_entry);
release_part_info_log_entries(part_info->exec_log_entry);
- pthread_mutex_unlock(&LOCK_gdl);
+ mysql_mutex_unlock(&LOCK_gdl);
part_info->first_log_entry= NULL;
part_info->exec_log_entry= NULL;
}
@@ -5954,59 +6318,86 @@ static void release_log_entries(partition_info *part_info)
*/
static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt)
{
- int err;
- if (lpt->thd->locked_tables)
+ THD *thd= lpt->thd;
+
+ if (lpt->old_table)
+ close_all_tables_for_name(thd, lpt->old_table->s, FALSE);
+ if (lpt->table)
{
/*
- Close the table if open, to remove/destroy the already altered
- table->part_info object, so that it is not reused.
- */
- if (lpt->table->db_stat)
- abort_and_upgrade_lock_and_close_table(lpt);
- /*
- When we have the table locked, it is necessary to reopen the table
- since all table objects were closed and removed as part of the
- ALTER TABLE of partitioning structure.
+ Only remove the intermediate table object and its share object,
+ do not remove the .frm file, since it is the original one.
*/
- pthread_mutex_lock(&LOCK_open);
- lpt->thd->in_lock_tables= 1;
- err= reopen_tables(lpt->thd, 1, 1);
- lpt->thd->in_lock_tables= 0;
- if (err)
- {
- /*
- Issue a warning since we weren't able to regain the lock again.
- We also need to unlink table from thread's open list and from
- table_cache
- */
- unlink_open_table(lpt->thd, lpt->table, FALSE);
- sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
- }
- pthread_mutex_unlock(&LOCK_open);
+ close_temporary(lpt->table, 1, 0);
}
+ lpt->table= 0;
+ lpt->old_table= 0;
+ lpt->table_list->table= 0;
+ if (thd->locked_tables_list.reopen_tables(thd))
+ sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
}
/*
- Handle errors for ALTER TABLE for partitioning
+ Unlock and close table before renaming and dropping partitions
SYNOPSIS
- handle_alter_part_error()
+ alter_close_tables()
lpt Struct carrying parameters
- not_completed Was request in complete phase when error occurred
+ close_old Close original table too
RETURN VALUES
- NONE
+ 0
+*/
+
+static int alter_close_tables(ALTER_PARTITION_PARAM_TYPE *lpt, bool close_old)
+{
+ DBUG_ENTER("alter_close_tables");
+ if (lpt->table->db_stat)
+ {
+ lpt->table->file->close();
+ lpt->table->db_stat= 0; // Mark file closed
+ }
+ if (close_old && lpt->old_table)
+ {
+ close_all_tables_for_name(lpt->thd, lpt->old_table->s, FALSE);
+ lpt->old_table= 0;
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Handle errors for ALTER TABLE for partitioning.
+
+ @param lpt Struct carrying parameters
+ @param action_completed The action must be completed, NOT reverted
+ @param drop_partition Partitions have not been dropped yet
+ @param frm_install The shadow frm-file has not yet been installed
+ @param close_table Table is still open, close it before reverting
*/
void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
- bool not_completed,
+ bool action_completed,
bool drop_partition,
- bool frm_install)
+ bool frm_install,
+ bool close_table)
{
partition_info *part_info= lpt->part_info;
DBUG_ENTER("handle_alter_part_error");
+ if (close_table)
+ {
+ /*
+ Since the error handling (ddl_log) needs to drop newly created
+ partitions, they must be closed first so that no errors are issued.
+ But we still need some information from the part_info object,
+ so we clone it first to keep a copy.
+ */
+ part_info= lpt->part_info->get_clone();
+ alter_close_tables(lpt, action_completed);
+ }
+
if (part_info->first_log_entry &&
- execute_ddl_log_entry(current_thd,
+ execute_ddl_log_entry(lpt->thd,
part_info->first_log_entry->entry_pos))
{
/*
@@ -6015,7 +6406,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
*/
write_log_completed(lpt, FALSE);
release_log_entries(part_info);
- if (not_completed)
+ if (!action_completed)
{
if (drop_partition)
{
@@ -6080,7 +6471,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
else
{
release_log_entries(part_info);
- if (not_completed)
+ if (!action_completed)
{
/*
We hit an error before things were completed but managed
@@ -6107,25 +6498,40 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
}
-/*
+/**
+ Downgrade an exclusive MDL lock if under LOCK TABLE.
+
+ If we do not downgrade the lock here, it will be held until the table
+ is unlocked, blocking other threads from using the table.
+*/
+
+static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket,
+ enum_mdl_type type)
+{
+ if (thd->locked_tables_mode)
+ ticket->downgrade_exclusive_lock(type);
+}
+
+
+/**
Actually perform the change requested by ALTER TABLE of partitions
previously prepared.
- SYNOPSIS
- fast_alter_partition_table()
- thd Thread object
- table Table object
- alter_info ALTER TABLE info
- create_info Create info for CREATE TABLE
- table_list List of the table involved
- db Database name of new table
- table_name Table name of new table
+ @param thd Thread object
+ @param table Original table object
+ @param alter_info ALTER TABLE info
+ @param create_info Create info for CREATE TABLE
+ @param table_list List of the table involved
+ @param db Database name of new table
+ @param table_name Table name of new table
+ @param fast_alter_table Prepared table object
- RETURN VALUES
- TRUE Error
- FALSE Success
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
- DESCRIPTION
+ @note
Perform all ALTER TABLE operations for partitioned tables that can be
performed fast without a full copy of the original table.
*/
@@ -6136,25 +6542,30 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
TABLE_LIST *table_list,
char *db,
const char *table_name,
- uint fast_alter_partition)
+ TABLE *fast_alter_table)
{
/* Set-up struct used to write frm files */
- partition_info *part_info= table->part_info;
+ partition_info *part_info;
ALTER_PARTITION_PARAM_TYPE lpt_obj;
ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
- bool written_bin_log= TRUE;
- bool not_completed= TRUE;
+ bool action_completed= FALSE;
+ bool close_table_on_failure= FALSE;
bool frm_install= FALSE;
+ MDL_ticket *mdl_ticket= table->mdl_ticket;
+ DBUG_ASSERT(fast_alter_table);
DBUG_ENTER("fast_alter_partition_table");
+ part_info= fast_alter_table->part_info;
lpt->thd= thd;
+ lpt->table_list= table_list;
lpt->part_info= part_info;
lpt->alter_info= alter_info;
lpt->create_info= create_info;
lpt->db_options= create_info->table_options;
if (create_info->row_type == ROW_TYPE_DYNAMIC)
lpt->db_options|= HA_OPTION_PACK_RECORD;
- lpt->table= table;
+ lpt->table= fast_alter_table;
+ lpt->old_table= table;
lpt->key_info_buffer= 0;
lpt->key_count= 0;
lpt->db= db;
@@ -6163,12 +6574,12 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
lpt->deleted= 0;
lpt->pack_frm_data= NULL;
lpt->pack_frm_len= 0;
- thd->work_part_info= part_info;
/* Never update timestamp columns when alter */
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
+ lpt->table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
- if (fast_alter_partition & HA_PARTITION_ONE_PHASE)
+ if (table->file->alter_table_flags(alter_info->flags) &
+ HA_PARTITION_ONE_PHASE)
{
/*
In the case where the engine supports one phase online partition
@@ -6242,20 +6653,18 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
0) Write an entry that removes the shadow frm file if crash occurs
1) Write the new frm file as a shadow frm
- 2) Write the ddl log to ensure that the operation is completed
- even in the presence of a MySQL Server crash
- 3) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
- the table have completed. This ensures that other threads can not
- execute on the table in parallel.
- 4) Get a name lock on the table. This ensures that we can release all
- locks on the table and since no one can open the table, there can
- be no new threads accessing the table. They will be hanging on the
- name lock.
- 5) Close all tables that have already been opened but didn't stumble on
+ 2) Get an exclusive metadata lock on the table (waits for all active
+ transactions using this table). This ensures that we
+ can release all other locks on the table and since no one can open
+ the table, there can be no new threads accessing the table. They
+ will be hanging on this exclusive lock.
+ 3) Write the ddl log to ensure that the operation is completed
+ even in the presence of a MySQL Server crash (the log is executed
+ before any other threads are started, so there are no locking issues).
+ 4) Close all tables that have already been opened but didn't stumble on
the abort locked previously. This is done as part of the
- close_data_files_and_morph_locks call.
- 6) We are now ready to release all locks we got in this thread.
- 7) Write the bin log
+ alter_close_tables call.
+ 5) Write the bin log
Unfortunately the writing of the binlog is not synchronised with
other logging activities. So no matter in which order the binlog
is written compared to other activities there will always be cases
@@ -6266,40 +6675,54 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
require writing the statement first in the ddl log and then
when recovering from the crash read the binlog and insert it into
the binlog if not written already.
- 8) Install the previously written shadow frm file
- 9) Prepare handlers for drop of partitions
- 10) Drop the partitions
- 11) Remove entries from ddl log
- 12) Reopen table if under lock tables
- 13) Complete query
+ 6) Install the previously written shadow frm file
+ 7) Prepare handlers for drop of partitions
+ 8) Drop the partitions
+ 9) Remove entries from ddl log
+ 10) Reopen table if under lock tables
+ 11) Complete query
We insert Error injections at all places where it could be interesting
to test if recovery is properly done.
*/
if (write_log_drop_shadow_frm(lpt) ||
ERROR_INJECT_CRASH("crash_drop_partition_1") ||
+ ERROR_INJECT_ERROR("fail_drop_partition_1") ||
mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
ERROR_INJECT_CRASH("crash_drop_partition_2") ||
- write_log_drop_partition(lpt) ||
+ ERROR_INJECT_ERROR("fail_drop_partition_2") ||
+ wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN) ||
ERROR_INJECT_CRASH("crash_drop_partition_3") ||
- (not_completed= FALSE) ||
- abort_and_upgrade_lock_and_close_table(lpt) ||
+ ERROR_INJECT_ERROR("fail_drop_partition_3") ||
+ (close_table_on_failure= TRUE, FALSE) ||
+ write_log_drop_partition(lpt) ||
+ (action_completed= TRUE, FALSE) ||
+ ERROR_INJECT_CRASH("crash_drop_partition_4") ||
+ ERROR_INJECT_ERROR("fail_drop_partition_4") ||
+ alter_close_tables(lpt, action_completed) ||
+ (close_table_on_failure= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_drop_partition_5") ||
+ ERROR_INJECT_ERROR("fail_drop_partition_5") ||
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
thd->query(), thd->query_length()), FALSE)) ||
ERROR_INJECT_CRASH("crash_drop_partition_6") ||
- ((frm_install= TRUE), FALSE) ||
+ ERROR_INJECT_ERROR("fail_drop_partition_6") ||
+ (frm_install= TRUE, FALSE) ||
mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
- ((frm_install= FALSE), FALSE) ||
+ (frm_install= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_drop_partition_7") ||
+ ERROR_INJECT_ERROR("fail_drop_partition_7") ||
mysql_drop_partitions(lpt) ||
ERROR_INJECT_CRASH("crash_drop_partition_8") ||
+ ERROR_INJECT_ERROR("fail_drop_partition_8") ||
(write_log_completed(lpt, FALSE), FALSE) ||
ERROR_INJECT_CRASH("crash_drop_partition_9") ||
+ ERROR_INJECT_ERROR("fail_drop_partition_9") ||
(alter_partition_lock_handling(lpt), FALSE))
{
- handle_alter_part_error(lpt, not_completed, TRUE, frm_install);
+ handle_alter_part_error(lpt, action_completed, TRUE, frm_install,
+ close_table_on_failure);
goto err;
}
}
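
The condition chain above leans on C++ short-circuit evaluation plus the comma operator: a step that only records progress is written as (flag= TRUE, FALSE) so it can sit in the chain without ever counting as a failure. A stand-alone sketch of the idiom, with hypothetical step functions instead of the real DDL steps:

#include <cstdio>

static bool write_shadow_frm() { return false; }  /* false == success   */
static bool drop_partitions()  { return true;  }  /* pretend this fails */

int main()
{
  bool action_completed= false;
  bool close_table_on_failure= false;

  if (write_shadow_frm() ||
      (close_table_on_failure= true, false) ||  /* bookkeeping, never "fails" */
      drop_partitions() ||
      (action_completed= true, false))
  {
    /*
      The first failing step stops the chain; the flags tell the error
      handler how far we got and therefore how to recover.
    */
    std::printf("error: completed=%d close_table=%d\n",
                action_completed, close_table_on_failure);
    return 1;
  }
  std::printf("ok\n");
  return 0;
}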
@@ -6318,53 +6741,65 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
0) Write an entry that removes the shadow frm file if crash occurs
1) Write the new frm file as a shadow frm file
- 2) Log the changes to happen in ddl log
- 2) Add the new partitions
- 3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
- are still using the old partitioning scheme. Wait until all
- ongoing users have completed before progressing.
- 4) Get a name lock on the table. This ensures that we can release all
- locks on the table and since no one can open the table, there can
- be no new threads accessing the table. They will be hanging on the
- name lock.
- 5) Close all tables that have already been opened but didn't stumble on
- the abort locked previously. This is done as part of the
- close_data_files_and_morph_locks call.
- 6) Close all table handlers and unlock all handlers but retain name lock
- 7) Write binlog
- 8) Now the change is completed except for the installation of the
+ 2) Get an exclusive metadata lock on the table (waits for all active
+ transactions using this table). This ensures that we
+ can release all other locks on the table and since no one can open
+ the table, there can be no new threads accessing the table. They
+ will be hanging on this exclusive lock.
+ 3) Write an entry to remove the new partitions if a crash occurs
+ 4) Add the new partitions.
+ 5) Close all instances of the table and remove them from the table cache.
+ 6) Write binlog
+ 7) Now the change is completed except for the installation of the
new frm file. We thus write an action in the log to change to
the shadow frm file
- 9) Install the new frm file of the table where the partitions are
+ 8) Install the new frm file of the table where the partitions are
added to the table.
- 10)Wait until all accesses using the old frm file has completed
- 11)Remove entries from ddl log
- 12)Reopen tables if under lock tables
- 13)Complete query
+ 9) Remove entries from ddl log
+ 10)Reopen tables if under lock tables
+ 11)Complete query
*/
- if (write_log_add_change_partition(lpt) ||
+ if (write_log_drop_shadow_frm(lpt) ||
ERROR_INJECT_CRASH("crash_add_partition_1") ||
+ ERROR_INJECT_ERROR("fail_add_partition_1") ||
mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
ERROR_INJECT_CRASH("crash_add_partition_2") ||
- mysql_change_partitions(lpt) ||
+ ERROR_INJECT_ERROR("fail_add_partition_2") ||
+ wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN) ||
ERROR_INJECT_CRASH("crash_add_partition_3") ||
- abort_and_upgrade_lock_and_close_table(lpt) ||
+ ERROR_INJECT_ERROR("fail_add_partition_3") ||
+ (close_table_on_failure= TRUE, FALSE) ||
+ write_log_add_change_partition(lpt) ||
+ ERROR_INJECT_CRASH("crash_add_partition_4") ||
+ ERROR_INJECT_ERROR("fail_add_partition_4") ||
+ mysql_change_partitions(lpt) ||
ERROR_INJECT_CRASH("crash_add_partition_5") ||
+ ERROR_INJECT_ERROR("fail_add_partition_5") ||
+ (close_table_on_failure= FALSE, FALSE) ||
+ alter_close_tables(lpt, action_completed) ||
+ ERROR_INJECT_CRASH("crash_add_partition_6") ||
+ ERROR_INJECT_ERROR("fail_add_partition_6") ||
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
thd->query(), thd->query_length()), FALSE)) ||
- ERROR_INJECT_CRASH("crash_add_partition_6") ||
- write_log_rename_frm(lpt) ||
- (not_completed= FALSE) ||
ERROR_INJECT_CRASH("crash_add_partition_7") ||
- ((frm_install= TRUE), FALSE) ||
- mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ ERROR_INJECT_ERROR("fail_add_partition_7") ||
+ write_log_rename_frm(lpt) ||
+ (action_completed= TRUE, FALSE) ||
ERROR_INJECT_CRASH("crash_add_partition_8") ||
- (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_ERROR("fail_add_partition_8") ||
+ (frm_install= TRUE, FALSE) ||
+ mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ (frm_install= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_add_partition_9") ||
- (alter_partition_lock_handling(lpt), FALSE))
+ ERROR_INJECT_ERROR("fail_add_partition_9") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_add_partition_10") ||
+ ERROR_INJECT_ERROR("fail_add_partition_10") ||
+ (alter_partition_lock_handling(lpt), FALSE))
{
- handle_alter_part_error(lpt, not_completed, FALSE, frm_install);
+ handle_alter_part_error(lpt, action_completed, FALSE, frm_install,
+ close_table_on_failure);
goto err;
}
}
@@ -6407,78 +6842,96 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
removed in a crash situation
3) Add the new partitions
Copy from the reorganised partitions to the new partitions
- 4) Log that operation is completed and log all complete actions
+ 4) Get an exclusive metadata lock on the table (waits for all active
+ transactions using this table). This ensures that we
+ can release all other locks on the table and since no one can open
+ the table, there can be no new threads accessing the table. They
+ will be hanging on this exclusive lock.
+ 5) Log that operation is completed and log all complete actions
needed to complete operation from here
- 5) Lock all partitions in TL_WRITE_ONLY to ensure that no users
- are still using the old partitioning scheme. Wait until all
- ongoing users have completed before progressing.
- 6) Get a name lock of the table
- 7) Close all tables opened but not yet locked, after this call we are
- certain that no other thread is in the lock wait queue or has
- opened the table. The name lock will ensure that they are blocked
- on the open call.
- This is achieved also by close_data_files_and_morph_locks call.
- 8) Close all partitions opened by this thread, but retain name lock.
- 9) Write bin log
- 10) Prepare handlers for rename and delete of partitions
- 11) Rename and drop the reorged partitions such that they are no
- longer used and rename those added to their real new names.
- 12) Install the shadow frm file
- 13) Reopen the table if under lock tables
- 14) Complete query
+ 6) Write bin log
+ 7) Close all instances of the table and remove them from the table cache.
+ 8) Prepare handlers for rename and delete of partitions
+ 9) Rename and drop the reorged partitions such that they are no
+ longer used and rename those added to their real new names.
+ 10) Install the shadow frm file
+ 11) Reopen the table if under lock tables
+ 12) Complete query
*/
- if (write_log_add_change_partition(lpt) ||
+ if (write_log_drop_shadow_frm(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_1") ||
+ ERROR_INJECT_ERROR("fail_change_partition_1") ||
mysql_write_frm(lpt, WFRM_WRITE_SHADOW) ||
ERROR_INJECT_CRASH("crash_change_partition_2") ||
- mysql_change_partitions(lpt) ||
+ ERROR_INJECT_ERROR("fail_change_partition_2") ||
+ (close_table_on_failure= TRUE, FALSE) ||
+ write_log_add_change_partition(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_3") ||
- write_log_final_change_partition(lpt) ||
+ ERROR_INJECT_ERROR("fail_change_partition_3") ||
+ mysql_change_partitions(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_4") ||
- (not_completed= FALSE) ||
- abort_and_upgrade_lock_and_close_table(lpt) ||
+ ERROR_INJECT_ERROR("fail_change_partition_4") ||
+ wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN) ||
+ ERROR_INJECT_CRASH("crash_change_partition_5") ||
+ ERROR_INJECT_ERROR("fail_change_partition_5") ||
+ write_log_final_change_partition(lpt) ||
+ (action_completed= TRUE, FALSE) ||
ERROR_INJECT_CRASH("crash_change_partition_6") ||
+ ERROR_INJECT_ERROR("fail_change_partition_6") ||
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
thd->query(), thd->query_length()), FALSE)) ||
ERROR_INJECT_CRASH("crash_change_partition_7") ||
+ ERROR_INJECT_ERROR("fail_change_partition_7") ||
+ ((frm_install= TRUE), FALSE) ||
mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
+ (frm_install= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_change_partition_8") ||
- mysql_drop_partitions(lpt) ||
+ ERROR_INJECT_ERROR("fail_change_partition_8") ||
+ alter_close_tables(lpt, action_completed) ||
+ (close_table_on_failure= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_change_partition_9") ||
- mysql_rename_partitions(lpt) ||
- ((frm_install= TRUE), FALSE) ||
+ ERROR_INJECT_ERROR("fail_change_partition_9") ||
+ mysql_drop_partitions(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_10") ||
- (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_ERROR("fail_change_partition_10") ||
+ mysql_rename_partitions(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_11") ||
+ ERROR_INJECT_ERROR("fail_change_partition_11") ||
+ (write_log_completed(lpt, FALSE), FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_12") ||
+ ERROR_INJECT_ERROR("fail_change_partition_12") ||
(alter_partition_lock_handling(lpt), FALSE))
{
- handle_alter_part_error(lpt, not_completed, FALSE, frm_install);
+ handle_alter_part_error(lpt, action_completed, FALSE, frm_install,
+ close_table_on_failure);
goto err;
}
}
+ downgrade_mdl_if_lock_tables_mode(thd, mdl_ticket, MDL_SHARED_NO_READ_WRITE);
/*
A final step is to write the query to the binlog and send ok to the
user
*/
- DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted,
- table, table_list, FALSE, NULL,
- written_bin_log));
+ DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted, table_list));
err:
- if (thd->locked_tables)
+ if (action_completed)
{
/*
- table->part_info was altered in prep_alter_part_table and must be
- destroyed and recreated, since otherwise it will be reused, since
- we are under LOCK TABLE.
+ Although an error occurred, the action was forced to be retried until completion.
+ Therefore we must close+reopen all instances of the table.
*/
- alter_partition_lock_handling(lpt);
+ (void) alter_partition_lock_handling(lpt);
}
else
{
- /* Force the table to be closed to avoid reuse of the table->part_info */
- close_thread_tables(thd);
+ /*
+ The failed action was reverted; leave the original table as is and
+ close/destroy the intermediate table object and its share.
+ */
+ close_temporary(lpt->table, 1, 0);
}
+ downgrade_mdl_if_lock_tables_mode(thd, mdl_ticket, MDL_SHARED_NO_READ_WRITE);
DBUG_RETURN(TRUE);
}
#endif
@@ -6571,7 +7024,7 @@ void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
void mem_alloc_error(size_t size)
{
- my_error(ER_OUTOFMEMORY, MYF(0), size);
+ my_error(ER_OUTOFMEMORY, MYF(0), static_cast<int>(size));
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -6660,16 +7113,19 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
IMPLEMENTATION
There are three available interval analyzer functions:
(1) get_part_iter_for_interval_via_mapping
- (2) get_part_iter_for_interval_via_walking
+ (2) get_part_iter_for_interval_cols_via_map
+ (3) get_part_iter_for_interval_via_walking
They all have limited applicability:
(1) is applicable for "PARTITION BY <RANGE|LIST>(func(t.field))", where
func is a monotonic function.
-
- (2) is applicable for
+
+ (2) is applicable for "PARTITION BY <RANGE|LIST> COLUMNS (field_list)"
+
+ (3) is applicable for
"[SUB]PARTITION BY <any-partitioning-type>(any_func(t.integer_field))"
- If both are applicable, (1) is preferred over (2).
+ If both (1) and (3) are applicable, (1) is preferred over (3).
This function sets part_info::get_part_iter_for_interval according to
these criteria, and also sets some auxiliary fields that the function
@@ -6689,10 +7145,19 @@ static void set_up_range_analysis_info(partition_info *part_info)
switch (part_info->part_type) {
case RANGE_PARTITION:
case LIST_PARTITION:
- if (part_info->part_expr->get_monotonicity_info() != NON_MONOTONIC)
+ if (!part_info->column_list)
+ {
+ if (part_info->part_expr->get_monotonicity_info() != NON_MONOTONIC)
+ {
+ part_info->get_part_iter_for_interval=
+ get_part_iter_for_interval_via_mapping;
+ goto setup_subparts;
+ }
+ }
+ else
{
part_info->get_part_iter_for_interval=
- get_part_iter_for_interval_via_mapping;
+ get_part_iter_for_interval_cols_via_map;
goto setup_subparts;
}
default:
@@ -6703,7 +7168,7 @@ static void set_up_range_analysis_info(partition_info *part_info)
Check if get_part_iter_for_interval_via_walking() can be used for
partitioning
*/
- if (part_info->no_part_fields == 1)
+ if (part_info->num_part_fields == 1)
{
Field *field= part_info->part_field_array[0];
switch (field->type()) {
@@ -6725,7 +7190,7 @@ setup_subparts:
Check if get_part_iter_for_interval_via_walking() can be used for
subpartitioning
*/
- if (part_info->no_subpart_fields == 1)
+ if (part_info->num_subpart_fields == 1)
{
Field *field= part_info->subpart_field_array[0];
switch (field->type()) {
@@ -6743,9 +7208,118 @@ setup_subparts:
}
+/*
+ This function takes a buffer of packed fields in opt-range format
+ and stores it in record format. To avoid having to worry about how
+ field lengths are calculated in opt-range format, we are given an
+ array with the length used for each field in store_length_array.
+
+ SYNOPSIS
+ store_tuple_to_record()
+ pfield Field array
+ store_length_array Array of field lengths
+ value Memory where fields are stored
+ value_end End of memory
+
+ RETURN VALUE
+ nparts Number of fields assigned
+*/
+uint32 store_tuple_to_record(Field **pfield,
+ uint32 *store_length_array,
+ uchar *value,
+ uchar *value_end)
+{
+ /* This function is inspired by store_key_image_to_rec. */
+ uint32 nparts= 0;
+ uchar *loc_value;
+ while (value < value_end)
+ {
+ loc_value= value;
+ if ((*pfield)->real_maybe_null())
+ {
+ if (*loc_value)
+ (*pfield)->set_null();
+ else
+ (*pfield)->set_notnull();
+ loc_value++;
+ }
+ uint len= (*pfield)->pack_length();
+ (*pfield)->set_key_image(loc_value, len);
+ value+= *store_length_array;
+ store_length_array++;
+ nparts++;
+ pfield++;
+ }
+ return nparts;
+}
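
A layout sketch of the buffer walked above, using plain structs instead of Field objects and assuming the store lengths have been precomputed (as by get_partition_field_store_length further down): each column occupies store_length bytes, the first of which is a null indicator when the column is nullable, and the walk stops when the buffer ends, so a prefix of the partitioning columns is acceptable.

#include <cstddef>
#include <cstdint>

struct PackedCol { bool nullable; std::size_t store_length; };

/* Count how many leading columns are present in [value, value_end). */
static unsigned count_prefix_columns(const PackedCol *cols, unsigned ncols,
                                     const std::uint8_t *value,
                                     const std::uint8_t *value_end)
{
  unsigned nparts= 0;
  for (unsigned i= 0; i < ncols && value < value_end; i++)
  {
    /*
      When cols[i].nullable, value[0] is the null flag and the key image
      of the column follows it inside the same store_length slot.
    */
    value+= cols[i].store_length;
    nparts++;
  }
  return nparts;
}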
+
+/*
+ RANGE(columns) partitioning: compare value bound and probe tuple.
+
+ The value bound is always a full tuple (but may include the MAXVALUE
+ special value).
+
+ The probe tuple may be a prefix of the partitioning tuple. The tail_is_min
+ parameter specifies whether the omitted suffix components should be
+ assumed to hold the minimum possible values (TRUE) or MAXVALUE (FALSE).
+*/
+
+static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec)
+{
+ partition_info *part_info= val->part_info;
+ Field **field= part_info->part_field_array;
+ Field **fields_end= field + nvals_in_rec;
+ int res;
+
+ for (; field != fields_end; field++, val++)
+ {
+ if (val->max_value)
+ return -1;
+ if ((*field)->is_null())
+ {
+ if (val->null_value)
+ continue;
+ return -1;
+ }
+ if (val->null_value)
+ return +1;
+ res= (*field)->cmp((const uchar*)val->column_value);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+
+static int cmp_rec_and_tuple_prune(part_column_list_val *val,
+ uint32 n_vals_in_rec,
+ bool tail_is_min)
+{
+ int cmp;
+ Field **field;
+ partition_info *part_info;
+ if ((cmp= cmp_rec_and_tuple(val, n_vals_in_rec)))
+ return cmp;
+ part_info= val->part_info;
+ field= part_info->part_field_array + n_vals_in_rec;
+ for (; *field; field++, val++)
+ {
+ if (tail_is_min)
+ return -1;
+ if (!tail_is_min && !val->max_value)
+ return +1;
+ }
+ return 0;
+}
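
The comparison semantics can be illustrated with a self-contained, integer-only sketch (no NULL handling, probe no longer than the bound): the probe columns are compared in order, and the missing suffix is treated as minus infinity when tail_is_min is true and as MAXVALUE otherwise.

#include <cstddef>
#include <vector>

struct BoundCol { bool maxvalue; int value; };

static int cmp_prefix_to_bound(const std::vector<int> &probe,
                               const std::vector<BoundCol> &bound,
                               bool tail_is_min)
{
  std::size_t i= 0;
  for (; i < probe.size(); i++)          /* columns present in the probe    */
  {
    if (bound[i].maxvalue)
      return -1;                         /* everything sorts below MAXVALUE */
    if (probe[i] != bound[i].value)
      return probe[i] < bound[i].value ? -1 : +1;
  }
  for (; i < bound.size(); i++)          /* probe was only a prefix         */
  {
    if (tail_is_min)
      return -1;                         /* missing columns count as -inf   */
    if (!bound[i].maxvalue)
      return +1;                         /* missing columns count as +inf   */
  }
  return 0;
}

/* Example: cmp_prefix_to_bound({10}, {{false,10},{false,20}}, true) == -1 */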
+
+
typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
bool include_endpoint);
+typedef uint32 (*get_col_endpoint_func)(partition_info*, bool left_endpoint,
+ bool include_endpoint,
+ uint32 num_parts);
+
/*
Partitioning Interval Analysis: Initialize the iterator for "mapping" case
@@ -6781,18 +7355,145 @@ typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
-1 - All partitions would match (iterator not initialized)
*/
+uint32 get_partition_id_cols_range_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint,
+ uint32 nparts)
+{
+ uint max_partition= part_info->num_parts - 1;
+ uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+ part_column_list_val *range_col_array= part_info->range_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ bool tailf= !(left_endpoint ^ include_endpoint);
+ DBUG_ENTER("get_partition_id_cols_range_for_endpoint");
+
+ /* Find the matching partition for the endpoint using a binary search */
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (cmp_rec_and_tuple_prune(range_col_array + loc_part_id*num_columns,
+ nparts, tailf) >= 0)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (loc_part_id < max_partition &&
+ cmp_rec_and_tuple_prune(range_col_array + (loc_part_id+1)*num_columns,
+ nparts, tailf) >= 0
+ )
+ {
+ loc_part_id++;
+ }
+ if (left_endpoint)
+ {
+ if (cmp_rec_and_tuple_prune(range_col_array + loc_part_id*num_columns,
+ nparts, tailf) >= 0)
+ loc_part_id++;
+ }
+ else
+ {
+ if (loc_part_id < max_partition)
+ {
+ int res= cmp_rec_and_tuple_prune(range_col_array +
+ loc_part_id * num_columns,
+ nparts, tailf);
+ if (!res)
+ loc_part_id += test(include_endpoint);
+ else if (res > 0)
+ loc_part_id++;
+ }
+ loc_part_id++;
+ }
+ DBUG_RETURN(loc_part_id);
+}
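
For a single RANGE column the same endpoint-to-partition mapping can be sketched with a standard binary search; the COLUMNS version above essentially replaces the integer comparison with cmp_rec_and_tuple_prune and folds the open/closed endpoint into that comparison. A simplified, self-contained sketch:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

/*
  upper_bounds[i] is the exclusive VALUES LESS THAN bound of partition i,
  sorted ascending.
*/
static std::size_t partition_for_value(const std::vector<int> &upper_bounds,
                                       int v)
{
  /* First partition whose bound is strictly greater than v. */
  return std::upper_bound(upper_bounds.begin(), upper_bounds.end(), v) -
         upper_bounds.begin();
}

int main()
{
  std::vector<int> bounds= {10, 20, 30};  /* p0: <10, p1: <20, p2: <30     */
  std::printf("%zu %zu %zu\n",
              partition_for_value(bounds, 5),    /* 0                      */
              partition_for_value(bounds, 15),   /* 1                      */
              partition_for_value(bounds, 30));  /* 3 == num_parts: no hit */
  return 0;
}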
+
+
+int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
+ bool is_subpart,
+ uint32 *store_length_array,
+ uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
+ uint flags,
+ PARTITION_ITERATOR *part_iter)
+{
+ uint32 nparts;
+ get_col_endpoint_func get_col_endpoint;
+ DBUG_ENTER("get_part_iter_for_interval_cols_via_map");
+
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ get_col_endpoint= get_partition_id_cols_range_for_endpoint;
+ part_iter->get_next= get_next_partition_id_range;
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ get_col_endpoint= get_partition_id_cols_list_for_endpoint;
+ part_iter->get_next= get_next_partition_id_list;
+ part_iter->part_info= part_info;
+ DBUG_ASSERT(part_info->num_list_values);
+ }
+ else
+ assert(0);
+
+ if (flags & NO_MIN_RANGE)
+ part_iter->part_nums.start= part_iter->part_nums.cur= 0;
+ else
+ {
+ // Copy from min_value to record
+ nparts= store_tuple_to_record(part_info->part_field_array,
+ store_length_array,
+ min_value,
+ min_value + min_len);
+ part_iter->part_nums.start= part_iter->part_nums.cur=
+ get_col_endpoint(part_info, TRUE, !(flags & NEAR_MIN),
+ nparts);
+ }
+ if (flags & NO_MAX_RANGE)
+ {
+ if (part_info->part_type == RANGE_PARTITION)
+ part_iter->part_nums.end= part_info->num_parts;
+ else /* LIST_PARTITION */
+ {
+ DBUG_ASSERT(part_info->part_type == LIST_PARTITION);
+ part_iter->part_nums.end= part_info->num_list_values;
+ }
+ }
+ else
+ {
+ // Copy from max_value to record
+ nparts= store_tuple_to_record(part_info->part_field_array,
+ store_length_array,
+ max_value,
+ max_value + max_len);
+ part_iter->part_nums.end= get_col_endpoint(part_info, FALSE,
+ !(flags & NEAR_MAX),
+ nparts);
+ }
+ if (part_iter->part_nums.start == part_iter->part_nums.end)
+ DBUG_RETURN(0);
+ DBUG_RETURN(1);
+}
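
The iterator filled in above follows a half-open convention: part_nums.start is the first partition that can contain matching rows, part_nums.end is one past the last, and start == end means nothing matches. A trivial sketch of consuming such a range (hypothetical names, not the PARTITION_ITERATOR API):

#include <cstdio>

struct PartRange { unsigned start, end; };   /* half-open [start, end) */

static void scan_matching_partitions(const PartRange &r)
{
  if (r.start == r.end)
  {
    std::printf("no partition matches\n");
    return;
  }
  for (unsigned id= r.start; id < r.end; id++)
    std::printf("scan partition %u\n", id);
}

int main()
{
  scan_matching_partitions(PartRange{1, 3});  /* partitions 1 and 2       */
  scan_matching_partitions(PartRange{2, 2});  /* empty: everything pruned */
  return 0;
}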
+
+
int get_part_iter_for_interval_via_mapping(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array, /* ignored */
uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len, /* ignored */
uint flags,
PARTITION_ITERATOR *part_iter)
{
- DBUG_ASSERT(!is_subpart);
Field *field= part_info->part_field_array[0];
uint32 UNINIT_VAR(max_endpoint_val);
get_endpoint_func UNINIT_VAR(get_endpoint);
bool can_match_multiple_values; /* is not '=' */
uint field_len= field->pack_length_in_rec();
+ DBUG_ENTER("get_part_iter_for_interval_via_mapping");
+ DBUG_ASSERT(!is_subpart);
+ (void) store_length_array;
+ (void)min_len;
+ (void)max_len;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
if (part_info->part_type == RANGE_PARTITION)
@@ -6801,7 +7502,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
get_endpoint= get_partition_id_range_for_endpoint_charset;
else
get_endpoint= get_partition_id_range_for_endpoint;
- max_endpoint_val= part_info->no_parts;
+ max_endpoint_val= part_info->num_parts;
part_iter->get_next= get_next_partition_id_range;
}
else if (part_info->part_type == LIST_PARTITION)
@@ -6811,7 +7512,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
get_endpoint= get_list_array_idx_for_endpoint_charset;
else
get_endpoint= get_list_array_idx_for_endpoint;
- max_endpoint_val= part_info->no_list_values;
+ max_endpoint_val= part_info->num_list_values;
part_iter->get_next= get_next_partition_id_list;
part_iter->part_info= part_info;
if (max_endpoint_val == 0)
@@ -6824,7 +7525,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
part_iter->part_nums.start= part_iter->part_nums.end= 0;
part_iter->part_nums.cur= 0;
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
- return -1;
+ DBUG_RETURN(-1);
}
}
else
@@ -6856,11 +7557,11 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
{
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
- if (*max_value && !(flags & NO_MAX_RANGE))
+ if (!(flags & NO_MAX_RANGE) && *max_value)
{
/* The right bound is X <= NULL, i.e. it is a "X IS NULL" interval */
part_iter->part_nums.end= 0;
- return 1;
+ DBUG_RETURN(1);
}
}
else
@@ -6884,11 +7585,11 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
part_iter->part_nums.cur= part_iter->part_nums.start= 0;
part_iter->part_nums.end= 0;
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
- return 1;
+ DBUG_RETURN(1);
}
part_iter->part_nums.cur= part_iter->part_nums.start;
if (part_iter->part_nums.start == max_endpoint_val)
- return 0; /* No partitions */
+ DBUG_RETURN(0); /* No partitions */
}
}
@@ -6902,14 +7603,14 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp);
if (part_iter->part_nums.start >= part_iter->part_nums.end &&
!part_iter->ret_null_part)
- return 0; /* No partitions */
+ DBUG_RETURN(0); /* No partitions */
}
- return 1; /* Ok, iterator initialized */
+ DBUG_RETURN(1); /* Ok, iterator initialized */
}
/* See get_part_iter_for_interval_via_walking for definition of what this is */
-#define MAX_RANGE_TO_WALK 10
+#define MAX_RANGE_TO_WALK 32
/*
@@ -6945,16 +7646,6 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
Intervals with +inf/-inf, and [NULL, c1] interval can be processed but
that is more tricky and I don't have time to do it right now.
- Additionally we have these requirements:
- * number of values in the interval must be less then number of
- [sub]partitions, and
- * Number of values in the interval must be less then MAX_RANGE_TO_WALK.
-
- The rationale behind these requirements is that if they are not met
- we're likely to hit most of the partitions and traversing the interval
- will only add overhead. So it's better return "all partitions used" in
- that case.
-
RETURN
0 - No matching partitions, iterator not initialized
1 - Some partitions would match, iterator intialized for traversing them
@@ -6962,25 +7653,32 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
*/
int get_part_iter_for_interval_via_walking(partition_info *part_info,
- bool is_subpart,
- uchar *min_value, uchar *max_value,
- uint flags,
- PARTITION_ITERATOR *part_iter)
+ bool is_subpart,
+ uint32 *store_length_array, /* ignored */
+ uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len, /* ignored */
+ uint flags,
+ PARTITION_ITERATOR *part_iter)
{
Field *field;
uint total_parts;
partition_iter_func get_next_func;
+ DBUG_ENTER("get_part_iter_for_interval_via_walking");
+ (void)store_length_array;
+ (void)min_len;
+ (void)max_len;
+
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
if (is_subpart)
{
field= part_info->subpart_field_array[0];
- total_parts= part_info->no_subparts;
+ total_parts= part_info->num_subparts;
get_next_func= get_next_subpartition_via_walking;
}
else
{
field= part_info->part_field_array[0];
- total_parts= part_info->no_parts;
+ total_parts= part_info->num_parts;
get_next_func= get_next_partition_via_walking;
}
@@ -7000,7 +7698,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
if (!part_info->get_subpartition_id(part_info, &part_id))
{
init_single_partition_iterator(part_id, part_iter);
- return 1; /* Ok, iterator initialized */
+ DBUG_RETURN(1); /* Ok, iterator initialized */
}
}
else
@@ -7013,10 +7711,10 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
if (!res)
{
init_single_partition_iterator(part_id, part_iter);
- return 1; /* Ok, iterator initialized */
+ DBUG_RETURN(1); /* Ok, iterator initialized */
}
}
- return 0; /* No partitions match */
+ DBUG_RETURN(0); /* No partitions match */
}
if ((field->real_maybe_null() &&
@@ -7024,7 +7722,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
(!(flags & NO_MAX_RANGE) && *max_value))) || // X <? NULL
(flags & (NO_MIN_RANGE | NO_MAX_RANGE))) // -inf at any bound
{
- return -1; /* Can't handle this interval, have to use all partitions */
+ DBUG_RETURN(-1); /* Can't handle this interval, have to use all partitions */
}
/* Get integers for left and right interval bound */
@@ -7043,20 +7741,36 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
an empty interval by "wrapping around" a + 4G-1 + 1 = a.
*/
if ((ulonglong)b - (ulonglong)a == ~0ULL)
- return -1;
+ DBUG_RETURN(-1);
a += test(flags & NEAR_MIN);
b += test(!(flags & NEAR_MAX));
ulonglong n_values= b - a;
-
- if (n_values > total_parts || n_values > MAX_RANGE_TO_WALK)
- return -1;
+
+ /*
+ Will it pay off to enumerate all values in the [a..b] range and evaluate
+ the partitioning function for every value? It depends on
+ 1. whether we'll be able to infer that some partitions are not used
+ 2. whether the time saved by not scanning those partitions will be greater
+ than the time spent in enumeration.
+ We will assume that the cost of accessing one extra partition is greater
+ than the cost of O(#partitions) evaluations of the partitioning function.
+ This means we should jump at any chance to eliminate a partition, which
+ gives us this logic:
+
+ Do the enumeration if
+ - the number of values to enumerate is comparable to the number of
+ partitions, or
+ - there are not many values to enumerate.
+ */
+ if ((n_values > 2*total_parts) && n_values > MAX_RANGE_TO_WALK)
+ DBUG_RETURN(-1);
part_iter->field_vals.start= part_iter->field_vals.cur= a;
part_iter->field_vals.end= b;
part_iter->part_info= part_info;
part_iter->get_next= get_next_func;
- return 1;
+ DBUG_RETURN(1);
}
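
The relaxed condition above can be read as a small decision function; a sketch with the same arithmetic (MAX_RANGE_TO_WALK is 32 after this change):

/*
  Walk the [a..b] values only when that is likely to pay off: either the
  interval is small in absolute terms, or it is no more than about twice
  the number of partitions, so eliminating even one partition wins.
*/
static bool worth_walking(unsigned long long n_values,
                          unsigned long long total_parts,
                          unsigned long long max_range_to_walk)
{
  return !(n_values > 2 * total_parts && n_values > max_range_to_walk);
}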
@@ -7104,8 +7818,9 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
DESCRIPTION
This implementation of PARTITION_ITERATOR::get_next() is special for
- LIST partitioning: it enumerates partition ids in
- part_info->list_array[i] where i runs over [min_idx, max_idx] interval.
+ LIST partitioning: it enumerates partition ids in
+ part_info->list_array[i] (list_col_array[i*cols] for COLUMNS LIST
+ partitioning) where i runs over [min_idx, max_idx] interval.
The function conforms to partition_iter_func type.
RETURN
@@ -7127,8 +7842,16 @@ uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
return NOT_A_PARTITION_ID;
}
else
- return part_iter->part_info->list_array[part_iter->
- part_nums.cur++].partition_id;
+ {
+ partition_info *part_info= part_iter->part_info;
+ uint32 num_part= part_iter->part_nums.cur++;
+ if (part_info->column_list)
+ {
+ uint num_columns= part_info->part_field_list.elements;
+ return part_info->list_col_array[num_part*num_columns].partition_id;
+ }
+ return part_info->list_array[num_part].partition_id;
+ }
}
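
The list_col_array indexing above assumes a flattened, row-major layout: each of the num_list_values list entries stores num_columns column values back to back, and the partition id is read from the first of them. A sketch of that layout with a plain struct:

struct ColVal { unsigned partition_id; /* per-column value omitted */ };

/*
  list_col_array holds num_list_values * num_columns entries; column c of
  list value n lives at index n * num_columns + c.
*/
static unsigned list_value_partition_id(const ColVal *list_col_array,
                                        unsigned num_columns,
                                        unsigned list_value_index)
{
  return list_col_array[list_value_index * num_columns].partition_id;
}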
@@ -7269,5 +7992,17 @@ void create_subpartition_name(char *out, const char *in1,
strxmov(out, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#REN#", NullS);
}
+
+uint get_partition_field_store_length(Field *field)
+{
+ uint store_length;
+
+ store_length= field->key_length();
+ if (field->real_maybe_null())
+ store_length+= HA_KEY_NULL_LENGTH;
+ if (field->real_type() == MYSQL_TYPE_VARCHAR)
+ store_length+= HA_KEY_BLOB_LENGTH;
+ return store_length;
+}
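
A worked example of the rule above, assuming (as in my_base.h) that HA_KEY_NULL_LENGTH is 1 and HA_KEY_BLOB_LENGTH is 2: a nullable VARCHAR(10) column in a single-byte character set has key_length 10 and therefore a store length of 13.

/*
  Plain-value sketch of the same rule; the constants 1 and 2 stand in for
  HA_KEY_NULL_LENGTH and HA_KEY_BLOB_LENGTH.
*/
static unsigned store_length_sketch(unsigned key_length,
                                    bool nullable, bool is_varchar)
{
  unsigned store_length= key_length;
  if (nullable)
    store_length+= 1;      /* null indicator byte         */
  if (is_varchar)
    store_length+= 2;      /* stored length prefix bytes  */
  return store_length;     /* VARCHAR(10), nullable -> 13 */
}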
#endif