author | Mikael Ronstrom <mikael@mysql.com> | 2009-10-28 18:22:36 +0100
committer | Mikael Ronstrom <mikael@mysql.com> | 2009-10-28 18:22:36 +0100
commit | 072c13d9395b6a3f3a472633d6e817db9215d81e (patch)
tree | 0891f6df26609fa66b89a4cc8faa8079ec21f6f3 /sql
parent | cd6e15989044e5d4a708a6ee191bcd3c13dc0071 (diff)
parent | 10fed1aca0096acb135c2065233e84d61b00b9cf (diff)
download | mariadb-git-072c13d9395b6a3f3a472633d6e817db9215d81e.tar.gz
Merged WL#3352 into mysql-next-mr
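WL#3352 extends the partitioning syntax so that RANGE and LIST partitions can be defined over multiple columns, and it adds the TO_SECONDS() function. The sketch below is illustrative only (table and column names are invented): it uses the COLUMNS keyword as documented for released MySQL 5.5, whereas this merge still registers the grammar symbol as COLUMN_LIST (see sql/lex.h and sql/sql_yacc.yy in the diff below).

```sql
-- Illustrative sketch of the multi-column RANGE partitioning enabled by WL#3352.
-- Table/column names are hypothetical; released servers use the COLUMNS keyword,
-- while this merge's grammar still calls it COLUMN_LIST.
CREATE TABLE orders (
  region  INT NOT NULL,
  created DATE NOT NULL
)
PARTITION BY RANGE COLUMNS (region, created) (
  PARTITION p0 VALUES LESS THAN (10, '2009-01-01'),
  PARTITION p1 VALUES LESS THAN (20, '2009-07-01'),
  PARTITION p2 VALUES LESS THAN (MAXVALUE, MAXVALUE)
);
```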
Diffstat (limited to 'sql')
-rw-r--r-- | sql/ha_ndbcluster.cc | 28
-rw-r--r-- | sql/ha_partition.cc | 154
-rw-r--r-- | sql/ha_partition.h | 11
-rw-r--r-- | sql/item_create.cc | 22
-rw-r--r-- | sql/item_timefunc.cc | 53
-rw-r--r-- | sql/item_timefunc.h | 18
-rw-r--r-- | sql/lex.h | 1
-rw-r--r-- | sql/mysql_priv.h | 7
-rw-r--r-- | sql/opt_range.cc | 331
-rw-r--r-- | sql/partition_element.h | 38
-rw-r--r-- | sql/partition_info.cc | 1197
-rw-r--r-- | sql/partition_info.h | 82
-rw-r--r-- | sql/share/errmsg-utf8.txt | 19
-rw-r--r-- | sql/share/errmsg.txt | 18
-rw-r--r-- | sql/sql_lex.cc | 2
-rw-r--r-- | sql/sql_lex.h | 6
-rw-r--r-- | sql/sql_partition.cc | 1917
-rw-r--r-- | sql/sql_partition.h | 25
-rw-r--r-- | sql/sql_show.cc | 120
-rw-r--r-- | sql/sql_table.cc | 90
-rw-r--r-- | sql/sql_yacc.yy | 389 |
21 files changed, 3228 insertions(+), 1300 deletions(-)
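Among the changes below, item_create.cc and item_timefunc.{h,cc} register the new TO_SECONDS() function together with monotonicity information (get_monotonicity_info / val_int_endpoint), which lets the range analysis code in opt_range.cc prune partitions defined over it. A hedged usage sketch, with an invented table, is:

```sql
-- Hypothetical example: RANGE partitioning on TO_SECONDS() of a DATETIME column.
CREATE TABLE log_events (
  ev_time DATETIME NOT NULL,
  msg     VARCHAR(100)
)
PARTITION BY RANGE (TO_SECONDS(ev_time)) (
  PARTITION p2008 VALUES LESS THAN (TO_SECONDS('2009-01-01')),
  PARTITION p2009 VALUES LESS THAN (TO_SECONDS('2010-01-01')),
  PARTITION pmax  VALUES LESS THAN MAXVALUE
);

-- Because TO_SECONDS() is reported as monotonic, a range predicate on ev_time
-- should prune to a single partition (check with EXPLAIN PARTITIONS).
SELECT msg FROM log_events
WHERE ev_time BETWEEN '2009-03-01' AND '2009-03-31';
```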
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 05b081d45b8..c4bbd64b138 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -9783,7 +9783,7 @@ void ha_ndbcluster::set_auto_partitions(partition_info *part_info) int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info) { NDBTAB *tab= (NDBTAB*)tab_ref; - int32 *range_data= (int32*)my_malloc(part_info->no_parts*sizeof(int32), + int32 *range_data= (int32*)my_malloc(part_info->num_parts*sizeof(int32), MYF(0)); uint i; int error= 0; @@ -9792,17 +9792,17 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info) if (!range_data) { - mem_alloc_error(part_info->no_parts*sizeof(int32)); + mem_alloc_error(part_info->num_parts*sizeof(int32)); DBUG_RETURN(1); } - for (i= 0; i < part_info->no_parts; i++) + for (i= 0; i < part_info->num_parts; i++) { longlong range_val= part_info->range_int_array[i]; if (unsigned_flag) range_val-= 0x8000000000000000ULL; if (range_val < INT_MIN32 || range_val >= INT_MAX32) { - if ((i != part_info->no_parts - 1) || + if ((i != part_info->num_parts - 1) || (range_val != LONGLONG_MAX)) { my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); @@ -9813,7 +9813,7 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info) } range_data[i]= (int32)range_val; } - tab->setRangeListData(range_data, sizeof(int32)*part_info->no_parts); + tab->setRangeListData(range_data, sizeof(int32)*part_info->num_parts); error: my_free((char*)range_data, MYF(0)); DBUG_RETURN(error); @@ -9822,7 +9822,7 @@ error: int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info) { NDBTAB *tab= (NDBTAB*)tab_ref; - int32 *list_data= (int32*)my_malloc(part_info->no_list_values * 2 + int32 *list_data= (int32*)my_malloc(part_info->num_list_values * 2 * sizeof(int32), MYF(0)); uint32 *part_id, i; int error= 0; @@ -9831,10 +9831,10 @@ int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info) if (!list_data) { - mem_alloc_error(part_info->no_list_values*2*sizeof(int32)); + mem_alloc_error(part_info->num_list_values*2*sizeof(int32)); DBUG_RETURN(1); } - for (i= 0; i < part_info->no_list_values; i++) + for (i= 0; i < part_info->num_list_values; i++) { LIST_PART_ENTRY *list_entry= &part_info->list_array[i]; longlong list_val= list_entry->list_value; @@ -9850,7 +9850,7 @@ int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info) part_id= (uint32*)&list_data[2*i+1]; *part_id= list_entry->partition_id; } - tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->no_list_values); + tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->num_list_values); error: my_free((char*)list_data, MYF(0)); DBUG_RETURN(error); @@ -9972,11 +9972,11 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, ng= 0; ts_names[fd_index]= part_elem->tablespace_name; frag_data[fd_index++]= ng; - } while (++j < part_info->no_subparts); + } while (++j < part_info->num_subparts); } first= FALSE; - } while (++i < part_info->no_parts); - tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions); + } while (++i < part_info->num_parts); + tab->setDefaultNoPartitionsFlag(part_info->use_default_num_partitions); tab->setLinearFlag(part_info->linear_hash_ind); { ha_rows max_rows= table_share->max_rows; @@ -10370,7 +10370,7 @@ ndberror2: } -bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) +bool ha_ndbcluster::get_no_parts(const char *name, uint *num_parts) { Ndb *ndb; NDBDICT *dict; @@ -10392,7 +10392,7 @@ bool 
ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) Ndb_table_guard ndbtab_g(dict= ndb->getDictionary(), m_tabname); if (!ndbtab_g.get_table()) ERR_BREAK(dict->getNdbError(), err); - *no_parts= ndbtab_g.get_table()->getFragmentCount(); + *num_parts= ndbtab_g.get_table()->getFragmentCount(); DBUG_RETURN(FALSE); } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 5ebe5c2faea..b5bba25315c 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1,4 +1,4 @@ -/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -245,7 +245,7 @@ void ha_partition::init_handler_variables() /* this allows blackhole to work properly */ - m_no_locks= 0; + m_num_locks= 0; #ifdef DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -580,8 +580,8 @@ int ha_partition::drop_partitions(const char *path) { List_iterator<partition_element> part_it(m_part_info->partitions); char part_name_buff[FN_REFLEN]; - uint no_parts= m_part_info->partitions.elements; - uint no_subparts= m_part_info->no_subparts; + uint num_parts= m_part_info->partitions.elements; + uint num_subparts= m_part_info->num_subparts; uint i= 0; uint name_variant; int ret_error; @@ -611,7 +611,7 @@ int ha_partition::drop_partitions(const char *path) do { partition_element *sub_elem= sub_it++; - part= i * no_subparts + j; + part= i * num_subparts + j; create_subpartition_name(part_name_buff, path, part_elem->partition_name, sub_elem->partition_name, name_variant); @@ -621,7 +621,7 @@ int ha_partition::drop_partitions(const char *path) error= ret_error; if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos)) error= 1; - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -640,7 +640,7 @@ int ha_partition::drop_partitions(const char *path) else part_elem->part_state= PART_IS_DROPPED; } - } while (++i < no_parts); + } while (++i < num_parts); VOID(sync_ddl_log()); DBUG_RETURN(error); } @@ -671,9 +671,9 @@ int ha_partition::rename_partitions(const char *path) List_iterator<partition_element> temp_it(m_part_info->temp_partitions); char part_name_buff[FN_REFLEN]; char norm_name_buff[FN_REFLEN]; - uint no_parts= m_part_info->partitions.elements; + uint num_parts= m_part_info->partitions.elements; uint part_count= 0; - uint no_subparts= m_part_info->no_subparts; + uint num_subparts= m_part_info->num_subparts; uint i= 0; uint j= 0; int error= 0; @@ -722,7 +722,7 @@ int ha_partition::rename_partitions(const char *path) error= 1; else sub_elem->log_entry= NULL; /* Indicate success */ - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -778,7 +778,7 @@ int ha_partition::rename_partitions(const char *path) do { sub_elem= sub_it++; - part= i * no_subparts + j; + part= i * num_subparts + j; create_subpartition_name(norm_name_buff, path, part_elem->partition_name, sub_elem->partition_name, @@ -807,7 +807,7 @@ int ha_partition::rename_partitions(const char *path) error= 1; else sub_elem->log_entry= NULL; - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -839,7 +839,7 @@ int ha_partition::rename_partitions(const char *path) part_elem->log_entry= NULL; } } - } while (++i < no_parts); + } while (++i < num_parts); VOID(sync_ddl_log()); DBUG_RETURN(error); } @@ -1093,8 +1093,8 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, uint flag) { 
List_iterator<partition_element> part_it(m_part_info->partitions); - uint no_parts= m_part_info->no_parts; - uint no_subparts= m_part_info->no_subparts; + uint num_parts= m_part_info->num_parts; + uint num_subparts= m_part_info->num_subparts; uint i= 0; int error; DBUG_ENTER("ha_partition::handle_opt_partitions"); @@ -1118,7 +1118,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, do { sub_elem= subpart_it++; - part= i * no_subparts + j; + part= i * num_subparts + j; DBUG_PRINT("info", ("Optimize subpartition %u (%s)", part, sub_elem->partition_name)); #ifdef NOT_USED @@ -1148,7 +1148,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, } while (part_elem= part_it++); DBUG_RETURN(error); } - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -1183,7 +1183,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, } part_elem->part_state= PART_NORMAL; } - } while (++i < no_parts); + } while (++i < num_parts); DBUG_RETURN(FALSE); } @@ -1387,10 +1387,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, List_iterator<partition_element> part_it(m_part_info->partitions); List_iterator <partition_element> t_it(m_part_info->temp_partitions); char part_name_buff[FN_REFLEN]; - uint no_parts= m_part_info->partitions.elements; - uint no_subparts= m_part_info->no_subparts; + uint num_parts= m_part_info->partitions.elements; + uint num_subparts= m_part_info->num_subparts; uint i= 0; - uint no_remain_partitions, part_count, orig_count; + uint num_remain_partitions, part_count, orig_count; handler **new_file_array; int error= 1; bool first; @@ -1406,7 +1406,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_name_buff))); m_reorged_parts= 0; if (!m_part_info->is_sub_partitioned()) - no_subparts= 1; + num_subparts= 1; /* Step 1: @@ -1415,7 +1415,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, */ if (temp_partitions) { - m_reorged_parts= temp_partitions * no_subparts; + m_reorged_parts= temp_partitions * num_subparts; } else { @@ -1425,9 +1425,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, if (part_elem->part_state == PART_CHANGED || part_elem->part_state == PART_REORGED_DROPPED) { - m_reorged_parts+= no_subparts; + m_reorged_parts+= num_subparts; } - } while (++i < no_parts); + } while (++i < num_parts); } if (m_reorged_parts && !(m_reorged_file= (handler**)sql_calloc(sizeof(handler*)* @@ -1442,10 +1442,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, Calculate number of partitions after change and allocate space for their handler references. 
*/ - no_remain_partitions= 0; + num_remain_partitions= 0; if (temp_partitions) { - no_remain_partitions= no_parts * no_subparts; + num_remain_partitions= num_parts * num_subparts; } else { @@ -1458,17 +1458,17 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_elem->part_state == PART_TO_BE_ADDED || part_elem->part_state == PART_CHANGED) { - no_remain_partitions+= no_subparts; + num_remain_partitions+= num_subparts; } - } while (++i < no_parts); + } while (++i < num_parts); } if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)* - (2*(no_remain_partitions + 1))))) + (2*(num_remain_partitions + 1))))) { - mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1)); + mem_alloc_error(sizeof(handler*)*2*(num_remain_partitions+1)); DBUG_RETURN(ER_OUTOFMEMORY); } - m_added_file= &new_file_array[no_remain_partitions + 1]; + m_added_file= &new_file_array[num_remain_partitions + 1]; /* Step 3: @@ -1487,9 +1487,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_elem->part_state == PART_REORGED_DROPPED) { memcpy((void*)&m_reorged_file[part_count], - (void*)&m_file[i*no_subparts], - sizeof(handler*)*no_subparts); - part_count+= no_subparts; + (void*)&m_file[i*num_subparts], + sizeof(handler*)*num_subparts); + part_count+= num_subparts; } else if (first && temp_partitions && part_elem->part_state == PART_TO_BE_ADDED) @@ -1504,11 +1504,11 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, ones used to be. */ first= FALSE; - DBUG_ASSERT(((i*no_subparts) + m_reorged_parts) <= m_file_tot_parts); - memcpy((void*)m_reorged_file, &m_file[i*no_subparts], + DBUG_ASSERT(((i*num_subparts) + m_reorged_parts) <= m_file_tot_parts); + memcpy((void*)m_reorged_file, &m_file[i*num_subparts], sizeof(handler*)*m_reorged_parts); } - } while (++i < no_parts); + } while (++i < num_parts); } /* @@ -1526,11 +1526,11 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, partition_element *part_elem= part_it++; if (part_elem->part_state == PART_NORMAL) { - DBUG_ASSERT(orig_count + no_subparts <= m_file_tot_parts); + DBUG_ASSERT(orig_count + num_subparts <= m_file_tot_parts); memcpy((void*)&new_file_array[part_count], (void*)&m_file[orig_count], - sizeof(handler*)*no_subparts); - part_count+= no_subparts; - orig_count+= no_subparts; + sizeof(handler*)*num_subparts); + part_count+= num_subparts; + orig_count+= num_subparts; } else if (part_elem->part_state == PART_CHANGED || part_elem->part_state == PART_TO_BE_ADDED) @@ -1546,16 +1546,16 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, mem_alloc_error(sizeof(handler)); DBUG_RETURN(ER_OUTOFMEMORY); } - } while (++j < no_subparts); + } while (++j < num_subparts); if (part_elem->part_state == PART_CHANGED) - orig_count+= no_subparts; + orig_count+= num_subparts; else if (temp_partitions && first) { - orig_count+= (no_subparts * temp_partitions); + orig_count+= (num_subparts * temp_partitions); first= FALSE; } } - } while (++i < no_parts); + } while (++i < num_parts); first= FALSE; /* Step 5: @@ -1592,7 +1592,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_elem->partition_name, sub_elem->partition_name, name_variant); - part= i * no_subparts + j; + part= i * num_subparts + j; DBUG_PRINT("info", ("Add subpartition %s", part_name_buff)); if ((error= prepare_new_partition(table, create_info, new_file_array[part], @@ -1603,7 +1603,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, DBUG_RETURN(error); } m_added_file[part_count++]= 
new_file_array[part]; - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -1622,7 +1622,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, m_added_file[part_count++]= new_file_array[i]; } } - } while (++i < no_parts); + } while (++i < num_parts); /* Step 6: @@ -1639,7 +1639,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_elem->part_state= PART_IS_CHANGED; else if (part_elem->part_state == PART_REORGED_DROPPED) part_elem->part_state= PART_TO_BE_DROPPED; - } while (++i < no_parts); + } while (++i < num_parts); for (i= 0; i < temp_partitions; i++) { partition_element *part_elem= t_it++; @@ -1680,9 +1680,9 @@ int ha_partition::copy_partitions(ulonglong * const copied, if (m_part_info->linear_hash_ind) { if (m_part_info->part_type == HASH_PARTITION) - set_linear_hash_mask(m_part_info, m_part_info->no_parts); + set_linear_hash_mask(m_part_info, m_part_info->num_parts); else - set_linear_hash_mask(m_part_info, m_part_info->no_subparts); + set_linear_hash_mask(m_part_info, m_part_info->num_subparts); } while (reorg_part < m_reorged_parts) @@ -1961,7 +1961,7 @@ partition_element *ha_partition::find_partition_element(uint part_id) uint curr_part_id= 0; List_iterator_fast <partition_element> part_it(m_part_info->partitions); - for (i= 0; i < m_part_info->no_parts; i++) + for (i= 0; i < m_part_info->num_parts; i++) { partition_element *part_elem; part_elem= part_it++; @@ -1969,7 +1969,7 @@ partition_element *ha_partition::find_partition_element(uint part_id) { uint j; List_iterator_fast <partition_element> sub_it(part_elem->subpartitions); - for (j= 0; j < m_part_info->no_subparts; j++) + for (j= 0; j < m_part_info->num_subparts; j++) { part_elem= sub_it++; if (part_id == curr_part_id++) @@ -2090,7 +2090,7 @@ bool ha_partition::create_handler_file(const char *name) { partition_element *part_elem, *subpart_elem; uint i, j, part_name_len, subpart_name_len; - uint tot_partition_words, tot_name_len, no_parts; + uint tot_partition_words, tot_name_len, num_parts; uint tot_parts= 0; uint tot_len_words, tot_len_byte, chksum, tot_name_words; char *name_buffer_ptr; @@ -2103,11 +2103,11 @@ bool ha_partition::create_handler_file(const char *name) List_iterator_fast <partition_element> part_it(m_part_info->partitions); DBUG_ENTER("create_handler_file"); - no_parts= m_part_info->partitions.elements; - DBUG_PRINT("info", ("table name = %s, no_parts = %u", name, - no_parts)); + num_parts= m_part_info->partitions.elements; + DBUG_PRINT("info", ("table name = %s, num_parts = %u", name, + num_parts)); tot_name_len= 0; - for (i= 0; i < no_parts; i++) + for (i= 0; i < num_parts; i++) { part_elem= part_it++; if (part_elem->part_state != PART_NORMAL && @@ -2125,7 +2125,7 @@ bool ha_partition::create_handler_file(const char *name) else { List_iterator_fast <partition_element> sub_it(part_elem->subpartitions); - for (j= 0; j < m_part_info->no_subparts; j++) + for (j= 0; j < m_part_info->num_subparts; j++) { subpart_elem= sub_it++; tablename_to_filename(subpart_elem->partition_name, @@ -2159,7 +2159,7 @@ bool ha_partition::create_handler_file(const char *name) engine_array= (file_buffer + 12); name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); part_it.rewind(); - for (i= 0; i < no_parts; i++) + for (i= 0; i < num_parts; i++) { part_elem= part_it++; if (part_elem->part_state != PART_NORMAL && @@ -2177,7 +2177,7 @@ bool ha_partition::create_handler_file(const char *name) else { List_iterator_fast <partition_element> 
sub_it(part_elem->subpartitions); - for (j= 0; j < m_part_info->no_subparts; j++) + for (j= 0; j < m_part_info->num_subparts; j++) { subpart_elem= sub_it++; tablename_to_filename(part_elem->partition_name, part_name, @@ -2313,7 +2313,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) } m_file_tot_parts= m_tot_parts; bzero((char*) m_file, alloc_len); - DBUG_ASSERT(m_part_info->no_parts > 0); + DBUG_ASSERT(m_part_info->num_parts > 0); i= 0; part_count= 0; @@ -2326,7 +2326,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) part_elem= part_it++; if (m_is_sub_partitioned) { - for (j= 0; j < m_part_info->no_subparts; j++) + for (j= 0; j < m_part_info->num_subparts; j++) { if (!(m_file[part_count++]= get_new_handler(table_share, mem_root, part_elem->engine_type))) @@ -2343,7 +2343,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type))); } - } while (++i < m_part_info->no_parts); + } while (++i < m_part_info->num_parts); if (part_elem->engine_type == myisam_hton) { DBUG_PRINT("info", ("MyISAM")); @@ -2546,7 +2546,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, test_if_locked))) goto err_handler; - m_no_locks+= (*file)->lock_count(); + m_num_locks+= (*file)->lock_count(); name_buffer_ptr+= strlen(name_buffer_ptr) + 1; set_if_bigger(ref_length, ((*file)->ref_length)); /* @@ -2893,8 +2893,8 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) uint ha_partition::lock_count() const { DBUG_ENTER("ha_partition::lock_count"); - DBUG_PRINT("info", ("m_no_locks %d", m_no_locks)); - DBUG_RETURN(m_no_locks); + DBUG_PRINT("info", ("m_num_locks %d", m_num_locks)); + DBUG_RETURN(m_num_locks); } @@ -3305,13 +3305,13 @@ int ha_partition::delete_all_rows() /* ALTER TABLE t TRUNCATE PARTITION ... */ List_iterator<partition_element> part_it(m_part_info->partitions); int saved_error= 0; - uint no_parts= m_part_info->no_parts; - uint no_subparts= m_part_info->no_subparts; + uint num_parts= m_part_info->num_parts; + uint num_subparts= m_part_info->num_subparts; uint i= 0; - uint no_parts_set= alter_info->partition_names.elements; - uint no_parts_found= set_part_state(alter_info, m_part_info, - PART_ADMIN); - if (no_parts_set != no_parts_found && + uint num_parts_set= alter_info->partition_names.elements; + uint num_parts_found= set_part_state(alter_info, m_part_info, + PART_ADMIN); + if (num_parts_set != num_parts_found && (!(alter_info->flags & ALTER_ALL_PARTITION))) DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); @@ -3334,7 +3334,7 @@ int ha_partition::delete_all_rows() do { sub_elem= subpart_it++; - part= i * no_subparts + j; + part= i * num_subparts + j; bitmap_set_bit(&m_part_info->used_partitions, part); if (!saved_error) { @@ -3348,7 +3348,7 @@ int ha_partition::delete_all_rows() error != HA_ERR_WRONG_COMMAND) saved_error= error; } - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -3368,7 +3368,7 @@ int ha_partition::delete_all_rows() } part_elem->part_state= PART_NORMAL; } - } while (++i < no_parts); + } while (++i < num_parts); DBUG_RETURN(saved_error); } truncate= TRUE; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index c62318daf84..c48978623d1 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -1,7 +1,7 @@ #ifndef HA_PARTITION_INCLUDED #define HA_PARTITION_INCLUDED -/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc. 
+/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -22,7 +22,8 @@ enum partition_keywords { - PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR + PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR, + PKW_COLUMNS }; /* @@ -114,7 +115,7 @@ private: uint m_reorged_parts; // Number of reorganised parts uint m_tot_parts; // Total number of partitions; - uint m_no_locks; // For engines like ha_blackhole, which needs no locks + uint m_num_locks; // For engines like ha_blackhole, which needs no locks uint m_last_part; // Last file that we update,write,read int m_lock_type; // Remembers type of last // external_lock @@ -246,10 +247,10 @@ public: size_t pack_frm_len); virtual int drop_partitions(const char *path); virtual int rename_partitions(const char *path); - bool get_no_parts(const char *name, uint *no_parts) + bool get_no_parts(const char *name, uint *num_parts) { DBUG_ENTER("ha_partition::get_no_parts"); - *no_parts= m_tot_parts; + *num_parts= m_tot_parts; DBUG_RETURN(0); } virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share); diff --git a/sql/item_create.cc b/sql/item_create.cc index 2f80d16b928..5e419d40152 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -2052,6 +2052,18 @@ protected: virtual ~Create_func_to_days() {} }; +class Create_func_to_seconds : public Create_func_arg1 +{ +public: + virtual Item* create(THD *thd, Item *arg1); + + static Create_func_to_seconds s_singleton; + +protected: + Create_func_to_seconds() {} + virtual ~Create_func_to_seconds() {} +}; + #ifdef HAVE_SPATIAL class Create_func_touches : public Create_func_arg2 @@ -4505,6 +4517,15 @@ Create_func_to_days::create(THD *thd, Item *arg1) } +Create_func_to_seconds Create_func_to_seconds::s_singleton; + +Item* +Create_func_to_seconds::create(THD *thd, Item *arg1) +{ + return new (thd->mem_root) Item_func_to_seconds(arg1); +} + + #ifdef HAVE_SPATIAL Create_func_touches Create_func_touches::s_singleton; @@ -4942,6 +4963,7 @@ static Native_func_registry func_array[] = { { C_STRING_WITH_LEN("TIME_TO_SEC") }, BUILDER(Create_func_time_to_sec)}, { { C_STRING_WITH_LEN("TOUCHES") }, GEOM_BUILDER(Create_func_touches)}, { { C_STRING_WITH_LEN("TO_DAYS") }, BUILDER(Create_func_to_days)}, + { { C_STRING_WITH_LEN("TO_SECONDS") }, BUILDER(Create_func_to_seconds)}, { { C_STRING_WITH_LEN("UCASE") }, BUILDER(Create_func_ucase)}, { { C_STRING_WITH_LEN("UNCOMPRESS") }, BUILDER(Create_func_uncompress)}, { { C_STRING_WITH_LEN("UNCOMPRESSED_LENGTH") }, BUILDER(Create_func_uncompressed_length)}, diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index eefc8429f5e..84ddc88487d 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -936,6 +936,48 @@ longlong Item_func_to_days::val_int() } +longlong Item_func_to_seconds::val_int_endpoint(bool left_endp, + bool *incl_endp) +{ + DBUG_ASSERT(fixed == 1); + MYSQL_TIME ltime; + longlong seconds; + longlong days; + int dummy; /* unused */ + if (get_arg0_date(<ime, TIME_FUZZY_DATE)) + { + /* got NULL, leave the incl_endp intact */ + return LONGLONG_MIN; + } + seconds= ltime.hour * 3600L + ltime.minute * 60 + ltime.second; + seconds= ltime.neg ? 
-seconds : seconds; + days= (longlong) calc_daynr(ltime.year, ltime.month, ltime.day); + seconds+= days * 24L * 3600L; + /* Set to NULL if invalid date, but keep the value */ + null_value= check_date(<ime, + (ltime.year || ltime.month || ltime.day), + (TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE), + &dummy); + /* + Even if the evaluation return NULL, seconds is useful for pruning + */ + return seconds; +} + +longlong Item_func_to_seconds::val_int() +{ + DBUG_ASSERT(fixed == 1); + MYSQL_TIME ltime; + longlong seconds; + longlong days; + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) + return 0; + seconds= ltime.hour * 3600L + ltime.minute * 60 + ltime.second; + seconds=ltime.neg ? -seconds : seconds; + days= (longlong) calc_daynr(ltime.year, ltime.month, ltime.day); + return seconds + days * 24L * 3600L; +} + /* Get information about this Item tree monotonicity @@ -962,6 +1004,17 @@ enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const return NON_MONOTONIC; } +enum_monotonicity_info Item_func_to_seconds::get_monotonicity_info() const +{ + if (args[0]->type() == Item::FIELD_ITEM) + { + if (args[0]->field_type() == MYSQL_TYPE_DATE || + args[0]->field_type() == MYSQL_TYPE_DATETIME) + return MONOTONIC_STRICT_INCREASING_NOT_NULL; + } + return NON_MONOTONIC; +} + longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp) { diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 7f1b2ed3a53..caf91ae5c2c 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -76,6 +76,24 @@ public: }; +class Item_func_to_seconds :public Item_int_func +{ +public: + Item_func_to_seconds(Item *a) :Item_int_func(a) {} + longlong val_int(); + const char *func_name() const { return "to_seconds"; } + void fix_length_and_dec() + { + decimals=0; + max_length=6*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null=1; + } + enum_monotonicity_info get_monotonicity_info() const; + longlong val_int_endpoint(bool left_endp, bool *incl_endp); + bool check_partition_func_processor(uchar *bool_arg) { return FALSE;} +}; + + class Item_func_dayofmonth :public Item_int_func { public: diff --git a/sql/lex.h b/sql/lex.h index cd0c042159f..1a119d2a30b 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -119,6 +119,7 @@ static SYMBOL symbols[] = { { "COLUMN", SYM(COLUMN_SYM)}, { "COLUMN_NAME", SYM(COLUMN_NAME_SYM)}, { "COLUMNS", SYM(COLUMNS)}, + { "COLUMN_LIST", SYM(COLUMN_LIST_SYM)}, { "COMMENT", SYM(COMMENT_SYM)}, { "COMMIT", SYM(COMMIT_SYM)}, { "COMMITTED", SYM(COMMITTED_SYM)}, diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 05a79957963..1cf7862f6b5 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1153,6 +1153,8 @@ int prepare_create_field(Create_field *sql_field, uint *blob_columns, int *timestamps, int *timestamps_with_niladic, longlong table_flags); +CHARSET_INFO* get_sql_field_charset(Create_field *sql_field, + HA_CREATE_INFO *create_info); bool mysql_create_table(THD *thd,const char *db, const char *table_name, HA_CREATE_INFO *create_info, Alter_info *alter_info, @@ -1542,6 +1544,11 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, handlerton *old_db_type, bool *partition_changed, uint *fast_alter_partition); +char *generate_partition_syntax(partition_info *part_info, + uint *buf_length, bool use_sql_alloc, + bool show_partition_options, + HA_CREATE_INFO *create_info, + Alter_info *alter_info); #endif /* bits for last argument to remove_table_from_cache() */ diff --git a/sql/opt_range.cc b/sql/opt_range.cc index e1c7d4b38aa..c2c91881960 100644 --- a/sql/opt_range.cc +++ 
b/sql/opt_range.cc @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -438,35 +438,55 @@ public: return 0; } - /* returns a number of keypart values appended to the key buffer */ - int store_min_key(KEY_PART *key, uchar **range_key, uint *range_key_flag) + /* + Returns a number of keypart values appended to the key buffer + for min key and max key. This function is used by both Range + Analysis and Partition pruning. For partition pruning we have + to ensure that we don't store also subpartition fields. Thus + we have to stop at the last partition part and not step into + the subpartition fields. For Range Analysis we set last_part + to MAX_KEY which we should never reach. + */ + int store_min_key(KEY_PART *key, + uchar **range_key, + uint *range_key_flag, + uint last_part) { SEL_ARG *key_tree= first(); uint res= key_tree->store_min(key[key_tree->part].store_length, range_key, *range_key_flag); *range_key_flag|= key_tree->min_flag; if (key_tree->next_key_part && + key_tree->part != last_part && key_tree->next_key_part->part == key_tree->part+1 && !(*range_key_flag & (NO_MIN_RANGE | NEAR_MIN)) && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) - res+= key_tree->next_key_part->store_min_key(key, range_key, - range_key_flag); + res+= key_tree->next_key_part->store_min_key(key, + range_key, + range_key_flag, + last_part); return res; } /* returns a number of keypart values appended to the key buffer */ - int store_max_key(KEY_PART *key, uchar **range_key, uint *range_key_flag) + int store_max_key(KEY_PART *key, + uchar **range_key, + uint *range_key_flag, + uint last_part) { SEL_ARG *key_tree= last(); uint res=key_tree->store_max(key[key_tree->part].store_length, range_key, *range_key_flag); (*range_key_flag)|= key_tree->max_flag; if (key_tree->next_key_part && + key_tree->part != last_part && key_tree->next_key_part->part == key_tree->part+1 && !(*range_key_flag & (NO_MAX_RANGE | NEAR_MAX)) && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) - res+= key_tree->next_key_part->store_max_key(key, range_key, - range_key_flag); + res+= key_tree->next_key_part->store_max_key(key, + range_key, + range_key_flag, + last_part); return res; } @@ -634,6 +654,14 @@ public: using_real_indexes==TRUE */ uint real_keynr[MAX_KEY]; + + /* + Used to store 'current key tuples', in both range analysis and + partitioning (list) analysis + */ + uchar min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH], + max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; + /* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */ uint alloced_sel_args; }; @@ -645,8 +673,6 @@ public: longlong baseflag; uint max_key_part, range_count; - uchar min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH], - max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; bool quick; // Don't calulate possible keys uint fields_bitmap_size; @@ -2604,6 +2630,8 @@ typedef struct st_part_prune_param /* Same as above for subpartitioning */ my_bool *is_subpart_keypart; + my_bool ignore_part_fields; /* Ignore rest of partioning fields */ + /*************************************************************** Following fields form find_used_partitions() recursion context: **************************************************************/ @@ -2617,8 +2645,13 @@ typedef struct st_part_prune_param /* Iterator to be used to obtain the "current" set of used partitions 
*/ PARTITION_ITERATOR part_iter; - /* Initialized bitmap of no_subparts size */ + /* Initialized bitmap of num_subparts size */ MY_BITMAP subparts_bitmap; + + uchar *cur_min_key; + uchar *cur_max_key; + + uint cur_min_flag, cur_max_flag; } PART_PRUNE_PARAM; static bool create_partition_index_description(PART_PRUNE_PARAM *prune_par); @@ -2736,6 +2769,11 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) prune_param.arg_stack_end= prune_param.arg_stack; prune_param.cur_part_fields= 0; prune_param.cur_subpart_fields= 0; + + prune_param.cur_min_key= prune_param.range_param.min_key; + prune_param.cur_max_key= prune_param.range_param.max_key; + prune_param.cur_min_flag= prune_param.cur_max_flag= 0; + init_all_partitions_iterator(part_info, &prune_param.part_iter); if (!tree->keys[0] || (-1 == (res= find_used_partitions(&prune_param, tree->keys[0])))) @@ -2873,8 +2911,8 @@ static void mark_full_partition_used_no_parts(partition_info* part_info, static void mark_full_partition_used_with_parts(partition_info *part_info, uint32 part_id) { - uint32 start= part_id * part_info->no_subparts; - uint32 end= start + part_info->no_subparts; + uint32 start= part_id * part_info->num_subparts; + uint32 end= start + part_info->num_subparts; DBUG_ENTER("mark_full_partition_used_with_parts"); for (; start != end; start++) @@ -2972,6 +3010,11 @@ int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, SEL_IMERGE *imerge) ppar->arg_stack_end= ppar->arg_stack; ppar->cur_part_fields= 0; ppar->cur_subpart_fields= 0; + + ppar->cur_min_key= ppar->range_param.min_key; + ppar->cur_max_key= ppar->range_param.max_key; + ppar->cur_min_flag= ppar->cur_max_flag= 0; + init_all_partitions_iterator(ppar->part_info, &ppar->part_iter); SEL_ARG *key_tree= (*ptree)->keys[0]; if (!key_tree || (-1 == (res |= find_used_partitions(ppar, key_tree)))) @@ -3095,9 +3138,13 @@ static int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) { int res, left_res=0, right_res=0; - int partno= (int)key_tree->part; - bool pushed= FALSE; + int key_tree_part= (int)key_tree->part; bool set_full_part_if_bad_ret= FALSE; + bool ignore_part_fields= ppar->ignore_part_fields; + bool did_set_ignore_part_fields= FALSE; + + if (check_stack_overrun(ppar->range_param.thd, 3*STACK_MIN_SIZE, NULL)) + return -1; if (key_tree->left != &null_element) { @@ -3105,40 +3152,159 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) return -1; } + /* Push SEL_ARG's to stack to enable looking backwards as well */ + ppar->cur_part_fields+= ppar->is_part_keypart[key_tree_part]; + ppar->cur_subpart_fields+= ppar->is_subpart_keypart[key_tree_part]; + *(ppar->arg_stack_end++)= key_tree; + if (key_tree->type == SEL_ARG::KEY_RANGE) { - if (partno == 0 && (NULL != ppar->part_info->get_part_iter_for_interval)) + if (ppar->part_info->get_part_iter_for_interval && + key_tree->part <= ppar->last_part_partno) { - /* - Partitioning is done by RANGE|INTERVAL(monotonic_expr(fieldX)), and - we got "const1 CMP fieldX CMP const2" interval <-- psergey-todo: change + if (ignore_part_fields) + { + /* + We come here when a condition on the first partitioning + fields led to evaluating the partitioning condition + (due to finding a condition of the type a < const or + b > const). Thus we must ignore the rest of the + partitioning fields but we still want to analyse the + subpartitioning fields. 
+ */ + if (key_tree->next_key_part) + res= find_used_partitions(ppar, key_tree->next_key_part); + else + res= -1; + goto pop_and_go_right; + } + /* Collect left and right bound, their lengths and flags */ + uchar *min_key= ppar->cur_min_key; + uchar *max_key= ppar->cur_max_key; + uchar *tmp_min_key= min_key; + uchar *tmp_max_key= max_key; + key_tree->store_min(ppar->key[key_tree->part].store_length, + &tmp_min_key, ppar->cur_min_flag); + key_tree->store_max(ppar->key[key_tree->part].store_length, + &tmp_max_key, ppar->cur_max_flag); + uint flag; + if (key_tree->next_key_part && + key_tree->next_key_part->part == key_tree->part+1 && + key_tree->next_key_part->part <= ppar->last_part_partno && + key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) + { + /* + There are more key parts for partition pruning to handle + This mainly happens when the condition is an equality + condition. + */ + if ((tmp_min_key - min_key) == (tmp_max_key - max_key) && + (memcmp(min_key, max_key, (uint)(tmp_max_key - max_key)) == 0) && + !key_tree->min_flag && !key_tree->max_flag) + { + /* Set 'parameters' */ + ppar->cur_min_key= tmp_min_key; + ppar->cur_max_key= tmp_max_key; + uint save_min_flag= ppar->cur_min_flag; + uint save_max_flag= ppar->cur_max_flag; + + ppar->cur_min_flag|= key_tree->min_flag; + ppar->cur_max_flag|= key_tree->max_flag; + + res= find_used_partitions(ppar, key_tree->next_key_part); + + /* Restore 'parameters' back */ + ppar->cur_min_key= min_key; + ppar->cur_max_key= max_key; + + ppar->cur_min_flag= save_min_flag; + ppar->cur_max_flag= save_max_flag; + goto pop_and_go_right; + } + /* We have arrived at the last field in the partition pruning */ + uint tmp_min_flag= key_tree->min_flag, + tmp_max_flag= key_tree->max_flag; + if (!tmp_min_flag) + key_tree->next_key_part->store_min_key(ppar->key, + &tmp_min_key, + &tmp_min_flag, + ppar->last_part_partno); + if (!tmp_max_flag) + key_tree->next_key_part->store_max_key(ppar->key, + &tmp_max_key, + &tmp_max_flag, + ppar->last_part_partno); + flag= tmp_min_flag | tmp_max_flag; + } + else + flag= key_tree->min_flag | key_tree->max_flag; + + if (tmp_min_key != ppar->range_param.min_key) + flag&= ~NO_MIN_RANGE; + else + flag|= NO_MIN_RANGE; + if (tmp_max_key != ppar->range_param.max_key) + flag&= ~NO_MAX_RANGE; + else + flag|= NO_MAX_RANGE; + + /* + We need to call the interval mapper if we have a condition which + makes sense to prune on. In the example of a COLUMN_LIST on a and + b it makes sense if we have a condition on a, or conditions on + both a and b. If we only have conditions on b it might make sense + but this is a harder case we will solve later. For the harder case + this clause then turns into use of all partitions and thus we + simply set res= -1 as if the mapper had returned that. + TODO: What to do here is defined in WL#4065. */ - DBUG_EXECUTE("info", dbug_print_segment_range(key_tree, - ppar->range_param. 
- key_parts);); - res= ppar->part_info-> - get_part_iter_for_interval(ppar->part_info, - FALSE, - key_tree->min_value, - key_tree->max_value, - key_tree->min_flag | key_tree->max_flag, - &ppar->part_iter); - if (!res) - goto go_right; /* res==0 --> no satisfying partitions */ + if (ppar->arg_stack[0]->part == 0) + { + uint32 i; + uint32 store_length_array[MAX_KEY]; + uint32 num_keys= ppar->part_fields; + + for (i= 0; i < num_keys; i++) + store_length_array[i]= ppar->key[i].store_length; + res= ppar->part_info-> + get_part_iter_for_interval(ppar->part_info, + FALSE, + store_length_array, + ppar->range_param.min_key, + ppar->range_param.max_key, + tmp_min_key - ppar->range_param.min_key, + tmp_max_key - ppar->range_param.max_key, + flag, + &ppar->part_iter); + if (!res) + goto pop_and_go_right; /* res==0 --> no satisfying partitions */ + } + else + res= -1; + if (res == -1) { - //get a full range iterator + /* get a full range iterator */ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter); } /* Save our intent to mark full partition as used if we will not be able to obtain further limits on subpartitions */ + if (key_tree_part < ppar->last_part_partno) + { + /* + We need to ignore the rest of the partitioning fields in all + evaluations after this + */ + did_set_ignore_part_fields= TRUE; + ppar->ignore_part_fields= TRUE; + } set_full_part_if_bad_ret= TRUE; goto process_next_key_part; } - if (partno == ppar->last_subpart_partno && + if (key_tree_part == ppar->last_subpart_partno && (NULL != ppar->part_info->get_subpart_iter_for_interval)) { PARTITION_ITERATOR subpart_iter; @@ -3148,13 +3314,16 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) res= ppar->part_info-> get_subpart_iter_for_interval(ppar->part_info, TRUE, + NULL, /* Currently not used here */ key_tree->min_value, key_tree->max_value, - key_tree->min_flag | key_tree->max_flag, + 0, 0, /* Those are ignored here */ + key_tree->min_flag | + key_tree->max_flag, &subpart_iter); DBUG_ASSERT(res); /* We can't get "no satisfying subpartitions" */ if (res == -1) - return -1; /* all subpartitions satisfy */ + goto pop_and_go_right; /* all subpartitions satisfy */ uint32 subpart_id; bitmap_clear_all(&ppar->subparts_bitmap); @@ -3167,23 +3336,19 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) != NOT_A_PARTITION_ID) { - for (uint i= 0; i < ppar->part_info->no_subparts; i++) + for (uint i= 0; i < ppar->part_info->num_subparts; i++) if (bitmap_is_set(&ppar->subparts_bitmap, i)) bitmap_set_bit(&ppar->part_info->used_partitions, - part_id * ppar->part_info->no_subparts + i); + part_id * ppar->part_info->num_subparts + i); } - goto go_right; + goto pop_and_go_right; } if (key_tree->is_singlepoint()) { - pushed= TRUE; - ppar->cur_part_fields+= ppar->is_part_keypart[partno]; - ppar->cur_subpart_fields+= ppar->is_subpart_keypart[partno]; - *(ppar->arg_stack_end++) = key_tree; - - if (partno == ppar->last_part_partno && - ppar->cur_part_fields == ppar->part_fields) + if (key_tree_part == ppar->last_part_partno && + ppar->cur_part_fields == ppar->part_fields && + ppar->part_info->get_part_iter_for_interval == NULL) { /* Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all partitioning @@ -3212,7 +3377,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) goto process_next_key_part; } - if (partno == ppar->last_subpart_partno && + if (key_tree_part == ppar->last_subpart_partno && ppar->cur_subpart_fields == 
ppar->subpart_fields) { /* @@ -3236,7 +3401,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) NOT_A_PARTITION_ID) { bitmap_set_bit(&part_info->used_partitions, - part_id * part_info->no_subparts + subpart_id); + part_id * part_info->num_subparts + subpart_id); } res= 1; /* Some partitions were marked as used */ goto pop_and_go_right; @@ -3249,8 +3414,11 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) we're processing subpartititoning's key parts, this means we'll not be able to infer any suitable condition, so bail out. */ - if (partno >= ppar->last_part_partno) - return -1; + if (key_tree_part >= ppar->last_part_partno) + { + res= -1; + goto pop_and_go_right; + } } } @@ -3259,7 +3427,17 @@ process_next_key_part: res= find_used_partitions(ppar, key_tree->next_key_part); else res= -1; - + + if (did_set_ignore_part_fields) + { + /* + We have returned from processing all key trees linked to our next + key part. We are ready to be moving down (using right pointers) and + this tree is a new evaluation requiring its own decision on whether + to ignore partitioning fields. + */ + ppar->ignore_part_fields= FALSE; + } if (set_full_part_if_bad_ret) { if (res == -1) @@ -3282,18 +3460,14 @@ process_next_key_part: init_all_partitions_iterator(ppar->part_info, &ppar->part_iter); } - if (pushed) - { pop_and_go_right: - /* Pop this key part info off the "stack" */ - ppar->arg_stack_end--; - ppar->cur_part_fields-= ppar->is_part_keypart[partno]; - ppar->cur_subpart_fields-= ppar->is_subpart_keypart[partno]; - } + /* Pop this key part info off the "stack" */ + ppar->arg_stack_end--; + ppar->cur_part_fields-= ppar->is_part_keypart[key_tree_part]; + ppar->cur_subpart_fields-= ppar->is_subpart_keypart[key_tree_part]; if (res == -1) return -1; -go_right: if (key_tree->right != &null_element) { if (-1 == (right_res= find_used_partitions(ppar,key_tree->right))) @@ -3375,13 +3549,14 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar) uint used_part_fields, used_subpart_fields; used_part_fields= fields_ok_for_partition_index(part_info->part_field_array) ? - part_info->no_part_fields : 0; + part_info->num_part_fields : 0; used_subpart_fields= fields_ok_for_partition_index(part_info->subpart_field_array)? 
- part_info->no_subpart_fields : 0; + part_info->num_subpart_fields : 0; uint total_parts= used_part_fields + used_subpart_fields; + ppar->ignore_part_fields= FALSE; ppar->part_fields= used_part_fields; ppar->last_part_partno= (int)used_part_fields - 1; @@ -3416,10 +3591,10 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar) if (ppar->subpart_fields) { my_bitmap_map *buf; - uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts); + uint32 bufsize= bitmap_buffer_size(ppar->part_info->num_subparts); if (!(buf= (my_bitmap_map*) alloc_root(alloc, bufsize))) return TRUE; - bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts, + bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->num_subparts, FALSE); } range_par->key_parts= key_part; @@ -3430,12 +3605,8 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar) { key_part->key= 0; key_part->part= part; - key_part->store_length= key_part->length= (uint16) (*field)->key_length(); - if ((*field)->real_maybe_null()) - key_part->store_length+= HA_KEY_NULL_LENGTH; - if ((*field)->type() == MYSQL_TYPE_BLOB || - (*field)->real_type() == MYSQL_TYPE_VARCHAR) - key_part->store_length+= HA_KEY_BLOB_LENGTH; + key_part->length= (uint16)get_partition_field_store_length(*field); + key_part->store_length= key_part->length; DBUG_PRINT("info", ("part %u length %u store_length %u", part, key_part->length, key_part->store_length)); @@ -7553,12 +7724,16 @@ check_quick_keys(PARAM *param, uint idx, SEL_ARG *key_tree, tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) tmp_min_keypart+= - key_tree->next_key_part->store_min_key(param->key[idx], &tmp_min_key, - &tmp_min_flag); + key_tree->next_key_part->store_min_key(param->key[idx], + &tmp_min_key, + &tmp_min_flag, + MAX_KEY); if (!tmp_max_flag) tmp_max_keypart+= - key_tree->next_key_part->store_max_key(param->key[idx], &tmp_max_key, - &tmp_max_flag); + key_tree->next_key_part->store_max_key(param->key[idx], + &tmp_max_key, + &tmp_max_flag, + MAX_KEY); min_key_length= (uint) (tmp_min_key - param->min_key); max_key_length= (uint) (tmp_max_key - param->max_key); } @@ -7828,11 +8003,15 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { uint tmp_min_flag=key_tree->min_flag,tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) - min_part+= key_tree->next_key_part->store_min_key(key, &tmp_min_key, - &tmp_min_flag); + min_part+= key_tree->next_key_part->store_min_key(key, + &tmp_min_key, + &tmp_min_flag, + MAX_KEY); if (!tmp_max_flag) - max_part+= key_tree->next_key_part->store_max_key(key, &tmp_max_key, - &tmp_max_flag); + max_part+= key_tree->next_key_part->store_max_key(key, + &tmp_max_key, + &tmp_max_flag, + MAX_KEY); flag=tmp_min_flag | tmp_max_flag; } } diff --git a/sql/partition_element.h b/sql/partition_element.h index 5575eb93bfb..5e908a75fbe 100644 --- a/sql/partition_element.h +++ b/sql/partition_element.h @@ -1,7 +1,7 @@ #ifndef PARTITION_ELEMENT_INCLUDED #define PARTITION_ELEMENT_INCLUDED -/* Copyright (C) 2006 MySQL AB +/* Copyright (C) 2006-2008 MySQL AB, Sun Microsystems Inc. 2008-2009 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -40,6 +40,35 @@ enum partition_state { }; /* + This struct is used to keep track of column expressions as part + of the COLUMNS concept in conjunction with RANGE and LIST partitioning. 
+ The value can be either of MINVALUE, MAXVALUE and an expression that + must be constant and evaluate to the same type as the column it + represents. + + The data in this fixed in two steps. The parser will only fill in whether + it is a max_value or provide an expression. Filling in + column_value, part_info, partition_id, null_value is done by the + function fix_column_value_function. However the item tree needs + fixed also before writing it into the frm file (in add_column_list_values). + To distinguish between those two variants, fixed= 1 after the + fixing in add_column_list_values and fixed= 2 otherwise. This is + since the fixing in add_column_list_values isn't a complete fixing. +*/ + +typedef struct p_column_list_val +{ + void* column_value; + Item* item_expression; + partition_info *part_info; + uint partition_id; + bool max_value; + bool null_value; + char fixed; +} part_column_list_val; + + +/* This struct is used to contain the value of an element in the VALUES IN struct. It needs to keep knowledge of whether it is a signed/unsigned value and whether it is @@ -49,8 +78,10 @@ enum partition_state { typedef struct p_elem_val { longlong value; + uint added_items; bool null_value; bool unsigned_flag; + part_column_list_val *col_val_array; } part_elem_value; struct st_ddl_log_memory_entry; @@ -72,8 +103,9 @@ public: enum partition_state part_state; uint16 nodegroup_id; bool has_null_value; - bool signed_flag;/* Indicate whether this partition uses signed constants */ - bool max_value; /* Indicate whether this partition uses MAXVALUE */ + /* signed_flag and max_value only relevant for subpartitions */ + bool signed_flag; + bool max_value; partition_element() : part_max_rows(0), part_min_rows(0), range_value(0), diff --git a/sql/partition_info.cc b/sql/partition_info.cc index ba9ea0e876e..99d505a4540 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2006 MySQL AB +/* Copyright (C) 2006-2008 MySQL AB, Sun Microsystems Inc. 
2008-2009 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -75,7 +75,7 @@ partition_info *partition_info::get_clone() SYNOPSIS create_default_partition_names() part_no Partition number for subparts - no_parts Number of partitions + num_parts Number of partitions start_no Starting partition number subpart Is it subpartitions @@ -91,10 +91,10 @@ partition_info *partition_info::get_clone() #define MAX_PART_NAME_SIZE 8 char *partition_info::create_default_partition_names(uint part_no, - uint no_parts_arg, + uint num_parts_arg, uint start_no) { - char *ptr= (char*) sql_calloc(no_parts_arg*MAX_PART_NAME_SIZE); + char *ptr= (char*) sql_calloc(num_parts_arg*MAX_PART_NAME_SIZE); char *move_ptr= ptr; uint i= 0; DBUG_ENTER("create_default_partition_names"); @@ -105,11 +105,11 @@ char *partition_info::create_default_partition_names(uint part_no, { my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i))); move_ptr+=MAX_PART_NAME_SIZE; - } while (++i < no_parts_arg); + } while (++i < num_parts_arg); } else { - mem_alloc_error(no_parts_arg*MAX_PART_NAME_SIZE); + mem_alloc_error(num_parts_arg*MAX_PART_NAME_SIZE); } DBUG_RETURN(ptr); } @@ -189,19 +189,19 @@ bool partition_info::set_up_default_partitions(handler *file, goto end; } - if ((no_parts == 0) && - ((no_parts= file->get_default_no_partitions(info)) == 0)) + if ((num_parts == 0) && + ((num_parts= file->get_default_no_partitions(info)) == 0)) { my_error(ER_PARTITION_NOT_DEFINED_ERROR, MYF(0), "partitions"); goto end; } - if (unlikely(no_parts > MAX_PARTITIONS)) + if (unlikely(num_parts > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); goto end; } - if (unlikely((!(default_name= create_default_partition_names(0, no_parts, + if (unlikely((!(default_name= create_default_partition_names(0, num_parts, start_no))))) goto end; i= 0; @@ -220,7 +220,7 @@ bool partition_info::set_up_default_partitions(handler *file, mem_alloc_error(sizeof(partition_element)); goto end; } - } while (++i < no_parts); + } while (++i < num_parts); result= FALSE; end: DBUG_RETURN(result); @@ -259,9 +259,9 @@ bool partition_info::set_up_default_subpartitions(handler *file, List_iterator<partition_element> part_it(partitions); DBUG_ENTER("partition_info::set_up_default_subpartitions"); - if (no_subparts == 0) - no_subparts= file->get_default_no_partitions(info); - if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS)) + if (num_subparts == 0) + num_subparts= file->get_default_no_partitions(info); + if (unlikely((num_parts * num_subparts) > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); goto end; @@ -288,8 +288,8 @@ bool partition_info::set_up_default_subpartitions(handler *file, mem_alloc_error(sizeof(partition_element)); goto end; } - } while (++j < no_subparts); - } while (++i < no_parts); + } while (++j < num_subparts); + } while (++i < num_parts); result= FALSE; end: DBUG_RETURN(result); @@ -334,6 +334,49 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file, /* + Support routine for check_partition_info + + SYNOPSIS + has_unique_fields + no parameters + + RETURN VALUE + Erroneus field name Error, there are two fields with same name + NULL Ok, no field defined twice + + DESCRIPTION + Check that the user haven't defined the same field twice in + key or column list partitioning. 
+*/ +char* partition_info::has_unique_fields() +{ + char *field_name_outer, *field_name_inner; + List_iterator<char> it_outer(part_field_list); + uint num_fields= part_field_list.elements; + uint i,j; + DBUG_ENTER("partition_info::has_unique_fields"); + + for (i= 0; i < num_fields; i++) + { + field_name_outer= it_outer++; + List_iterator<char> it_inner(part_field_list); + for (j= 0; j < num_fields; j++) + { + field_name_inner= it_inner++; + if (i >= j) + continue; + if (!(my_strcasecmp(system_charset_info, + field_name_outer, + field_name_inner))) + { + DBUG_RETURN(field_name_outer); + } + } + } + DBUG_RETURN(NULL); +} + +/* A support function to check if a partition element's name is unique SYNOPSIS @@ -518,12 +561,12 @@ bool partition_info::check_engine_mix(handlerton *engine_type, { handlerton *old_engine_type= engine_type; bool first= TRUE; - uint no_parts= partitions.elements; + uint num_parts= partitions.elements; DBUG_ENTER("partition_info::check_engine_mix"); DBUG_PRINT("info", ("in: engine_type = %s, table_engine_set = %u", ha_resolve_storage_engine_name(engine_type), table_engine_set)); - if (no_parts) + if (num_parts) { List_iterator<partition_element> part_it(partitions); uint i= 0; @@ -536,7 +579,7 @@ bool partition_info::check_engine_mix(handlerton *engine_type, if (is_sub_partitioned() && part_elem->subpartitions.elements) { - uint no_subparts= part_elem->subpartitions.elements; + uint num_subparts= part_elem->subpartitions.elements; uint j= 0; List_iterator<partition_element> sub_it(part_elem->subpartitions); do @@ -548,7 +591,7 @@ bool partition_info::check_engine_mix(handlerton *engine_type, if (check_engine_condition(sub_elem, table_engine_set, &engine_type, &first)) goto error; - } while (++j < no_subparts); + } while (++j < num_subparts); /* ensure that the partition also has correct engine */ if (check_engine_condition(part_elem, table_engine_set, &engine_type, &first)) @@ -557,7 +600,7 @@ bool partition_info::check_engine_mix(handlerton *engine_type, else if (check_engine_condition(part_elem, table_engine_set, &engine_type, &first)) goto error; - } while (++i < no_parts); + } while (++i < num_parts); } DBUG_PRINT("info", ("engine_type = %s", ha_resolve_storage_engine_name(engine_type))); @@ -589,6 +632,7 @@ error: SYNOPSIS check_range_constants() + thd Thread object RETURN VALUE TRUE An error occurred during creation of range constants @@ -601,76 +645,112 @@ error: called for RANGE PARTITIONed tables. 
*/ -bool partition_info::check_range_constants() +bool partition_info::check_range_constants(THD *thd) { partition_element* part_def; - longlong current_largest; - longlong part_range_value; bool first= TRUE; uint i; List_iterator<partition_element> it(partitions); - bool result= TRUE; - bool signed_flag= !part_expr->unsigned_flag; + int result= TRUE; DBUG_ENTER("partition_info::check_range_constants"); - DBUG_PRINT("enter", ("INT_RESULT with %d parts", no_parts)); - - LINT_INIT(current_largest); + DBUG_PRINT("enter", ("RANGE with %d parts, column_list = %u", num_parts, + column_list)); - part_result_type= INT_RESULT; - range_int_array= (longlong*)sql_alloc(no_parts * sizeof(longlong)); - if (unlikely(range_int_array == NULL)) + if (column_list) { - mem_alloc_error(no_parts * sizeof(longlong)); - goto end; - } - i= 0; - do - { - part_def= it++; - if ((i != (no_parts - 1)) || !defined_max_value) + part_column_list_val *loc_range_col_array; + part_column_list_val *current_largest_col_val; + uint num_column_values= part_field_list.elements; + uint size_entries= sizeof(part_column_list_val) * num_column_values; + range_col_array= (part_column_list_val*)sql_calloc(num_parts * + size_entries); + LINT_INIT(current_largest_col_val); + if (unlikely(range_col_array == NULL)) { - part_range_value= part_def->range_value; - if (!signed_flag) - part_range_value-= 0x8000000000000000ULL; + mem_alloc_error(num_parts * size_entries); + goto end; } - else - part_range_value= LONGLONG_MAX; - if (first) + loc_range_col_array= range_col_array; + i= 0; + do { - current_largest= part_range_value; - range_int_array[0]= part_range_value; + part_def= it++; + { + List_iterator<part_elem_value> list_val_it(part_def->list_val_list); + part_elem_value *range_val= list_val_it++; + part_column_list_val *col_val= range_val->col_val_array; + + if (fix_column_value_functions(thd, range_val, i)) + goto end; + memcpy(loc_range_col_array, (const void*)col_val, size_entries); + loc_range_col_array+= num_column_values; + if (!first) + { + if (compare_column_values((const void*)current_largest_col_val, + (const void*)col_val) >= 0) + goto range_not_increasing_error; + } + current_largest_col_val= col_val; + } first= FALSE; + } while (++i < num_parts); + } + else + { + longlong current_largest; + longlong part_range_value; + bool signed_flag= !part_expr->unsigned_flag; + + LINT_INIT(current_largest); + + part_result_type= INT_RESULT; + range_int_array= (longlong*)sql_alloc(num_parts * sizeof(longlong)); + if (unlikely(range_int_array == NULL)) + { + mem_alloc_error(num_parts * sizeof(longlong)); + goto end; } - else + i= 0; + do { - if (likely(current_largest < part_range_value)) + part_def= it++; + if ((i != (num_parts - 1)) || !defined_max_value) { - current_largest= part_range_value; - range_int_array[i]= part_range_value; - } - else if (defined_max_value && - current_largest == part_range_value && - part_range_value == LONGLONG_MAX && - i == (no_parts - 1)) - { - range_int_array[i]= part_range_value; + part_range_value= part_def->range_value; + if (!signed_flag) + part_range_value-= 0x8000000000000000ULL; } else + part_range_value= LONGLONG_MAX; + + if (!first) { - my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0)); - goto end; + if (unlikely(current_largest > part_range_value) || + (unlikely(current_largest == part_range_value) && + (part_range_value < LONGLONG_MAX || + i != (num_parts - 1) || + !defined_max_value))) + goto range_not_increasing_error; } - } - } while (++i < no_parts); + range_int_array[i]= part_range_value; + 
current_largest= part_range_value; + first= FALSE; + } while (++i < num_parts); + } result= FALSE; end: DBUG_RETURN(result); + +range_not_increasing_error: + my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0)); + goto end; } /* Support routines for check_list_constants used by qsort to sort the - constant list expressions. One routine for unsigned and one for signed. + constant list expressions. One routine for integers and one for + column lists. SYNOPSIS list_part_cmp() @@ -695,6 +775,54 @@ int partition_info::list_part_cmp(const void* a, const void* b) return 0; } + /* + Compare two lists of column values in RANGE/LIST partitioning + SYNOPSIS + compare_column_values() + first First column list argument + second Second column list argument + RETURN VALUES + 0 Equal + -1 First argument is smaller + +1 First argument is larger +*/ + +int partition_info::compare_column_values(const void *first_arg, + const void *second_arg) +{ + const part_column_list_val *first= (part_column_list_val*)first_arg; + const part_column_list_val *second= (part_column_list_val*)second_arg; + partition_info *part_info= first->part_info; + Field **field; + + for (field= part_info->part_field_array; *field; + field++, first++, second++) + { + if (first->max_value || second->max_value) + { + if (first->max_value && second->max_value) + continue; + if (second->max_value) + return -1; + else + return +1; + } + if (first->null_value || second->null_value) + { + if (first->null_value && second->null_value) + continue; + if (second->null_value) + return +1; + else + return -1; + } + int res= (*field)->cmp((const uchar*)first->column_value, + (const uchar*)second->column_value); + if (res) + return res; + } + return 0; +} /* This routine allocates an array for all list constants to achieve a fast @@ -704,6 +832,7 @@ int partition_info::list_part_cmp(const void* a, const void* b) SYNOPSIS check_list_constants() + thd Thread object RETURN VALUE TRUE An error occurred during creation of list constants @@ -716,20 +845,23 @@ int partition_info::list_part_cmp(const void* a, const void* b) called for LIST PARTITIONed tables. */ -bool partition_info::check_list_constants() +bool partition_info::check_list_constants(THD *thd) { - uint i; + uint i, size_entries, num_column_values; uint list_index= 0; part_elem_value *list_value; bool result= TRUE; - longlong curr_value, prev_value, type_add, calc_value; + longlong type_add, calc_value; + void *curr_value, *prev_value; partition_element* part_def; bool found_null= FALSE; + int (*compare_func)(const void *, const void*); + void *ptr; List_iterator<partition_element> list_func_it(partitions); DBUG_ENTER("partition_info::check_list_constants"); part_result_type= INT_RESULT; - no_list_values= 0; + num_list_values= 0; /* We begin by calculating the number of list values that have been defined in the first step. @@ -762,51 +894,86 @@ bool partition_info::check_list_constants() } List_iterator<part_elem_value> list_val_it1(part_def->list_val_list); while (list_val_it1++) - no_list_values++; - } while (++i < no_parts); + num_list_values++; + } while (++i < num_parts); list_func_it.rewind(); - list_array= (LIST_PART_ENTRY*)sql_alloc((no_list_values+1) * - sizeof(LIST_PART_ENTRY)); - if (unlikely(list_array == NULL)) + num_column_values= part_field_list.elements; + size_entries= column_list ? 
+ (num_column_values * sizeof(part_column_list_val)) : + sizeof(LIST_PART_ENTRY); + ptr= sql_calloc((num_list_values+1) * size_entries); + if (unlikely(ptr == NULL)) { - mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY)); + mem_alloc_error(num_list_values * size_entries); goto end; } - - i= 0; - /* - Fix to be able to reuse signed sort functions also for unsigned - partition functions. - */ - type_add= (longlong)(part_expr->unsigned_flag ? + if (column_list) + { + part_column_list_val *loc_list_col_array; + loc_list_col_array= (part_column_list_val*)ptr; + list_col_array= (part_column_list_val*)ptr; + compare_func= compare_column_values; + i= 0; + do + { + part_def= list_func_it++; + List_iterator<part_elem_value> list_val_it2(part_def->list_val_list); + while ((list_value= list_val_it2++)) + { + part_column_list_val *col_val= list_value->col_val_array; + if (unlikely(fix_column_value_functions(thd, list_value, i))) + { + DBUG_RETURN(TRUE); + } + memcpy(loc_list_col_array, (const void*)col_val, size_entries); + loc_list_col_array+= num_column_values; + } + } while (++i < num_parts); + } + else + { + compare_func= list_part_cmp; + list_array= (LIST_PART_ENTRY*)ptr; + i= 0; + /* + Fix to be able to reuse signed sort functions also for unsigned + partition functions. + */ + type_add= (longlong)(part_expr->unsigned_flag ? 0x8000000000000000ULL : 0ULL); - do - { - part_def= list_func_it++; - List_iterator<part_elem_value> list_val_it2(part_def->list_val_list); - while ((list_value= list_val_it2++)) + do { - calc_value= list_value->value - type_add; - list_array[list_index].list_value= calc_value; - list_array[list_index++].partition_id= i; - } - } while (++i < no_parts); - - if (fixed && no_list_values) + part_def= list_func_it++; + List_iterator<part_elem_value> list_val_it2(part_def->list_val_list); + while ((list_value= list_val_it2++)) + { + calc_value= list_value->value - type_add; + list_array[list_index].list_value= calc_value; + list_array[list_index++].partition_id= i; + } + } while (++i < num_parts); + } + DBUG_ASSERT(fixed); + if (num_list_values) { bool first= TRUE; - my_qsort((void*)list_array, no_list_values, sizeof(LIST_PART_ENTRY), - &list_part_cmp); - + /* + list_array and list_col_array are unions, so this works for both + variants of LIST partitioning. + */ + my_qsort((void*)list_array, num_list_values, size_entries, + compare_func); + i= 0; LINT_INIT(prev_value); do { - DBUG_ASSERT(i < no_list_values); - curr_value= list_array[i].list_value; - if (likely(first || prev_value != curr_value)) + DBUG_ASSERT(i < num_list_values); + curr_value= column_list ? 
(void*)&list_col_array[num_column_values * i] : + (void*)&list_array[i]; + if (likely(first || compare_func(curr_value, prev_value))) { prev_value= curr_value; first= FALSE; @@ -816,7 +983,7 @@ bool partition_info::check_list_constants() my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0)); goto end; } - } while (++i < no_list_values); + } while (++i < num_list_values); } result= FALSE; end: @@ -829,10 +996,11 @@ end: SYNOPSIS check_partition_info() + thd Thread object + eng_type Return value for used engine in partitions file A reference to a handler of the table info Create info - engine_type Return value for used engine in partitions - check_partition_function Should we check the partition function + add_or_reorg_part Is it ALTER TABLE ADD/REORGANIZE command RETURN VALUE TRUE Error, something went wrong @@ -848,7 +1016,7 @@ end: bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, handler *file, HA_CREATE_INFO *info, - bool check_partition_function) + bool add_or_reorg_part) { handlerton *table_engine= default_engine_type; uint i, tot_partitions; @@ -859,11 +1027,11 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, DBUG_PRINT("info", ("default table_engine = %s", ha_resolve_storage_engine_name(table_engine))); - if (check_partition_function) + if (!add_or_reorg_part) { int err= 0; - if (part_type != HASH_PARTITION || !list_of_part_fields) + if (!list_of_part_fields) { DBUG_ASSERT(part_expr); err= part_expr->walk(&Item::check_partition_func_processor, 0, @@ -877,9 +1045,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); goto end; } + if (thd->lex->sql_command == SQLCOM_CREATE_TABLE && + fix_parser_data(thd)) + goto end; } if (unlikely(!is_sub_partitioned() && - !(use_default_subpartitions && use_default_no_subpartitions))) + !(use_default_subpartitions && use_default_num_subpartitions))) { my_error(ER_SUBPARTITION_ERROR, MYF(0)); goto end; @@ -937,6 +1108,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, } } + if (part_field_list.elements > 0 && + (same_name= has_unique_fields())) + { + my_error(ER_SAME_NAME_PARTITION_FIELD, MYF(0), same_name); + goto end; + } if ((same_name= has_unique_names())) { my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name); @@ -945,8 +1122,8 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, i= 0; { List_iterator<partition_element> part_it(partitions); - uint no_parts_not_set= 0; - uint prev_no_subparts_not_set= no_subparts + 1; + uint num_parts_not_set= 0; + uint prev_num_subparts_not_set= num_subparts + 1; do { partition_element *part_elem= part_it++; @@ -968,7 +1145,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, { if (part_elem->engine_type == NULL) { - no_parts_not_set++; + num_parts_not_set++; part_elem->engine_type= default_engine_type; } if (check_table_name(part_elem->partition_name, @@ -983,7 +1160,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, else { uint j= 0; - uint no_subparts_not_set= 0; + uint num_subparts_not_set= 0; List_iterator<partition_element> sub_it(part_elem->subpartitions); partition_element *sub_elem; do @@ -1002,44 +1179,45 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, else { sub_elem->engine_type= default_engine_type; - no_subparts_not_set++; + num_subparts_not_set++; } } DBUG_PRINT("info", ("part = %d sub = %d engine = %s", i, j, 
ha_resolve_storage_engine_name(sub_elem->engine_type))); - } while (++j < no_subparts); + } while (++j < num_subparts); - if (prev_no_subparts_not_set == (no_subparts + 1) && - (no_subparts_not_set == 0 || no_subparts_not_set == no_subparts)) - prev_no_subparts_not_set= no_subparts_not_set; + if (prev_num_subparts_not_set == (num_subparts + 1) && + (num_subparts_not_set == 0 || + num_subparts_not_set == num_subparts)) + prev_num_subparts_not_set= num_subparts_not_set; if (!table_engine_set && - prev_no_subparts_not_set != no_subparts_not_set) + prev_num_subparts_not_set != num_subparts_not_set) { - DBUG_PRINT("info", ("no_subparts_not_set = %u no_subparts = %u", - no_subparts_not_set, no_subparts)); + DBUG_PRINT("info", ("num_subparts_not_set = %u num_subparts = %u", + num_subparts_not_set, num_subparts)); my_error(ER_MIX_HANDLER_ERROR, MYF(0)); goto end; } if (part_elem->engine_type == NULL) { - if (no_subparts_not_set == 0) + if (num_subparts_not_set == 0) part_elem->engine_type= sub_elem->engine_type; else { - no_parts_not_set++; + num_parts_not_set++; part_elem->engine_type= default_engine_type; } } } - } while (++i < no_parts); + } while (++i < num_parts); if (!table_engine_set && - no_parts_not_set != 0 && - no_parts_not_set != no_parts) + num_parts_not_set != 0 && + num_parts_not_set != num_parts) { - DBUG_PRINT("info", ("no_parts_not_set = %u no_parts = %u", - no_parts_not_set, no_subparts)); + DBUG_PRINT("info", ("num_parts_not_set = %u num_parts = %u", + num_parts_not_set, num_subparts)); my_error(ER_MIX_HANDLER_ERROR, MYF(0)); goto end; } @@ -1062,10 +1240,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, list constants. */ - if (fixed) + if (add_or_reorg_part) { - if (unlikely((part_type == RANGE_PARTITION && check_range_constants()) || - (part_type == LIST_PARTITION && check_list_constants()))) + if (unlikely((part_type == RANGE_PARTITION && + check_range_constants(thd)) || + (part_type == LIST_PARTITION && + check_list_constants(thd)))) goto end; } result= FALSE; @@ -1096,20 +1276,101 @@ void partition_info::print_no_partition_found(TABLE *table) if (check_single_table_access(current_thd, SELECT_ACL, &table_list, TRUE)) + { my_message(ER_NO_PARTITION_FOR_GIVEN_VALUE, ER(ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT), MYF(0)); + } else { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - if (part_expr->null_value) - buf_ptr= (char*)"NULL"; + if (column_list) + buf_ptr= (char*)"from column_list"; else - longlong2str(err_value, buf, - part_expr->unsigned_flag ? 10 : -10); + { + my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + if (part_expr->null_value) + buf_ptr= (char*)"NULL"; + else + longlong2str(err_value, buf, + part_expr->unsigned_flag ? 
10 : -10); + dbug_tmp_restore_column_map(table->read_set, old_map); + } my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), buf_ptr); - dbug_tmp_restore_column_map(table->read_set, old_map); } } + + +/* + Set fields related to partition expression + SYNOPSIS + set_part_expr() + start_token Start of partition function string + item_ptr Pointer to item tree + end_token End of partition function string + is_subpart Subpartition indicator + RETURN VALUES + TRUE Memory allocation error + FALSE Success +*/ + +bool partition_info::set_part_expr(char *start_token, Item *item_ptr, + char *end_token, bool is_subpart) +{ + uint expr_len= end_token - start_token; + char *func_string= (char*) sql_memdup(start_token, expr_len); + + if (!func_string) + { + mem_alloc_error(expr_len); + return TRUE; + } + if (is_subpart) + { + list_of_subpart_fields= FALSE; + subpart_expr= item_ptr; + subpart_func_string= func_string; + subpart_func_len= expr_len; + } + else + { + list_of_part_fields= FALSE; + part_expr= item_ptr; + part_func_string= func_string; + part_func_len= expr_len; + } + return FALSE; +} + + +/* + Check that partition fields and subpartition fields are not too long + + SYNOPSIS + check_partition_field_length() + + RETURN VALUES + TRUE Total length was too big + FALSE Length is ok +*/ + +bool partition_info::check_partition_field_length() +{ + uint store_length= 0; + uint i; + DBUG_ENTER("partition_info::check_partition_field_length"); + + for (i= 0; i < num_part_fields; i++) + store_length+= get_partition_field_store_length(part_field_array[i]); + if (store_length > MAX_KEY_LENGTH) + DBUG_RETURN(TRUE); + store_length= 0; + for (i= 0; i < num_subpart_fields; i++) + store_length+= get_partition_field_store_length(subpart_field_array[i]); + if (store_length > MAX_KEY_LENGTH) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); +} + + /* Set up buffers and arrays for fields requiring preparation SYNOPSIS @@ -1221,46 +1482,6 @@ bool partition_info::set_up_charset_field_preps() } subpart_charset_field_array[i]= NULL; } - if (tot_fields) - { - uint k; - size= tot_fields*sizeof(char**); - if (!(char_ptrs= (uchar**)sql_calloc(size))) - goto error; - full_part_field_buffers= char_ptrs; - if (!(char_ptrs= (uchar**)sql_calloc(size))) - goto error; - restore_full_part_field_ptrs= char_ptrs; - size= (tot_fields + 1) * sizeof(char**); - if (!(char_ptrs= (uchar**)sql_calloc(size))) - goto error; - full_part_charset_field_array= (Field**)char_ptrs; - for (i= 0; i < tot_part_fields; i++) - { - full_part_charset_field_array[i]= part_charset_field_array[i]; - full_part_field_buffers[i]= part_field_buffers[i]; - } - k= tot_part_fields; - for (i= 0; i < tot_subpart_fields; i++) - { - uint j; - bool found= FALSE; - field= subpart_charset_field_array[i]; - - for (j= 0; j < tot_part_fields; j++) - { - if (field == part_charset_field_array[i]) - found= TRUE; - } - if (!found) - { - full_part_charset_field_array[k]= subpart_charset_field_array[i]; - full_part_field_buffers[k]= subpart_field_buffers[i]; - k++; - } - } - full_part_charset_field_array[k]= NULL; - } DBUG_RETURN(FALSE); error: mem_alloc_error(size); @@ -1321,5 +1542,633 @@ id_err: return 1; } +/* + Create a new column value in current list with maxvalue + Called from parser + + SYNOPSIS + add_max_value() + RETURN + TRUE Error + FALSE Success +*/ + +int partition_info::add_max_value() +{ + DBUG_ENTER("partition_info::add_max_value"); + + part_column_list_val *col_val; + if (!(col_val= add_column_value())) + { + DBUG_RETURN(TRUE); + } + col_val->max_value= TRUE; + 
DBUG_RETURN(FALSE); +} + +/* + Create a new column value in current list + Called from parser + + SYNOPSIS + add_column_value() + RETURN + >0 A part_column_list_val object which have been + inserted into its list + 0 Memory allocation failure +*/ + +part_column_list_val *partition_info::add_column_value() +{ + uint max_val= num_columns ? num_columns : MAX_REF_PARTS; + DBUG_ENTER("add_column_value"); + DBUG_PRINT("enter", ("num_columns = %u, curr_list_object %u, max_val = %u", + num_columns, curr_list_object, max_val)); + if (curr_list_object < max_val) + { + curr_list_val->added_items++; + DBUG_RETURN(&curr_list_val->col_val_array[curr_list_object++]); + } + if (!num_columns && part_type == LIST_PARTITION) + { + /* + We're trying to add more than MAX_REF_PARTS, this can happen + in ALTER TABLE using List partitions where the first partition + uses VALUES IN (1,2,3...,17) where the number of fields in + the list is more than MAX_REF_PARTS, in this case we know + that the number of columns must be 1 and we thus reorganize + into the structure used for 1 column. After this we call + ourselves recursively which should always succeed. + */ + if (!reorganize_into_single_field_col_val()) + { + DBUG_RETURN(add_column_value()); + } + DBUG_RETURN(NULL); + } + if (column_list) + { + my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0)); + } + else + { + if (part_type == RANGE_PARTITION) + my_error(ER_TOO_MANY_VALUES_ERROR, MYF(0), "RANGE"); + else + my_error(ER_TOO_MANY_VALUES_ERROR, MYF(0), "LIST"); + } + DBUG_RETURN(NULL); +} + + +/* + Initialise part_elem_value object at setting of a new object + (Helper functions to functions called by parser) + + SYNOPSIS + init_col_val + col_val Column value object to be initialised + item Item object representing column value + RETURN VALUES + TRUE Failure + FALSE Success +*/ +void partition_info::init_col_val(part_column_list_val *col_val, Item *item) +{ + DBUG_ENTER("partition_info::init_col_val"); + + col_val->item_expression= item; + col_val->null_value= item->null_value; + if (item->result_type() == INT_RESULT) + { + /* + This could be both column_list partitioning and function + partitioning, but it doesn't hurt to set the function + partitioning flags about unsignedness. 
+ */ + curr_list_val->value= item->val_int(); + curr_list_val->unsigned_flag= TRUE; + if (!item->unsigned_flag && + curr_list_val->value < 0) + curr_list_val->unsigned_flag= FALSE; + if (!curr_list_val->unsigned_flag) + curr_part_elem->signed_flag= TRUE; + } + col_val->part_info= NULL; + DBUG_VOID_RETURN; +} +/* + Add a column value in VALUES LESS THAN or VALUES IN + (Called from parser) + + SYNOPSIS + add_column_list_value() + lex Parser's lex object + thd Thread object + item Item object representing column value + + RETURN VALUES + TRUE Failure + FALSE Success +*/ +bool partition_info::add_column_list_value(THD *thd, Item *item) +{ + part_column_list_val *col_val; + Name_resolution_context *context= &thd->lex->current_select->context; + TABLE_LIST *save_list= context->table_list; + const char *save_where= thd->where; + DBUG_ENTER("partition_info::add_column_list_value"); + + if (part_type == LIST_PARTITION && + num_columns == 1U) + { + if (init_column_part()) + { + DBUG_RETURN(TRUE); + } + } + + context->table_list= 0; + if (column_list) + thd->where= "field list"; + else + thd->where= "partition function"; + + if (item->walk(&Item::check_partition_func_processor, 0, + NULL)) + { + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + DBUG_RETURN(TRUE); + } + if (item->fix_fields(thd, (Item**)0) || + ((context->table_list= save_list), FALSE) || + (!item->const_item())) + { + context->table_list= save_list; + thd->where= save_where; + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + DBUG_RETURN(TRUE); + } + thd->where= save_where; + + if (!(col_val= add_column_value())) + { + DBUG_RETURN(TRUE); + } + init_col_val(col_val, item); + DBUG_RETURN(FALSE); +} + +/* + Initialise part_info object for receiving a set of column values + for a partition, called when parser reaches VALUES LESS THAN or + VALUES IN. + + SYNOPSIS + init_column_part() + lex Parser's lex object + + RETURN VALUES + TRUE Failure + FALSE Success +*/ +bool partition_info::init_column_part() +{ + partition_element *p_elem= curr_part_elem; + part_column_list_val *col_val_array; + part_elem_value *list_val; + uint loc_num_columns; + DBUG_ENTER("partition_info::init_column_part"); + + if (!(list_val= + (part_elem_value*)sql_calloc(sizeof(part_elem_value))) || + p_elem->list_val_list.push_back(list_val)) + { + mem_alloc_error(sizeof(part_elem_value)); + DBUG_RETURN(TRUE); + } + if (num_columns) + loc_num_columns= num_columns; + else + loc_num_columns= MAX_REF_PARTS; + if (!(col_val_array= + (part_column_list_val*)sql_calloc(loc_num_columns * + sizeof(part_column_list_val)))) + { + mem_alloc_error(loc_num_columns * sizeof(part_elem_value)); + DBUG_RETURN(TRUE); + } + list_val->col_val_array= col_val_array; + list_val->added_items= 0; + curr_list_val= list_val; + curr_list_object= 0; + DBUG_RETURN(FALSE); +} + +/* + In the case of ALTER TABLE ADD/REORGANIZE PARTITION for LIST + partitions we can specify list values as: + VALUES IN (v1, v2,,,, v17) if we're using the first partitioning + variant with a function or a column list partitioned table with + one partition field. In this case the parser knows not the + number of columns start with and allocates MAX_REF_PARTS in the + array. If we try to allocate something beyond MAX_REF_PARTS we + will call this function to reorganize into a structure with + num_columns = 1. 
Also when the parser knows that we used LIST + partitioning and we used a VALUES IN like above where number of + values was smaller than MAX_REF_PARTS or equal, then we will + reorganize after discovering this in the parser. + + SYNOPSIS + reorganize_into_single_field_col_val() + + RETURN VALUES + TRUE Failure + FALSE Success +*/ +int partition_info::reorganize_into_single_field_col_val() +{ + part_column_list_val *col_val, *new_col_val; + part_elem_value *val= curr_list_val; + uint loc_num_columns= num_columns; + uint i; + DBUG_ENTER("partition_info::reorganize_into_single_field_col_val"); + + num_columns= 1; + val->added_items= 1U; + col_val= &val->col_val_array[0]; + init_col_val(col_val, col_val->item_expression); + for (i= 1; i < loc_num_columns; i++) + { + col_val= &val->col_val_array[i]; + DBUG_ASSERT(part_type == LIST_PARTITION); + if (init_column_part()) + { + DBUG_RETURN(TRUE); + } + if (!(new_col_val= add_column_value())) + { + DBUG_RETURN(TRUE); + } + memcpy(new_col_val, col_val, sizeof(*col_val)); + init_col_val(new_col_val, col_val->item_expression); + } + curr_list_val= val; + DBUG_RETURN(FALSE); +} + +/* + This function handles the case of function-based partitioning. + It fixes some data structures created in the parser and puts + them in the format required by the rest of the partitioning + code. + + SYNOPSIS + fix_func_partition() + thd Thread object + col_val Array of one value + part_elem The partition instance + part_id Id of partition instance + + RETURN VALUES + TRUE Failure + FALSE Success +*/ +int partition_info::fix_func_partition(THD *thd, + part_elem_value *val, + partition_element *part_elem, + uint part_id) +{ + part_column_list_val *col_val= val->col_val_array; + DBUG_ENTER("partition_info::fix_func_partition"); + + if (col_val->fixed) + { + DBUG_RETURN(FALSE); + } + if (val->added_items != 1) + { + my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + if (col_val->max_value) + { + /* The parser ensures we're not LIST partitioned here */ + DBUG_ASSERT(part_type == RANGE_PARTITION); + if (defined_max_value) + { + my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + if (part_id == (num_parts - 1)) + { + defined_max_value= TRUE; + part_elem->max_value= TRUE; + part_elem->range_value= LONGLONG_MAX; + } + else + { + my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + } + else + { + Item *item_expr= col_val->item_expression; + if ((val->null_value= item_expr->null_value)) + { + if (part_elem->has_null_value) + { + my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + part_elem->has_null_value= TRUE; + } + else if (item_expr->result_type() != INT_RESULT) + { + my_error(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + if (part_type == RANGE_PARTITION) + { + if (part_elem->has_null_value) + { + my_error(ER_NULL_IN_VALUES_LESS_THAN, MYF(0)); + DBUG_RETURN(TRUE); + } + part_elem->range_value= val->value; + } + } + col_val->fixed= 2; + DBUG_RETURN(FALSE); +} + +/* + Get column item with a proper character set according to the field + + SYNOPSIS + get_column_item() + item Item object to start with + field Field for which the item will be compared to + + RETURN VALUES + NULL Error + item Returned item +*/ + +Item* partition_info::get_column_item(Item *item, Field *field) +{ + if (field->result_type() == STRING_RESULT && + item->collation.collation != field->charset()) + { + if (!(item= convert_charset_partition_constant(item, + field->charset()))) 
+ { + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + return NULL; + } + } + return item; +} + + +/* + Evaluate VALUES functions for column list values + SYNOPSIS + fix_column_value_functions() + thd Thread object + col_val List of column values + part_id Partition id we are fixing + + RETURN VALUES + TRUE Error + FALSE Success + DESCRIPTION + Fix column VALUES and store in memory array adapted to the data type +*/ + +bool partition_info::fix_column_value_functions(THD *thd, + part_elem_value *val, + uint part_id) +{ + uint num_columns= part_field_list.elements; + bool result= FALSE; + uint i; + part_column_list_val *col_val= val->col_val_array; + DBUG_ENTER("partition_info::fix_column_value_functions"); + + if (col_val->fixed > 1) + { + DBUG_RETURN(FALSE); + } + for (i= 0; i < num_columns; col_val++, i++) + { + Item *column_item= col_val->item_expression; + Field *field= part_field_array[i]; + col_val->part_info= this; + col_val->partition_id= part_id; + if (col_val->max_value) + col_val->column_value= NULL; + else + { + col_val->column_value= NULL; + if (!col_val->null_value) + { + uchar *val_ptr; + uint len= field->pack_length(); + ulong save_sql_mode; + bool save_got_warning; + + if (!(column_item= get_column_item(column_item, + field))) + { + result= TRUE; + goto end; + } + save_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode= 0; + save_got_warning= thd->got_warning; + thd->got_warning= 0; + if (column_item->save_in_field(field, TRUE) || + thd->got_warning) + { + my_error(ER_WRONG_TYPE_COLUMN_VALUE_ERROR, MYF(0)); + result= TRUE; + goto end; + } + thd->got_warning= save_got_warning; + thd->variables.sql_mode= save_sql_mode; + if (!(val_ptr= (uchar*) sql_calloc(len))) + { + mem_alloc_error(len); + result= TRUE; + goto end; + } + col_val->column_value= val_ptr; + memcpy(val_ptr, field->ptr, len); + } + } + col_val->fixed= 2; + } +end: + DBUG_RETURN(result); +} + +/* + The parser generates generic data structures, we need to set them up + as the rest of the code expects to find them. This is in reality part + of the syntax check of the parser code. + + It is necessary to call this function in the case of a CREATE TABLE + statement, in this case we do it early in the check_partition_info + function. + + It is necessary to call this function for ALTER TABLE where we + assign a completely new partition structure, in this case we do it + in prep_alter_part_table after discovering that the partition + structure is entirely redefined. + + It's necessary to call this method also for ALTER TABLE ADD/REORGANIZE + of partitions, in this we call it in prep_alter_part_table after + making some initial checks but before going deep to check the partition + info, we also assign the column_list variable before calling this function + here. + + Finally we also call it immediately after returning from parsing the + partitioning text found in the frm file. + + This function mainly fixes the VALUES parts, these are handled differently + whether or not we use column list partitioning. Since the parser doesn't + know which we are using we need to set-up the old data structures after + the parser is complete when we know if what type of partitioning the + base table is using. + + For column lists we will handle this in the fix_column_value_function. + For column lists it is sufficient to verify that the number of columns + and number of elements are in synch with each other. So only partitioning + using functions need to be set-up to their data structures. 
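To make that bridging concrete, here is a deliberately simplified, hypothetical sketch of the two shapes involved (the real part_elem_value and part_column_list_val structures carry more state than this): the parser always produces an array of column values per VALUES entry, and for function-based RANGE partitioning that array must hold exactly one element, which is then folded into the legacy per-partition range value.

#include <climits>

/* Hypothetical, trimmed-down shapes; the names only loosely mirror the
   real structures. */
struct ColVal       { long long value; bool max_value; bool null_value; };
struct ParsedEntry  { ColVal *col_val_array; unsigned added_items; };
struct LegacyRange  { long long range_value; bool max_value; };

/* Collapse one parsed VALUES LESS THAN entry into the representation used
   by function-based RANGE partitioning. Returns true on error, matching
   the TRUE/FALSE convention of the surrounding code. */
static bool collapse_to_legacy(const ParsedEntry &in, LegacyRange *out)
{
  if (in.added_items != 1)            /* several values need COLUMNS syntax */
    return true;
  out->max_value= in.col_val_array[0].max_value;
  out->range_value= out->max_value ? LLONG_MAX : in.col_val_array[0].value;
  return false;
}

Column list partitioning keeps the full array instead and only has to verify that added_items matches the number of partitioning columns.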
+ + SYNOPSIS + fix_parser_data() + thd Thread object + + RETURN VALUES + TRUE Failure + FALSE Success +*/ + +int partition_info::fix_parser_data(THD *thd) +{ + List_iterator<partition_element> it(partitions); + partition_element *part_elem; + part_elem_value *val; + uint num_elements; + uint i= 0, j, k; + int result; + DBUG_ENTER("partition_info::fix_parser_data"); + + if (!(part_type == RANGE_PARTITION || + part_type == LIST_PARTITION)) + { + /* Nothing to do for HASH/KEY partitioning */ + DBUG_RETURN(FALSE); + } + do + { + part_elem= it++; + List_iterator<part_elem_value> list_val_it(part_elem->list_val_list); + j= 0; + num_elements= part_elem->list_val_list.elements; + DBUG_ASSERT(part_type == RANGE_PARTITION ? + num_elements == 1U : TRUE); + do + { + part_elem_value *val= list_val_it++; + if (column_list) + { + if (val->added_items != num_columns) + { + my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + for (k= 0; k < num_columns; k++) + { + part_column_list_val *col_val= &val->col_val_array[k]; + if (col_val->null_value && part_type == RANGE_PARTITION) + { + my_error(ER_NULL_IN_VALUES_LESS_THAN, MYF(0)); + DBUG_RETURN(TRUE); + } + } + } + else + { + if (fix_func_partition(thd, val, part_elem, i)) + { + DBUG_RETURN(TRUE); + } + if (val->null_value) + { + /* + Null values aren't required in the value part, they are kept per + partition instance, only LIST partitions have NULL values. + */ + list_val_it.remove(); + } + } + } while (++j < num_elements); + } while (++i < num_parts); + DBUG_RETURN(FALSE); +} + +void partition_info::print_debug(const char *str, uint *value) +{ + DBUG_ENTER("print_debug"); + if (value) + DBUG_PRINT("info", ("parser: %s, val = %u", str, *value)); + else + DBUG_PRINT("info", ("parser: %s", str)); + DBUG_VOID_RETURN; +} +#else /* WITH_PARTITION_STORAGE_ENGINE */ + /* + For builds without partitioning we need to define these functions + since we they are called from the parser. The parser cannot + remove code parts using ifdef, but the code parts cannot be called + so we simply need to add empty functions to make the linker happy. + */ +part_column_list_val *partition_info::add_column_value() +{ + return NULL; +} + +bool partition_info::set_part_expr(char *start_token, Item *item_ptr, + char *end_token, bool is_subpart) +{ + (void)start_token; + (void)item_ptr; + (void)end_token; + (void)is_subpart; + return FALSE; +} + +int partition_info::reorganize_into_single_field_col_val() +{ + return 0; +} + +bool partition_info::init_column_part() +{ + return FALSE; +} + +bool partition_info::add_column_list_value(Item *item) +{ + return FALSE; +} +int partition_info::add_max_value() +{ + return 0; +} #endif /* WITH_PARTITION_STORAGE_ENGINE */ diff --git a/sql/partition_info.h b/sql/partition_info.h index b5a6c4a0961..0ac8dec4945 100644 --- a/sql/partition_info.h +++ b/sql/partition_info.h @@ -1,7 +1,7 @@ #ifndef PARTITION_INFO_INCLUDED #define PARTITION_INFO_INCLUDED -/* Copyright 2006-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2006-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -67,10 +67,9 @@ public: /* When we have various string fields we might need some preparation before and clean-up after calling the get_part_id_func's. We need - one such method for get_partition_id and one for - get_part_partition_id and one for get_subpartition_id. 
+ one such method for get_part_partition_id and one for + get_subpartition_id. */ - get_part_id_func get_partition_id_charset; get_part_id_func get_part_partition_id_charset; get_subpart_id_func get_subpartition_id_charset; @@ -84,7 +83,6 @@ public: without duplicates, NULL-terminated. */ Field **full_part_field_array; - Field **full_part_charset_field_array; /* Set of all fields used in partition and subpartition expression. Required for testing of partition fields in write_set when @@ -100,10 +98,8 @@ public: */ uchar **part_field_buffers; uchar **subpart_field_buffers; - uchar **full_part_field_buffers; uchar **restore_part_field_ptrs; uchar **restore_subpart_field_ptrs; - uchar **restore_full_part_field_ptrs; Item *part_expr; Item *subpart_expr; @@ -127,6 +123,8 @@ public: union { longlong *range_int_array; LIST_PART_ENTRY *list_array; + part_column_list_val *range_col_array; + part_column_list_val *list_col_array; }; /******************************************** @@ -157,6 +155,10 @@ public: partition_element *curr_part_elem; partition_element *current_partition; + part_elem_value *curr_list_val; + uint curr_list_object; + uint num_columns; + /* These key_map's are used for Partitioning to enable quick decisions on whether we can derive more information about which partition to @@ -175,17 +177,17 @@ public: uint part_func_len; uint subpart_func_len; - uint no_parts; - uint no_subparts; + uint num_parts; + uint num_subparts; uint count_curr_subparts; uint part_error_code; - uint no_list_values; + uint num_list_values; - uint no_part_fields; - uint no_subpart_fields; - uint no_full_part_fields; + uint num_part_fields; + uint num_subpart_fields; + uint num_full_part_fields; uint has_null_part_id; /* @@ -196,9 +198,9 @@ public: uint16 linear_hash_mask; bool use_default_partitions; - bool use_default_no_partitions; + bool use_default_num_partitions; bool use_default_subpartitions; - bool use_default_no_subpartitions; + bool use_default_num_subpartitions; bool default_partitions_setup; bool defined_max_value; bool list_of_part_fields; @@ -208,7 +210,7 @@ public: bool is_auto_partitioned; bool from_openfrm; bool has_null_value; - + bool column_list; partition_info() : get_partition_id(NULL), get_part_partition_id(NULL), @@ -217,11 +219,8 @@ public: part_charset_field_array(NULL), subpart_charset_field_array(NULL), full_part_field_array(NULL), - full_part_charset_field_array(NULL), part_field_buffers(NULL), subpart_field_buffers(NULL), - full_part_field_buffers(NULL), restore_part_field_ptrs(NULL), restore_subpart_field_ptrs(NULL), - restore_full_part_field_ptrs(NULL), part_expr(NULL), subpart_expr(NULL), item_free_list(NULL), first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL), list_array(NULL), err_value(0), @@ -229,22 +228,23 @@ public: part_func_string(NULL), subpart_func_string(NULL), part_state(NULL), curr_part_elem(NULL), current_partition(NULL), + curr_list_object(0), num_columns(0), default_engine_type(NULL), part_result_type(INT_RESULT), part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION), part_info_len(0), part_state_len(0), part_func_len(0), subpart_func_len(0), - no_parts(0), no_subparts(0), + num_parts(0), num_subparts(0), count_curr_subparts(0), part_error_code(0), - no_list_values(0), no_part_fields(0), no_subpart_fields(0), - no_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0), - use_default_partitions(TRUE), use_default_no_partitions(TRUE), - use_default_subpartitions(TRUE), use_default_no_subpartitions(TRUE), + num_list_values(0), 
num_part_fields(0), num_subpart_fields(0), + num_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0), + use_default_partitions(TRUE), use_default_num_partitions(TRUE), + use_default_subpartitions(TRUE), use_default_num_subpartitions(TRUE), default_partitions_setup(FALSE), defined_max_value(FALSE), list_of_part_fields(FALSE), list_of_subpart_fields(FALSE), linear_hash_ind(FALSE), fixed(FALSE), is_auto_partitioned(FALSE), from_openfrm(FALSE), - has_null_value(FALSE) + has_null_value(FALSE), column_list(FALSE) { all_fields_in_PF.clear_all(); all_fields_in_PPF.clear_all(); @@ -267,27 +267,47 @@ public: /* Returns the total number of partitions on the leaf level */ uint get_tot_partitions() { - return no_parts * (is_sub_partitioned() ? no_subparts : 1); + return num_parts * (is_sub_partitioned() ? num_subparts : 1); } bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info, uint start_no); + char *has_unique_fields(); char *has_unique_names(); bool check_engine_mix(handlerton *engine_type, bool default_engine); - bool check_range_constants(); - bool check_list_constants(); + bool check_range_constants(THD *thd); + bool check_list_constants(THD *thd); bool check_partition_info(THD *thd, handlerton **eng_type, handler *file, HA_CREATE_INFO *info, bool check_partition_function); void print_no_partition_found(TABLE *table); + void print_debug(const char *str, uint*); + Item* get_column_item(Item *item, Field *field); + int fix_func_partition(THD *thd, + part_elem_value *val, + partition_element *part_elem, + uint part_id); + bool fix_column_value_functions(THD *thd, + part_elem_value *val, + uint part_id); + int fix_parser_data(THD *thd); + int add_max_value(); + void init_col_val(part_column_list_val *col_val, Item *item); + int reorganize_into_single_field_col_val(); + part_column_list_val *add_column_value(); + bool set_part_expr(char *start_token, Item *item_ptr, + char *end_token, bool is_subpart); + static int compare_column_values(const void *a, const void *b); bool set_up_charset_field_preps(); + bool check_partition_field_length(); + bool init_column_part(); + bool add_column_list_value(THD *thd, Item *item); private: static int list_part_cmp(const void* a, const void* b); - static int list_part_cmp_unsigned(const void* a, const void* b); bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info, uint start_no); bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info); - char *create_default_partition_names(uint part_no, uint no_parts, + char *create_default_partition_names(uint part_no, uint num_parts, uint start_no); char *create_subpartition_name(uint subpart_no, const char *part_name); bool has_unique_name(partition_element *element); @@ -313,7 +333,7 @@ void init_all_partitions_iterator(partition_info *part_info, PARTITION_ITERATOR *part_iter) { part_iter->part_nums.start= part_iter->part_nums.cur= 0; - part_iter->part_nums.end= part_info->no_parts; + part_iter->part_nums.end= part_info->num_parts; part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE; part_iter->get_next= get_next_partition_id_range; } diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 2b2d60c0657..0784370bbd3 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -6240,4 +6240,21 @@ ER_UNKNOWN_LOCALE ER_SLAVE_IGNORE_SERVER_IDS eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id" - +ER_SAME_NAME_PARTITION_FIELD + eng "Duplicate partition field name '%-.192s'" 
+ER_PARTITION_COLUMN_LIST_ERROR + eng "Inconsistency in usage of column lists for partitioning" +ER_WRONG_TYPE_COLUMN_VALUE_ERROR + eng "Partition column values of incorrect type" +ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR + eng "Too many fields in '%-.192s'" +ER_MAXVALUE_IN_VALUES_IN + eng "Cannot use MAXVALUE as value in VALUES IN" +ER_TOO_MANY_VALUES_ERROR + eng "Cannot have more than one value for this type of %-.64s partitioning" +ER_ROW_SINGLE_PARTITION_FIELD_ERROR + eng "Row expressions in VALUES IN only allowed for multi-field column partitioning" +ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD + eng "Field '%-.192s' is of a not allowed type for this type of partitioning" +ER_PARTITION_FIELDS_TOO_LONG + eng "The total length of the partitioning fields is too large" diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 317087854c1..034f987e0e7 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -6241,3 +6241,21 @@ ER_UNKNOWN_LOCALE ER_SLAVE_IGNORE_SERVER_IDS eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id" +ER_SAME_NAME_PARTITION_FIELD + eng "Duplicate partition field name '%-.192s'" +ER_PARTITION_COLUMN_LIST_ERROR + eng "Inconsistency in usage of column lists for partitioning" +ER_WRONG_TYPE_COLUMN_VALUE_ERROR + eng "Partition column values of incorrect type" +ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR + eng "Too many fields in '%-.192s'" +ER_MAXVALUE_IN_VALUES_IN + eng "Cannot use MAXVALUE as value in VALUES IN" +ER_TOO_MANY_VALUES_ERROR + eng "Cannot have more than one value for this type of %-.64s partitioning" +ER_ROW_SINGLE_PARTITION_FIELD_ERROR + eng "Row expressions in VALUES IN only allowed for multi-field column partitioning" +ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD + eng "Field '%-.192s' is of a not allowed type for this type of partitioning" +ER_PARTITION_FIELDS_TOO_LONG + eng "The total length of the partitioning fields is too large" diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 2adbc44eb12..9f4e4cbd2ca 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1497,7 +1497,7 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) keys_onoff(rhs.keys_onoff), tablespace_op(rhs.tablespace_op), partition_names(rhs.partition_names, mem_root), - no_parts(rhs.no_parts), + num_parts(rhs.num_parts), change_level(rhs.change_level), datetime_field(rhs.datetime_field), error_if_not_empty(rhs.error_if_not_empty) diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 05a4d1d78bf..e70feaeaeb0 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -899,7 +899,7 @@ public: enum enum_enable_or_disable keys_onoff; enum tablespace_op_type tablespace_op; List<char> partition_names; - uint no_parts; + uint num_parts; enum_alter_table_change_level change_level; Create_field *datetime_field; bool error_if_not_empty; @@ -909,7 +909,7 @@ public: flags(0), keys_onoff(LEAVE_AS_IS), tablespace_op(NO_TABLESPACE_OP), - no_parts(0), + num_parts(0), change_level(ALTER_TABLE_METADATA_ONLY), datetime_field(NULL), error_if_not_empty(FALSE) @@ -924,7 +924,7 @@ public: flags= 0; keys_onoff= LEAVE_AS_IS; tablespace_op= NO_TABLESPACE_OP; - no_parts= 0; + num_parts= 0; partition_names.empty(); change_level= ALTER_TABLE_METADATA_ONLY; datetime_field= 0; diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index eb711001ef6..07558ed5f6b 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,4 +1,4 @@ -/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -18,16 +18,29 @@ to partitioning introduced in MySQL version 5.1. It contains functionality used by all handlers that support partitioning, such as the partitioning handler itself and the NDB handler. + (Much of the code in this file has been split into partition_info.cc and + the header files partition_info.h + partition_element.h + sql_partition.h) - The first version was written by Mikael Ronstrom. + The first version was written by Mikael Ronstrom 2004-2006. + Various parts of the optimizer code was written by Sergey Petrunia. + Code have been maintained by Mattias Jonsson. + The second version was written by Mikael Ronstrom 2006-2007 with some + final fixes for partition pruning in 2008-2009 with assistance from Sergey + Petrunia and Mattias Jonsson. - This version supports RANGE partitioning, LIST partitioning, HASH + The first version supports RANGE partitioning, LIST partitioning, HASH partitioning and composite partitioning (hereafter called subpartitioning) where each RANGE/LIST partitioning is HASH partitioned. The hash function can either be supplied by the user or by only a list of fields (also called KEY partitioning), where the MySQL server will use an internal hash function. There are quite a few defaults that can be used as well. + + The second version introduces a new variant of RANGE and LIST partitioning + which is often referred to as column lists in the code variables. This + enables a user to specify a set of columns and their concatenated value + as the partition value. By comparing the concatenation of these values + the proper partition can be choosen. */ /* Some general useful functions */ @@ -54,9 +67,11 @@ const LEX_STRING partition_keywords[]= { C_STRING_WITH_LEN("LIST") }, { C_STRING_WITH_LEN("KEY") }, { C_STRING_WITH_LEN("MAXVALUE") }, - { C_STRING_WITH_LEN("LINEAR ") } + { C_STRING_WITH_LEN("LINEAR ") }, + { C_STRING_WITH_LEN(" COLUMN_LIST") } }; static const char *part_str= "PARTITION"; +static const char *subpart_str= "SUBPARTITION"; static const char *sub_str= "SUB"; static const char *by_str= "BY"; static const char *space_str= " "; @@ -65,26 +80,23 @@ static const char *end_paren_str= ")"; static const char *begin_paren_str= "("; static const char *comma_str= ","; -static int get_part_id_charset_func_all(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -static int get_part_id_charset_func_part(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -static int get_part_id_charset_func_subpart(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -static int get_part_part_id_charset_func(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -static int get_subpart_id_charset_func(partition_info *part_info, - uint32 *part_id); +int get_partition_id_list_col(partition_info *part_info, + uint32 *part_id, + longlong *func_value); int get_partition_id_list(partition_info *part_info, uint32 *part_id, longlong *func_value); +int get_partition_id_range_col(partition_info *part_info, + uint32 *part_id, + longlong *func_value); int get_partition_id_range(partition_info *part_info, uint32 *part_id, longlong *func_value); +static int get_part_id_charset_func_part(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +static int get_part_id_charset_func_subpart(partition_info *part_info, + uint32 *part_id); int 
get_partition_id_hash_nosub(partition_info *part_info, uint32 *part_id, longlong *func_value); @@ -97,30 +109,9 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info, int get_partition_id_linear_key_nosub(partition_info *part_info, uint32 *part_id, longlong *func_value); -int get_partition_id_range_sub_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_range_sub_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_range_sub_linear_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_range_sub_linear_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_list_sub_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_list_sub_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_list_sub_linear_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value); -int get_partition_id_list_sub_linear_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value); +int get_partition_id_with_sub(partition_info *part_info, + uint32 *part_id, + longlong *func_value); int get_partition_id_hash_sub(partition_info *part_info, uint32 *part_id); int get_partition_id_key_sub(partition_info *part_info, @@ -138,17 +129,64 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter); uint32 get_next_partition_id_list(PARTITION_ITERATOR* part_iter); int get_part_iter_for_interval_via_mapping(partition_info *part_info, bool is_subpart, + uint32 *store_length_array, uchar *min_value, uchar *max_value, + uint min_len, uint max_len, uint flags, PARTITION_ITERATOR *part_iter); +int get_part_iter_for_interval_cols_via_map(partition_info *part_info, + bool is_subpart, + uint32 *store_length_array, + uchar *min_value, uchar *max_value, + uint min_len, uint max_len, + uint flags, + PARTITION_ITERATOR *part_iter); int get_part_iter_for_interval_via_walking(partition_info *part_info, bool is_subpart, + uint32 *store_length_array, uchar *min_value, uchar *max_value, + uint min_len, uint max_len, uint flags, PARTITION_ITERATOR *part_iter); +static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec); +static int cmp_rec_and_tuple_prune(part_column_list_val *val, + uint32 n_vals_in_rec, + bool tail_is_min); #ifdef WITH_PARTITION_STORAGE_ENGINE /* + Convert constants in VALUES definition to the character set the + corresponding field uses. 
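The reason the conversion is needed, in short: the same logical string has different byte sequences in different character sets, so a VALUES constant can only be stored into and compared against the partitioning field after it has been converted to that field's charset. A tiny standalone illustration in plain C++ (no server types involved):

#include <cstdio>

int main()
{
  const unsigned char latin1_e_acute[]= { 0xE9 };        /* 'é' in latin1 */
  const unsigned char utf8_e_acute[]  = { 0xC3, 0xA9 };  /* 'é' in utf8   */
  /* Same logical character, different byte sequences: byte-wise comparison
     is only meaningful after converting to one common character set. */
  printf("latin1 uses %u byte(s), utf8 uses %u byte(s), first byte equal: %s\n",
         (unsigned)sizeof(latin1_e_acute), (unsigned)sizeof(utf8_e_acute),
         latin1_e_acute[0] == utf8_e_acute[0] ? "yes" : "no");
  return 0;
}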
+ + SYNOPSIS + convert_charset_partition_constant() + item Item to convert + cs Character set to convert to + + RETURN VALUE + NULL Error + item New converted item +*/ + +Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs) +{ + THD *thd= current_thd; + Name_resolution_context *context= &thd->lex->current_select->context; + TABLE_LIST *save_list= context->table_list; + const char *save_where= thd->where; + + item= item->safe_charset_converter(cs); + context->table_list= NULL; + thd->where= "convert character set partition constant"; + if (!item || item->fix_fields(thd, (Item**)NULL)) + item= NULL; + thd->where= save_where; + context->table_list= save_list; + return item; +} + + +/* A support function to check if a name is in a list of strings SYNOPSIS @@ -165,7 +203,7 @@ bool is_name_in_list(char *name, List<char> list_names) { List_iterator<char> names_it(list_names); - uint no_names= list_names.elements; + uint num_names= list_names.elements; uint i= 0; do @@ -173,7 +211,7 @@ bool is_name_in_list(char *name, char *list_name= names_it++; if (!(my_strcasecmp(system_charset_info, name, list_name))) return TRUE; - } while (++i < no_names); + } while (++i < num_names); return FALSE; } @@ -200,26 +238,26 @@ bool partition_default_handling(TABLE *table, partition_info *part_info, { DBUG_ENTER("partition_default_handling"); - if (part_info->use_default_no_partitions) + if (part_info->use_default_num_partitions) { if (!is_create_table_ind && - table->file->get_no_parts(normalized_path, &part_info->no_parts)) + table->file->get_no_parts(normalized_path, &part_info->num_parts)) { DBUG_RETURN(TRUE); } } else if (part_info->is_sub_partitioned() && - part_info->use_default_no_subpartitions) + part_info->use_default_num_subpartitions) { - uint no_parts; + uint num_parts; if (!is_create_table_ind && - (table->file->get_no_parts(normalized_path, &no_parts))) + (table->file->get_no_parts(normalized_path, &num_parts))) { DBUG_RETURN(TRUE); } - DBUG_ASSERT(part_info->no_parts > 0); - part_info->no_subparts= no_parts / part_info->no_parts; - DBUG_ASSERT((no_parts % part_info->no_parts) == 0); + DBUG_ASSERT(part_info->num_parts > 0); + part_info->num_subparts= num_parts / part_info->num_parts; + DBUG_ASSERT((num_parts % part_info->num_parts) == 0); } part_info->set_up_defaults_for_partitioning(table->file, (ulonglong)0, (uint)0); @@ -252,8 +290,8 @@ bool check_reorganise_list(partition_info *new_part_info, List<char> list_part_names) { uint new_count, old_count; - uint no_new_parts= new_part_info->partitions.elements; - uint no_old_parts= old_part_info->partitions.elements; + uint num_new_parts= new_part_info->partitions.elements; + uint num_old_parts= old_part_info->partitions.elements; List_iterator<partition_element> new_parts_it(new_part_info->partitions); bool same_part_info= (new_part_info == old_part_info); DBUG_ENTER("check_reorganise_list"); @@ -276,8 +314,8 @@ bool check_reorganise_list(partition_info *new_part_info, if (!is_name_in_list(old_name, list_part_names)) DBUG_RETURN(TRUE); } - } while (old_count < no_old_parts); - } while (new_count < no_new_parts); + } while (old_count < num_old_parts); + } while (new_count < num_new_parts); DBUG_RETURN(FALSE); } @@ -452,9 +490,10 @@ static bool set_up_field_array(TABLE *table, bool is_sub_part) { Field **ptr, *field, **field_array; - uint no_fields= 0; + uint num_fields= 0; uint size_field_array; uint i= 0; + uint inx; partition_info *part_info= table->part_info; int result= FALSE; DBUG_ENTER("set_up_field_array"); @@ -463,9 +502,19 @@ 
static bool set_up_field_array(TABLE *table, while ((field= *(ptr++))) { if (field->flags & GET_FIXED_FIELDS_FLAG) - no_fields++; + num_fields++; + } + if (num_fields > MAX_REF_PARTS) + { + char *ptr; + if (is_sub_part) + ptr= (char*)"subpartition function"; + else + ptr= (char*)"partition function"; + my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), ptr); + DBUG_RETURN(TRUE); } - if (no_fields == 0) + if (num_fields == 0) { /* We are using hidden key as partitioning field @@ -473,8 +522,8 @@ static bool set_up_field_array(TABLE *table, DBUG_ASSERT(!is_sub_part); DBUG_RETURN(result); } - size_field_array= (no_fields+1)*sizeof(Field*); - field_array= (Field**)sql_alloc(size_field_array); + size_field_array= (num_fields+1)*sizeof(Field*); + field_array= (Field**)sql_calloc(size_field_array); if (unlikely(!field_array)) { mem_alloc_error(size_field_array); @@ -489,7 +538,32 @@ static bool set_up_field_array(TABLE *table, field->flags|= FIELD_IN_PART_FUNC_FLAG; if (likely(!result)) { - field_array[i++]= field; + if (!is_sub_part && part_info->column_list) + { + List_iterator<char> it(part_info->part_field_list); + char *field_name; + + DBUG_ASSERT(num_fields == part_info->part_field_list.elements); + inx= 0; + do + { + field_name= it++; + if (!my_strcasecmp(system_charset_info, + field_name, + field->field_name)) + break; + } while (++inx < num_fields); + if (inx == num_fields) + { + mem_alloc_error(1); + result= TRUE; + continue; + } + } + else + inx= i; + field_array[inx]= field; + i++; /* We check that the fields are proper. It is required for each @@ -507,16 +581,16 @@ static bool set_up_field_array(TABLE *table, } } } - field_array[no_fields]= 0; + field_array[num_fields]= 0; if (!is_sub_part) { part_info->part_field_array= field_array; - part_info->no_part_fields= no_fields; + part_info->num_part_fields= num_fields; } else { part_info->subpart_field_array= field_array; - part_info->no_subpart_fields= no_fields; + part_info->num_subpart_fields= num_fields; } DBUG_RETURN(result); } @@ -555,36 +629,36 @@ static bool create_full_part_field_array(THD *thd, TABLE *table, if (!part_info->is_sub_partitioned()) { part_info->full_part_field_array= part_info->part_field_array; - part_info->no_full_part_fields= part_info->no_part_fields; + part_info->num_full_part_fields= part_info->num_part_fields; } else { Field *field, **field_array; - uint no_part_fields=0, size_field_array; + uint num_part_fields=0, size_field_array; ptr= table->field; while ((field= *(ptr++))) { if (field->flags & FIELD_IN_PART_FUNC_FLAG) - no_part_fields++; + num_part_fields++; } - size_field_array= (no_part_fields+1)*sizeof(Field*); - field_array= (Field**)sql_alloc(size_field_array); + size_field_array= (num_part_fields+1)*sizeof(Field*); + field_array= (Field**)sql_calloc(size_field_array); if (unlikely(!field_array)) { mem_alloc_error(size_field_array); result= TRUE; goto end; } - no_part_fields= 0; + num_part_fields= 0; ptr= table->field; while ((field= *(ptr++))) { if (field->flags & FIELD_IN_PART_FUNC_FLAG) - field_array[no_part_fields++]= field; + field_array[num_part_fields++]= field; } - field_array[no_part_fields]=0; + field_array[num_part_fields]=0; part_info->full_part_field_array= field_array; - part_info->no_full_part_fields= no_part_fields; + part_info->num_full_part_fields= num_part_fields; } /* @@ -780,16 +854,16 @@ static bool handle_list_of_fields(List_iterator<char> it, goto end; } } - if (is_list_empty) + if (is_list_empty && part_info->part_type == HASH_PARTITION) { uint primary_key= 
table->s->primary_key; if (primary_key != MAX_KEY) { - uint no_key_parts= table->key_info[primary_key].key_parts, i; + uint num_key_parts= table->key_info[primary_key].key_parts, i; /* In the case of an empty list we use primary key as partition key. */ - for (i= 0; i < no_key_parts; i++) + for (i= 0; i < num_key_parts; i++) { Field *field= table->key_info[primary_key].key_part[i].field; field->flags|= GET_FIXED_FIELDS_FLAG; @@ -853,7 +927,7 @@ int check_signed_flag(partition_info *part_info) error= ER_PARTITION_CONST_DOMAIN_ERROR; break; } - } while (++i < part_info->no_parts); + } while (++i < part_info->num_parts); } return error; } @@ -872,7 +946,6 @@ int check_signed_flag(partition_info *part_info) table The table object part_info Reference to partitioning data structure is_sub_part Is the table subpartitioned as well - is_field_to_be_setup Flag if we are to set-up field arrays RETURN VALUE TRUE An error occurred, something was wrong with the @@ -895,8 +968,8 @@ int check_signed_flag(partition_info *part_info) on the field object. */ -bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, - bool is_sub_part, bool is_field_to_be_setup) +static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, + bool is_sub_part) { partition_info *part_info= table->part_info; uint dir_length, home_dir_length; @@ -911,13 +984,6 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, bool save_use_only_table_context; DBUG_ENTER("fix_fields_part_func"); - if (part_info->fixed) - { - if (!(is_sub_part || (error= check_signed_flag(part_info)))) - result= FALSE; - goto end; - } - /* Set-up the TABLE_LIST object to be a list with a single table Set the object to zero to create NULL pointers and set alias @@ -989,8 +1055,7 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, if (unlikely(error)) { DBUG_PRINT("info", ("Field in partition function not part of table")); - if (is_field_to_be_setup) - clear_field_flag(table); + clear_field_flag(table); goto end; } thd->where= save_where; @@ -1002,11 +1067,7 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, } if ((!is_sub_part) && (error= check_signed_flag(part_info))) goto end; - result= FALSE; - if (is_field_to_be_setup) - result= set_up_field_array(table, is_sub_part); - if (!is_sub_part) - part_info->fixed= TRUE; + result= set_up_field_array(table, is_sub_part); end: table->get_fields_in_item_tree= FALSE; table->map= 0; //Restore old value @@ -1185,9 +1246,9 @@ void check_range_capable_PF(TABLE *table) static bool set_up_partition_bitmap(THD *thd, partition_info *part_info) { uint32 *bitmap_buf; - uint bitmap_bits= part_info->no_subparts? - (part_info->no_subparts* part_info->no_parts): - part_info->no_parts; + uint bitmap_bits= part_info->num_subparts? 
+ (part_info->num_subparts* part_info->num_parts): + part_info->num_parts; uint bitmap_bytes= bitmap_buffer_size(bitmap_bits); DBUG_ENTER("set_up_partition_bitmap"); @@ -1287,64 +1348,47 @@ static void set_up_partition_func_pointers(partition_info *part_info) if (part_info->is_sub_partitioned()) { + part_info->get_partition_id= get_partition_id_with_sub; if (part_info->part_type == RANGE_PARTITION) { - part_info->get_part_partition_id= get_partition_id_range; + if (part_info->column_list) + part_info->get_part_partition_id= get_partition_id_range_col; + else + part_info->get_part_partition_id= get_partition_id_range; if (part_info->list_of_subpart_fields) { if (part_info->linear_hash_ind) - { - part_info->get_partition_id= get_partition_id_range_sub_linear_key; part_info->get_subpartition_id= get_partition_id_linear_key_sub; - } else - { - part_info->get_partition_id= get_partition_id_range_sub_key; part_info->get_subpartition_id= get_partition_id_key_sub; - } } else { if (part_info->linear_hash_ind) - { - part_info->get_partition_id= get_partition_id_range_sub_linear_hash; part_info->get_subpartition_id= get_partition_id_linear_hash_sub; - } else - { - part_info->get_partition_id= get_partition_id_range_sub_hash; part_info->get_subpartition_id= get_partition_id_hash_sub; - } } } else /* LIST Partitioning */ { - part_info->get_part_partition_id= get_partition_id_list; + if (part_info->column_list) + part_info->get_part_partition_id= get_partition_id_list_col; + else + part_info->get_part_partition_id= get_partition_id_list; if (part_info->list_of_subpart_fields) { if (part_info->linear_hash_ind) - { - part_info->get_partition_id= get_partition_id_list_sub_linear_key; part_info->get_subpartition_id= get_partition_id_linear_key_sub; - } else - { - part_info->get_partition_id= get_partition_id_list_sub_key; part_info->get_subpartition_id= get_partition_id_key_sub; - } } else { if (part_info->linear_hash_ind) - { - part_info->get_partition_id= get_partition_id_list_sub_linear_hash; part_info->get_subpartition_id= get_partition_id_linear_hash_sub; - } else - { - part_info->get_partition_id= get_partition_id_list_sub_hash; part_info->get_subpartition_id= get_partition_id_hash_sub; - } } } } @@ -1353,9 +1397,19 @@ static void set_up_partition_func_pointers(partition_info *part_info) part_info->get_part_partition_id= NULL; part_info->get_subpartition_id= NULL; if (part_info->part_type == RANGE_PARTITION) - part_info->get_partition_id= get_partition_id_range; + { + if (part_info->column_list) + part_info->get_partition_id= get_partition_id_range_col; + else + part_info->get_partition_id= get_partition_id_range; + } else if (part_info->part_type == LIST_PARTITION) - part_info->get_partition_id= get_partition_id_list; + { + if (part_info->column_list) + part_info->get_partition_id= get_partition_id_list_col; + else + part_info->get_partition_id= get_partition_id_list; + } else /* HASH partitioning */ { if (part_info->list_of_part_fields) @@ -1374,32 +1428,43 @@ static void set_up_partition_func_pointers(partition_info *part_info) } } } - if (part_info->full_part_charset_field_array) - { - DBUG_ASSERT(part_info->get_partition_id); - part_info->get_partition_id_charset= part_info->get_partition_id; - if (part_info->part_charset_field_array && - part_info->subpart_charset_field_array) - part_info->get_partition_id= get_part_id_charset_func_all; - else if (part_info->part_charset_field_array) - part_info->get_partition_id= get_part_id_charset_func_part; - else - part_info->get_partition_id= 
get_part_id_charset_func_subpart; - } - if (part_info->part_charset_field_array && - part_info->is_sub_partitioned()) + /* + We need special functions to handle character sets since they require copy + of field pointers and restore afterwards. For subpartitioned tables we do + the copy and restore individually on the part and subpart parts. For non- + subpartitioned tables we use the same functions as used for the parts part + of subpartioning. + Thus for subpartitioned tables the get_partition_id is always + get_partition_id_with_sub, even when character sets exists. + */ + if (part_info->part_charset_field_array) { - DBUG_ASSERT(part_info->get_part_partition_id); - part_info->get_part_partition_id_charset= + if (part_info->is_sub_partitioned()) + { + DBUG_ASSERT(part_info->get_part_partition_id); + if (!part_info->column_list) + { + part_info->get_part_partition_id_charset= part_info->get_part_partition_id; - part_info->get_part_partition_id= get_part_part_id_charset_func; + part_info->get_part_partition_id= get_part_id_charset_func_part; + } + } + else + { + DBUG_ASSERT(part_info->get_partition_id); + if (!part_info->column_list) + { + part_info->get_part_partition_id_charset= part_info->get_partition_id; + part_info->get_part_partition_id= get_part_id_charset_func_part; + } + } } if (part_info->subpart_charset_field_array) { DBUG_ASSERT(part_info->get_subpartition_id); part_info->get_subpartition_id_charset= part_info->get_subpartition_id; - part_info->get_subpartition_id= get_subpart_id_charset_func; + part_info->get_subpartition_id= get_part_id_charset_func_subpart; } DBUG_VOID_RETURN; } @@ -1407,22 +1472,22 @@ static void set_up_partition_func_pointers(partition_info *part_info) /* For linear hashing we need a mask which is on the form 2**n - 1 where - 2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7. + 2**n >= num_parts. Thus if num_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7. SYNOPSIS set_linear_hash_mask() part_info Reference to partitioning data structure - no_parts Number of parts in linear hash partitioning + num_parts Number of parts in linear hash partitioning RETURN VALUE NONE */ -void set_linear_hash_mask(partition_info *part_info, uint no_parts) +void set_linear_hash_mask(partition_info *part_info, uint num_parts) { uint mask; - for (mask= 1; mask < no_parts; mask<<=1) + for (mask= 1; mask < num_parts; mask<<=1) ; part_info->linear_hash_mask= mask - 1; } @@ -1436,7 +1501,7 @@ void set_linear_hash_mask(partition_info *part_info, uint no_parts) get_part_id_from_linear_hash() hash_value Hash value calculated by HASH function or KEY function mask Mask calculated previously by set_linear_hash_mask - no_parts Number of partitions in HASH partitioned part + num_parts Number of partitions in HASH partitioned part RETURN VALUE part_id The calculated partition identity (starting at 0) @@ -1449,11 +1514,11 @@ void set_linear_hash_mask(partition_info *part_info, uint no_parts) */ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask, - uint no_parts) + uint num_parts) { uint32 part_id= (uint32)(hash_value & mask); - if (part_id >= no_parts) + if (part_id >= num_parts) { uint new_mask= ((mask + 1) >> 1) - 1; part_id= (uint32)(hash_value & new_mask); @@ -1597,7 +1662,7 @@ bool fix_partition_func(THD *thd, TABLE *table, function is correct. 
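A minimal standalone illustration of the linear-hash mapping described in the comment on set_linear_hash_mask() above (the helper name linear_hash_part_id and the plain C types are assumptions for illustration, not part of this patch): with num_parts = 6 the mask becomes 7, so a hash value of 13 maps to 13 & 7 = 5, while 14 maps to 14 & 7 = 6, which is out of range and is folded with the halved mask to 14 & 3 = 2.

  /* Sketch combining set_linear_hash_mask() and get_part_id_from_linear_hash(). */
  static unsigned linear_hash_part_id(long long hash_value, unsigned num_parts)
  {
    unsigned mask;
    for (mask= 1; mask < num_parts; mask<<= 1)    /* smallest 2**n >= num_parts */
      ;
    mask-= 1;                                     /* the 2**n - 1 mask */
    unsigned part_id= (unsigned) (hash_value & mask);
    if (part_id >= num_parts)                     /* fold back with the halved mask */
      part_id= (unsigned) (hash_value & (((mask + 1) >> 1) - 1));
    return part_id;
  }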
*/ if (part_info->linear_hash_ind) - set_linear_hash_mask(part_info, part_info->no_subparts); + set_linear_hash_mask(part_info, part_info->num_subparts); if (part_info->list_of_subpart_fields) { List_iterator<char> it(part_info->subpart_field_list); @@ -1607,12 +1672,12 @@ bool fix_partition_func(THD *thd, TABLE *table, else { if (unlikely(fix_fields_part_func(thd, part_info->subpart_expr, - table, TRUE, TRUE))) + table, TRUE))) goto end; if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT)) { - my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), - "SUBPARTITION"); + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0), + subpart_str); goto end; } } @@ -1625,7 +1690,7 @@ bool fix_partition_func(THD *thd, TABLE *table, if (part_info->part_type == HASH_PARTITION) { if (part_info->linear_hash_ind) - set_linear_hash_mask(part_info, part_info->no_parts); + set_linear_hash_mask(part_info, part_info->num_parts); if (part_info->list_of_part_fields) { List_iterator<char> it(part_info->part_field_list); @@ -1635,32 +1700,43 @@ bool fix_partition_func(THD *thd, TABLE *table, else { if (unlikely(fix_fields_part_func(thd, part_info->part_expr, - table, FALSE, TRUE))) + table, FALSE))) goto end; if (unlikely(part_info->part_expr->result_type() != INT_RESULT)) { - my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str); + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0), part_str); goto end; } part_info->part_result_type= INT_RESULT; } + part_info->fixed= TRUE; } else { const char *error_str; - if (unlikely(fix_fields_part_func(thd, part_info->part_expr, - table, FALSE, TRUE))) - goto end; + if (part_info->column_list) + { + List_iterator<char> it(part_info->part_field_list); + if (unlikely(handle_list_of_fields(it, table, part_info, FALSE))) + goto end; + } + else + { + if (unlikely(fix_fields_part_func(thd, part_info->part_expr, + table, FALSE))) + goto end; + } + part_info->fixed= TRUE; if (part_info->part_type == RANGE_PARTITION) { error_str= partition_keywords[PKW_RANGE].str; - if (unlikely(part_info->check_range_constants())) + if (unlikely(part_info->check_range_constants(thd))) goto end; } else if (part_info->part_type == LIST_PARTITION) { error_str= partition_keywords[PKW_LIST].str; - if (unlikely(part_info->check_list_constants())) + if (unlikely(part_info->check_list_constants(thd))) goto end; } else @@ -1669,12 +1745,13 @@ bool fix_partition_func(THD *thd, TABLE *table, my_error(ER_INCONSISTENT_PARTITION_INFO_ERROR, MYF(0)); goto end; } - if (unlikely(part_info->no_parts < 1)) + if (unlikely(part_info->num_parts < 1)) { my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str); goto end; } - if (unlikely(part_info->part_expr->result_type() != INT_RESULT)) + if (unlikely(!part_info->column_list && + part_info->part_expr->result_type() != INT_RESULT)) { my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str); goto end; @@ -1682,7 +1759,8 @@ bool fix_partition_func(THD *thd, TABLE *table, } if (((part_info->part_type != HASH_PARTITION || part_info->list_of_part_fields == FALSE) && - check_part_func_fields(part_info->part_field_array, TRUE)) || + (!part_info->column_list && + check_part_func_fields(part_info->part_field_array, TRUE))) || (part_info->list_of_part_fields == FALSE && part_info->is_sub_partitioned() && check_part_func_fields(part_info->subpart_field_array, TRUE))) @@ -1705,6 +1783,11 @@ bool fix_partition_func(THD *thd, TABLE *table, my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); goto end; } + if 
(unlikely(part_info->check_partition_field_length())) + { + my_error(ER_PARTITION_FIELDS_TOO_LONG, MYF(0)); + goto end; + } check_range_capable_PF(table); set_up_partition_key_maps(table, part_info); set_up_partition_func_pointers(part_info); @@ -1727,9 +1810,9 @@ end: static int add_write(File fptr, const char *buf, uint len) { - uint len_written= my_write(fptr, (const uchar*)buf, len, MYF(0)); + uint ret_code= my_write(fptr, (const uchar*)buf, len, MYF(MY_FNABP)); - if (likely(len == len_written)) + if (likely(ret_code == 0)) return 0; else return 1; @@ -1778,14 +1861,8 @@ static int add_begin_parenthesis(File fptr) static int add_part_key_word(File fptr, const char *key_string) { int err= add_string(fptr, key_string); - err+= add_space(fptr); - return err + add_begin_parenthesis(fptr); -} - -static int add_hash(File fptr) -{ - return add_part_key_word(fptr, partition_keywords[PKW_HASH].str); + return err; } static int add_partition(File fptr) @@ -1816,16 +1893,16 @@ static int add_subpartition_by(File fptr) return err + add_partition_by(fptr); } -static int add_key_partition(File fptr, List<char> field_list) +static int add_part_field_list(File fptr, List<char> field_list) { - uint i, no_fields; - int err; + uint i, num_fields; + int err= 0; List_iterator<char> part_it(field_list); - err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str); - no_fields= field_list.elements; + num_fields= field_list.elements; i= 0; - while (i < no_fields) + err+= add_begin_parenthesis(fptr); + while (i < num_fields) { const char *field_str= part_it++; String field_string("", 0, system_charset_info); @@ -1836,10 +1913,11 @@ static int add_key_partition(File fptr, List<char> field_list) strlen(field_str)); thd->options= save_options; err+= add_string_object(fptr, &field_string); - if (i != (no_fields-1)) + if (i != (num_fields-1)) err+= add_comma(fptr); i++; } + err+= add_end_parenthesis(fptr); return err; } @@ -1949,37 +2027,200 @@ static int add_partition_options(File fptr, partition_element *p_elem) return err + add_engine(fptr,p_elem->engine_type); } -static int add_partition_values(File fptr, partition_info *part_info, partition_element *p_elem) +static int check_part_field(Create_field *sql_field, + bool *need_cs_check) +{ + *need_cs_check= FALSE; + if (sql_field->sql_type == MYSQL_TYPE_TIMESTAMP) + goto error; + if (sql_field->sql_type < MYSQL_TYPE_VARCHAR || + sql_field->sql_type == MYSQL_TYPE_NEWDECIMAL) + return FALSE; + if (sql_field->sql_type >= MYSQL_TYPE_TINY_BLOB && + sql_field->sql_type <= MYSQL_TYPE_BLOB) + { + my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0)); + return TRUE; + } + switch (sql_field->sql_type) + { + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + *need_cs_check= TRUE; + return FALSE; + break; + default: + goto error; + } +error: + my_error(ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD, MYF(0), + sql_field->field_name); + return TRUE; +} + +static Create_field* get_sql_field(char *field_name, + Alter_info *alter_info) +{ + List_iterator<Create_field> it(alter_info->create_list); + Create_field *sql_field; + DBUG_ENTER("get_sql_field"); + + while ((sql_field= it++)) + { + if (!(my_strcasecmp(system_charset_info, + sql_field->field_name, + field_name))) + { + DBUG_RETURN(sql_field); + } + } + DBUG_RETURN(NULL); +} + +static int add_column_list_values(File fptr, partition_info *part_info, + part_elem_value *list_value, + HA_CREATE_INFO *create_info, + Alter_info *alter_info) +{ + int err= 0; + uint i; + List_iterator<char> 
it(part_info->part_field_list); + uint num_elements= part_info->part_field_list.elements; + bool use_parenthesis= (part_info->part_type == LIST_PARTITION && + part_info->num_columns > 1U); + + if (use_parenthesis) + err+= add_begin_parenthesis(fptr); + for (i= 0; i < num_elements; i++) + { + part_column_list_val *col_val= &list_value->col_val_array[i]; + char *field_name= it++; + if (col_val->max_value) + err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str); + else if (col_val->null_value) + err+= add_string(fptr, "NULL"); + else + { + char buffer[MAX_KEY_LENGTH]; + String str(buffer, sizeof(buffer), &my_charset_bin); + Item *item_expr= col_val->item_expression; + if (item_expr->null_value) + err+= add_string(fptr, "NULL"); + else + { + String *res; + CHARSET_INFO *field_cs; + + /* + This function is called at a very early stage, even before + we have prepared the sql_field objects. Thus we have to + find the proper sql_field object and get the character set + from that object. + */ + if (create_info) + { + Create_field *sql_field; + bool need_cs_check= FALSE; + + if (!(sql_field= get_sql_field(field_name, + alter_info))) + { + my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0)); + return 1; + } + if (check_part_field(sql_field, &need_cs_check)) + return 1; + if (need_cs_check) + field_cs= get_sql_field_charset(sql_field, create_info); + else + field_cs= NULL; + } + else + field_cs= part_info->part_field_array[i]->charset(); + if (field_cs && field_cs != item_expr->collation.collation) + { + if (!(item_expr= convert_charset_partition_constant(item_expr, + field_cs))) + { + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + return 1; + } + } + res= item_expr->val_str(&str); + if (!res) + { + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + return 1; + } + if (item_expr->result_type() == STRING_RESULT) + { + if (field_cs) + { + err+= add_string(fptr,"_"); + err+= add_string(fptr, field_cs->csname); + } + err+= add_string(fptr,"'"); + } + err+= add_string_object(fptr, res); + if (item_expr->result_type() == STRING_RESULT) + err+= add_string(fptr,"'"); + } + } + if (i != (num_elements - 1)) + err+= add_string(fptr, comma_str); + } + if (use_parenthesis) + err+= add_end_parenthesis(fptr); + return err; +} + +static int add_partition_values(File fptr, partition_info *part_info, + partition_element *p_elem, + HA_CREATE_INFO *create_info, + Alter_info *alter_info) { int err= 0; if (part_info->part_type == RANGE_PARTITION) { err+= add_string(fptr, " VALUES LESS THAN "); - if (!p_elem->max_value) + if (part_info->column_list) { + List_iterator<part_elem_value> list_val_it(p_elem->list_val_list); + part_elem_value *list_value= list_val_it++; err+= add_begin_parenthesis(fptr); - if (p_elem->signed_flag) - err+= add_int(fptr, p_elem->range_value); - else - err+= add_uint(fptr, p_elem->range_value); + err+= add_column_list_values(fptr, part_info, list_value, + create_info, alter_info); err+= add_end_parenthesis(fptr); } else - err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str); + { + if (!p_elem->max_value) + { + err+= add_begin_parenthesis(fptr); + if (p_elem->signed_flag) + err+= add_int(fptr, p_elem->range_value); + else + err+= add_uint(fptr, p_elem->range_value); + err+= add_end_parenthesis(fptr); + } + else + err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str); + } } else if (part_info->part_type == LIST_PARTITION) { uint i; List_iterator<part_elem_value> list_val_it(p_elem->list_val_list); err+= add_string(fptr, " VALUES IN "); - uint no_items= 
p_elem->list_val_list.elements; + uint num_items= p_elem->list_val_list.elements; err+= add_begin_parenthesis(fptr); if (p_elem->has_null_value) { err+= add_string(fptr, "NULL"); - if (no_items == 0) + if (num_items == 0) { err+= add_end_parenthesis(fptr); goto end; @@ -1991,13 +2232,19 @@ static int add_partition_values(File fptr, partition_info *part_info, partition_ { part_elem_value *list_value= list_val_it++; - if (!list_value->unsigned_flag) - err+= add_int(fptr, list_value->value); + if (part_info->column_list) + err+= add_column_list_values(fptr, part_info, list_value, + create_info, alter_info); else - err+= add_uint(fptr, list_value->value); - if (i != (no_items-1)) + { + if (!list_value->unsigned_flag) + err+= add_int(fptr, list_value->value); + else + err+= add_uint(fptr, list_value->value); + } + if (i != (num_items-1)) err+= add_comma(fptr); - } while (++i < no_items); + } while (++i < num_items); err+= add_end_parenthesis(fptr); } end: @@ -2016,6 +2263,8 @@ end: use_sql_alloc Allocate buffer from sql_alloc if true otherwise use my_malloc show_partition_options Should we display partition options + create_info Info generated by parser + alter_info Info generated by parser RETURN VALUES NULL error @@ -2044,9 +2293,11 @@ end: char *generate_partition_syntax(partition_info *part_info, uint *buf_length, bool use_sql_alloc, - bool show_partition_options) + bool show_partition_options, + HA_CREATE_INFO *create_info, + Alter_info *alter_info) { - uint i,j, tot_no_parts, no_subparts; + uint i,j, tot_num_parts, num_subparts; partition_element *part_elem; ulonglong buffer_length; char path[FN_REFLEN]; @@ -2077,9 +2328,12 @@ char *generate_partition_syntax(partition_info *part_info, if (part_info->linear_hash_ind) err+= add_string(fptr, partition_keywords[PKW_LINEAR].str); if (part_info->list_of_part_fields) - err+= add_key_partition(fptr, part_info->part_field_list); + { + err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str); + err+= add_part_field_list(fptr, part_info->part_field_list); + } else - err+= add_hash(fptr); + err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str); break; default: DBUG_ASSERT(0); @@ -2089,15 +2343,23 @@ char *generate_partition_syntax(partition_info *part_info, DBUG_RETURN(NULL); } if (part_info->part_expr) + { + err+= add_begin_parenthesis(fptr); err+= add_string_len(fptr, part_info->part_func_string, part_info->part_func_len); - err+= add_end_parenthesis(fptr); - if ((!part_info->use_default_no_partitions) && + err+= add_end_parenthesis(fptr); + } + else if (part_info->column_list) + { + err+= add_string(fptr, partition_keywords[PKW_COLUMNS].str); + err+= add_part_field_list(fptr, part_info->part_field_list); + } + if ((!part_info->use_default_num_partitions) && part_info->use_default_partitions) { err+= add_string(fptr, "\n"); err+= add_string(fptr, "PARTITIONS "); - err+= add_int(fptr, part_info->no_parts); + err+= add_int(fptr, part_info->num_parts); } if (part_info->is_sub_partitioned()) { @@ -2107,23 +2369,29 @@ char *generate_partition_syntax(partition_info *part_info, if (part_info->linear_hash_ind) err+= add_string(fptr, partition_keywords[PKW_LINEAR].str); if (part_info->list_of_subpart_fields) - err+= add_key_partition(fptr, part_info->subpart_field_list); + { + add_part_key_word(fptr, partition_keywords[PKW_KEY].str); + add_part_field_list(fptr, part_info->subpart_field_list); + } else - err+= add_hash(fptr); + err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str); if (part_info->subpart_expr) + { + err+= 
add_begin_parenthesis(fptr); err+= add_string_len(fptr, part_info->subpart_func_string, part_info->subpart_func_len); - err+= add_end_parenthesis(fptr); - if ((!part_info->use_default_no_subpartitions) && + err+= add_end_parenthesis(fptr); + } + if ((!part_info->use_default_num_subpartitions) && part_info->use_default_subpartitions) { err+= add_string(fptr, "\n"); err+= add_string(fptr, "SUBPARTITIONS "); - err+= add_int(fptr, part_info->no_subparts); + err+= add_int(fptr, part_info->num_subparts); } } - tot_no_parts= part_info->partitions.elements; - no_subparts= part_info->no_subparts; + tot_num_parts= part_info->partitions.elements; + num_subparts= part_info->num_subparts; if (!part_info->use_default_partitions) { @@ -2146,7 +2414,8 @@ char *generate_partition_syntax(partition_info *part_info, first= FALSE; err+= add_partition(fptr); err+= add_name_string(fptr, part_elem->partition_name); - err+= add_partition_values(fptr, part_info, part_elem); + err+= add_partition_values(fptr, part_info, part_elem, + create_info, alter_info); if (!part_info->is_sub_partitioned() || part_info->use_default_subpartitions) { @@ -2167,7 +2436,7 @@ char *generate_partition_syntax(partition_info *part_info, err+= add_name_string(fptr, part_elem->partition_name); if (show_partition_options) err+= add_partition_options(fptr, part_elem); - if (j != (no_subparts-1)) + if (j != (num_subparts-1)) { err+= add_comma(fptr); err+= add_string(fptr, "\n"); @@ -2176,12 +2445,12 @@ char *generate_partition_syntax(partition_info *part_info, } else err+= add_end_parenthesis(fptr); - } while (++j < no_subparts); + } while (++j < num_subparts); } } - if (i == (tot_no_parts-1)) + if (i == (tot_num_parts-1)) err+= add_end_parenthesis(fptr); - } while (++i < tot_no_parts); + } while (++i < tot_num_parts); } if (err) goto close_file; @@ -2328,14 +2597,14 @@ static uint32 calculate_key_value(Field **field_array) get_part_id_for_sub() loc_part_id Local partition id sub_part_id Subpartition id - no_subparts Number of subparts + num_subparts Number of subparts */ inline static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id, - uint no_subparts) + uint num_subparts) { - return (uint32)((loc_part_id * no_subparts) + sub_part_id); + return (uint32)((loc_part_id * num_subparts) + sub_part_id); } @@ -2344,7 +2613,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id, SYNOPSIS get_part_id_hash() - no_parts Number of hash partitions + num_parts Number of hash partitions part_expr Item tree of hash function out:part_id The returned partition id out:func_value Value of hash function @@ -2354,7 +2623,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id, FALSE Success */ -static int get_part_id_hash(uint no_parts, +static int get_part_id_hash(uint num_parts, Item *part_expr, uint32 *part_id, longlong *func_value) @@ -2365,7 +2634,7 @@ static int get_part_id_hash(uint no_parts, if (part_val_int(part_expr, func_value)) DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); - int_hash_id= *func_value % no_parts; + int_hash_id= *func_value % num_parts; *part_id= int_hash_id < 0 ? 
(uint32) -int_hash_id : (uint32) int_hash_id; DBUG_RETURN(FALSE); @@ -2379,7 +2648,7 @@ static int get_part_id_hash(uint no_parts, get_part_id_linear_hash() part_info A reference to the partition_info struct where all the desired information is given - no_parts Number of hash partitions + num_parts Number of hash partitions part_expr Item tree of hash function out:part_id The returned partition id out:func_value Value of hash function @@ -2390,7 +2659,7 @@ static int get_part_id_hash(uint no_parts, */ static int get_part_id_linear_hash(partition_info *part_info, - uint no_parts, + uint num_parts, Item *part_expr, uint32 *part_id, longlong *func_value) @@ -2402,7 +2671,7 @@ static int get_part_id_linear_hash(partition_info *part_info, *part_id= get_part_id_from_linear_hash(*func_value, part_info->linear_hash_mask, - no_parts); + num_parts); DBUG_RETURN(FALSE); } @@ -2413,7 +2682,7 @@ static int get_part_id_linear_hash(partition_info *part_info, SYNOPSIS get_part_id_key() field_array Array of fields for PARTTION KEY - no_parts Number of KEY partitions + num_parts Number of KEY partitions RETURN VALUE Calculated partition id @@ -2421,12 +2690,12 @@ static int get_part_id_linear_hash(partition_info *part_info, inline static uint32 get_part_id_key(Field **field_array, - uint no_parts, + uint num_parts, longlong *func_value) { DBUG_ENTER("get_part_id_key"); *func_value= calculate_key_value(field_array); - DBUG_RETURN((uint32) (*func_value % no_parts)); + DBUG_RETURN((uint32) (*func_value % num_parts)); } @@ -2438,7 +2707,7 @@ static uint32 get_part_id_key(Field **field_array, part_info A reference to the partition_info struct where all the desired information is given field_array Array of fields for PARTTION KEY - no_parts Number of KEY partitions + num_parts Number of KEY partitions RETURN VALUE Calculated partition id @@ -2447,15 +2716,15 @@ static uint32 get_part_id_key(Field **field_array, inline static uint32 get_part_id_linear_key(partition_info *part_info, Field **field_array, - uint no_parts, + uint num_parts, longlong *func_value) { - DBUG_ENTER("get_partition_id_linear_key"); + DBUG_ENTER("get_part_id_linear_key"); *func_value= calculate_key_value(field_array); DBUG_RETURN(get_part_id_from_linear_hash(*func_value, part_info->linear_hash_mask, - no_parts)); + num_parts)); } /* @@ -2489,7 +2758,8 @@ static void copy_to_part_field_buffers(Field **ptr, if (!field->maybe_null() || !field->is_null()) { CHARSET_INFO *cs= ((Field_str*)field)->charset(); - uint len= field->pack_length(); + uint max_len= field->pack_length(); + uint data_len= field->data_length(); uchar *field_buf= *field_bufs; /* We only use the field buffer for VARCHAR and CHAR strings @@ -2501,17 +2771,17 @@ static void copy_to_part_field_buffers(Field **ptr, if (field->type() == MYSQL_TYPE_VARCHAR) { uint len_bytes= ((Field_varstring*)field)->length_bytes; - my_strnxfrm(cs, field_buf + len_bytes, (len - len_bytes), - field->ptr + len_bytes, field->field_length); + my_strnxfrm(cs, field_buf + len_bytes, max_len, + field->ptr + len_bytes, data_len); if (len_bytes == 1) - *field_buf= (uchar) field->field_length; + *field_buf= (uchar) data_len; else - int2store(field_buf, field->field_length); + int2store(field_buf, data_len); } else { - my_strnxfrm(cs, field_buf, len, - field->ptr, field->field_length); + my_strnxfrm(cs, field_buf, max_len, + field->ptr, max_len); } field->ptr= field_buf; } @@ -2541,6 +2811,44 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr) return; } +/* + This function is used to 
calculate the partition id where all partition + fields have been prepared to point to a record where the partition field + values are bound. + + SYNOPSIS + get_partition_id() + part_info A reference to the partition_info struct where all the + desired information is given + out:part_id The partition id is returned through this pointer + out:func_value Value of partition function (longlong) + + RETURN VALUE + part_id Partition id of partition that would contain + row with given values of PF-fields + HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't + fit into any partition and thus the values of + the PF-fields are not allowed. + + DESCRIPTION + A routine used from write_row, update_row and delete_row from any + handler supporting partitioning. It is also a support routine for + get_partition_set used to find the set of partitions needed to scan + for a certain index scan or full table scan. + + It is actually 9 different variants of this function which are called + through a function pointer. + + get_partition_id_list + get_partition_id_list_col + get_partition_id_range + get_partition_id_range_col + get_partition_id_hash_nosub + get_partition_id_key_nosub + get_partition_id_linear_hash_nosub + get_partition_id_linear_key_nosub + get_partition_id_with_sub +*/ /* This function is used to calculate the main partition to use in the case of @@ -2562,67 +2870,26 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr) DESCRIPTION - It is actually 6 different variants of this function which are called + It is actually 8 different variants of this function which are called through a function pointer. get_partition_id_list + get_partition_id_list_col get_partition_id_range + get_partition_id_range_col get_partition_id_hash_nosub get_partition_id_key_nosub get_partition_id_linear_hash_nosub get_partition_id_linear_key_nosub */ -static int get_part_id_charset_func_subpart(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - int res; - copy_to_part_field_buffers(part_info->subpart_charset_field_array, - part_info->subpart_field_buffers, - part_info->restore_subpart_field_ptrs); - res= part_info->get_partition_id_charset(part_info, part_id, func_value); - restore_part_field_pointers(part_info->subpart_charset_field_array, - part_info->restore_subpart_field_ptrs); - return res; -} - - static int get_part_id_charset_func_part(partition_info *part_info, uint32 *part_id, longlong *func_value) { int res; - copy_to_part_field_buffers(part_info->part_charset_field_array, - part_info->part_field_buffers, - part_info->restore_part_field_ptrs); - res= part_info->get_partition_id_charset(part_info, part_id, func_value); - restore_part_field_pointers(part_info->part_charset_field_array, - part_info->restore_part_field_ptrs); - return res; -} + DBUG_ENTER("get_part_id_charset_func_part"); - -static int get_part_id_charset_func_all(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - int res; - copy_to_part_field_buffers(part_info->full_part_field_array, - part_info->full_part_field_buffers, - part_info->restore_full_part_field_ptrs); - res= part_info->get_partition_id_charset(part_info, part_id, func_value); - restore_part_field_pointers(part_info->full_part_field_array, - part_info->restore_full_part_field_ptrs); - return res; -} - - -static int get_part_part_id_charset_func(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - int res; copy_to_part_field_buffers(part_info->part_charset_field_array, 
part_info->part_field_buffers, part_info->restore_part_field_ptrs); @@ -2630,21 +2897,58 @@ static int get_part_part_id_charset_func(partition_info *part_info, part_id, func_value); restore_part_field_pointers(part_info->part_charset_field_array, part_info->restore_part_field_ptrs); - return res; + DBUG_RETURN(res); } -static int get_subpart_id_charset_func(partition_info *part_info, - uint32 *part_id) +static int get_part_id_charset_func_subpart(partition_info *part_info, + uint32 *part_id) { int res; + DBUG_ENTER("get_part_id_charset_func_subpart"); + copy_to_part_field_buffers(part_info->subpart_charset_field_array, part_info->subpart_field_buffers, part_info->restore_subpart_field_ptrs); res= part_info->get_subpartition_id_charset(part_info, part_id); restore_part_field_pointers(part_info->subpart_charset_field_array, part_info->restore_subpart_field_ptrs); - return res; + DBUG_RETURN(res); +} + +int get_partition_id_list_col(partition_info *part_info, + uint32 *part_id, + longlong *func_value) +{ + part_column_list_val *list_col_array= part_info->list_col_array; + uint num_columns= part_info->part_field_list.elements; + int list_index, cmp; + int min_list_index= 0; + int max_list_index= part_info->num_list_values - 1; + DBUG_ENTER("get_partition_id_list_col"); + + while (max_list_index >= min_list_index) + { + list_index= (max_list_index + min_list_index) >> 1; + cmp= cmp_rec_and_tuple(list_col_array + list_index*num_columns, + num_columns); + if (cmp > 0) + min_list_index= list_index + 1; + else if (cmp < 0) + { + if (!list_index) + goto notfound; + max_list_index= list_index - 1; + } + else + { + *part_id= (uint32)list_col_array[list_index].partition_id; + DBUG_RETURN(0); + } + } +notfound: + *part_id= 0; + DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); } @@ -2655,7 +2959,7 @@ int get_partition_id_list(partition_info *part_info, LIST_PART_ENTRY *list_array= part_info->list_array; int list_index; int min_list_index= 0; - int max_list_index= part_info->no_list_values - 1; + int max_list_index= part_info->num_list_values - 1; longlong part_func_value; int error= part_val_int(part_info->part_expr, &part_func_value); longlong list_value; @@ -2725,7 +3029,7 @@ notfound: index idx. The function returns first number idx, such that list_array[idx].list_value is NOT contained within the passed interval. - If all array elements are contained, part_info->no_list_values is + If all array elements are contained, part_info->num_list_values is returned. 
NOTE @@ -2739,6 +3043,44 @@ notfound: The edge of corresponding sub-array of part_info->list_array */ +uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info, + bool left_endpoint, + bool include_endpoint, + uint32 nparts) +{ + part_column_list_val *list_col_array= part_info->list_col_array; + uint num_columns= part_info->part_field_list.elements; + int list_index, cmp; + uint min_list_index= 0; + uint max_list_index= part_info->num_list_values - 1; + bool tailf= !(left_endpoint ^ include_endpoint); + DBUG_ENTER("get_partition_id_cols_list_for_endpoint"); + + do + { + list_index= (max_list_index + min_list_index) >> 1; + cmp= cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns, + nparts, tailf); + if (cmp > 0) + min_list_index= list_index + 1; + else if (cmp < 0) + { + if (!list_index) + goto notfound; + max_list_index= list_index - 1; + } + else + { + DBUG_RETURN(list_index + test(!tailf)); + } + } while (max_list_index >= min_list_index); + if (cmp > 0) + list_index++; +notfound: + DBUG_RETURN(list_index); +} + + uint32 get_list_array_idx_for_endpoint_charset(partition_info *part_info, bool left_endpoint, bool include_endpoint) @@ -2760,7 +3102,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info, { LIST_PART_ENTRY *list_array= part_info->list_array; uint list_index; - uint min_list_index= 0, max_list_index= part_info->no_list_values - 1; + uint min_list_index= 0, max_list_index= part_info->num_list_values - 1; longlong list_value; /* Get the partitioning function value for the endpoint */ longlong part_func_value= @@ -2790,7 +3132,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info, if (unsigned_flag) part_func_value-= 0x8000000000000000ULL; - DBUG_ASSERT(part_info->no_list_values); + DBUG_ASSERT(part_info->num_list_values); do { list_index= (max_list_index + min_list_index) >> 1; @@ -2815,12 +3157,49 @@ notfound: } +int get_partition_id_range_col(partition_info *part_info, + uint32 *part_id, + longlong *func_value) +{ + part_column_list_val *range_col_array= part_info->range_col_array; + uint num_columns= part_info->part_field_list.elements; + uint max_partition= part_info->num_parts - 1; + uint min_part_id= 0; + uint max_part_id= max_partition; + uint loc_part_id; + DBUG_ENTER("get_partition_id_range_col"); + + while (max_part_id > min_part_id) + { + loc_part_id= (max_part_id + min_part_id + 1) >> 1; + if (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns, + num_columns) >= 0) + min_part_id= loc_part_id + 1; + else + max_part_id= loc_part_id - 1; + } + loc_part_id= max_part_id; + if (loc_part_id != max_partition) + if (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns, + num_columns) >= 0) + loc_part_id++; + *part_id= (uint32)loc_part_id; + if (loc_part_id == max_partition && + (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns, + num_columns) >= 0)) + DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); + + DBUG_PRINT("exit",("partition: %d", *part_id)); + DBUG_RETURN(0); +} + + int get_partition_id_range(partition_info *part_info, uint32 *part_id, longlong *func_value) { longlong *range_array= part_info->range_int_array; - uint max_partition= part_info->no_parts - 1; + uint max_partition= part_info->num_parts - 1; uint min_part_id= 0; uint max_part_id= max_partition; uint loc_part_id; @@ -2897,7 +3276,7 @@ int get_partition_id_range(partition_info *part_info, represented by range_int_array[idx] has EMPTY intersection with the passed interval. 
If the interval represented by the last array element has non-empty - intersection with the passed interval, part_info->no_parts is + intersection with the passed interval, part_info->num_parts is returned. RETURN @@ -2925,7 +3304,7 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info, bool include_endpoint) { longlong *range_array= part_info->range_int_array; - uint max_partition= part_info->no_parts - 1; + uint max_partition= part_info->num_parts - 1; uint min_part_id= 0, max_part_id= max_partition, loc_part_id; /* Get the partitioning function value for the endpoint */ longlong part_func_value= @@ -3008,7 +3387,7 @@ int get_partition_id_hash_nosub(partition_info *part_info, uint32 *part_id, longlong *func_value) { - return get_part_id_hash(part_info->no_parts, part_info->part_expr, + return get_part_id_hash(part_info->num_parts, part_info->part_expr, part_id, func_value); } @@ -3017,7 +3396,7 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info, uint32 *part_id, longlong *func_value) { - return get_part_id_linear_hash(part_info, part_info->no_parts, + return get_part_id_linear_hash(part_info, part_info->num_parts, part_info->part_expr, part_id, func_value); } @@ -3027,232 +3406,44 @@ int get_partition_id_key_nosub(partition_info *part_info, longlong *func_value) { *part_id= get_part_id_key(part_info->part_field_array, - part_info->no_parts, func_value); + part_info->num_parts, func_value); return 0; } int get_partition_id_linear_key_nosub(partition_info *part_info, - uint32 *part_id, - longlong *func_value) + uint32 *part_id, + longlong *func_value) { *part_id= get_part_id_linear_key(part_info, part_info->part_field_array, - part_info->no_parts, func_value); + part_info->num_parts, func_value); return 0; } -int get_partition_id_range_sub_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_range_sub_hash"); - LINT_INIT(loc_part_id); - LINT_INIT(sub_part_id); - - if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, - func_value)))) - { - DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr, - &sub_part_id, &local_func_value)))) - { - DBUG_RETURN(error); - } - - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_range_sub_linear_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_range_sub_linear_hash"); - LINT_INIT(loc_part_id); - LINT_INIT(sub_part_id); - - if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, - func_value)))) - { - DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts, - part_info->subpart_expr, - &sub_part_id, - &local_func_value)))) - { - DBUG_RETURN(error); - } - - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_range_sub_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_range_sub_key"); - LINT_INIT(loc_part_id); - - if (unlikely((error= 
get_partition_id_range(part_info, &loc_part_id, - func_value)))) - { - DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_key(part_info->subpart_field_array, - no_subparts, &local_func_value); - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_range_sub_linear_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_range_sub_linear_key"); - LINT_INIT(loc_part_id); - - if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, - func_value)))) - { - DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_linear_key(part_info, - part_info->subpart_field_array, - no_subparts, &local_func_value); - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_list_sub_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_list_sub_hash"); - LINT_INIT(sub_part_id); - - if (unlikely((error= get_partition_id_list(part_info, &loc_part_id, - func_value)))) - { - DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr, - &sub_part_id, &local_func_value)))) - { - DBUG_RETURN(error); - } - - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_list_sub_linear_hash(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_list_sub_linear_hash"); - LINT_INIT(sub_part_id); - - if (unlikely((error= get_partition_id_list(part_info, &loc_part_id, - func_value)))) - { - DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts, - part_info->subpart_expr, - &sub_part_id, - &local_func_value)))) - { - DBUG_RETURN(error); - } - - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_list_sub_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value) +int get_partition_id_with_sub(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; + uint num_subparts; int error; - DBUG_ENTER("get_partition_id_range_sub_key"); + DBUG_ENTER("get_partition_id_with_sub"); - if (unlikely((error= get_partition_id_list(part_info, &loc_part_id, - func_value)))) + if (unlikely((error= part_info->get_part_partition_id(part_info, + &loc_part_id, + func_value)))) { DBUG_RETURN(error); } - no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_key(part_info->subpart_field_array, - no_subparts, &local_func_value); - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(0); -} - - -int get_partition_id_list_sub_linear_key(partition_info *part_info, - uint32 *part_id, - longlong *func_value) -{ - uint32 loc_part_id, sub_part_id; - uint no_subparts; - longlong local_func_value; - int error; - DBUG_ENTER("get_partition_id_list_sub_linear_key"); - - if (unlikely((error= 
get_partition_id_list(part_info, &loc_part_id, - func_value)))) + num_subparts= part_info->num_subparts; + if (unlikely((error= part_info->get_subpartition_id(part_info, + &sub_part_id)))) { DBUG_RETURN(error); - } - no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_linear_key(part_info, - part_info->subpart_field_array, - no_subparts, &local_func_value); - *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); + } + *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, num_subparts); DBUG_RETURN(0); } @@ -3285,7 +3476,7 @@ int get_partition_id_hash_sub(partition_info *part_info, uint32 *part_id) { longlong func_value; - return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr, + return get_part_id_hash(part_info->num_subparts, part_info->subpart_expr, part_id, &func_value); } @@ -3294,7 +3485,7 @@ int get_partition_id_linear_hash_sub(partition_info *part_info, uint32 *part_id) { longlong func_value; - return get_part_id_linear_hash(part_info, part_info->no_subparts, + return get_part_id_linear_hash(part_info, part_info->num_subparts, part_info->subpart_expr, part_id, &func_value); } @@ -3305,7 +3496,7 @@ int get_partition_id_key_sub(partition_info *part_info, { longlong func_value; *part_id= get_part_id_key(part_info->subpart_field_array, - part_info->no_subparts, &func_value); + part_info->num_subparts, &func_value); return FALSE; } @@ -3316,7 +3507,7 @@ int get_partition_id_linear_key_sub(partition_info *part_info, longlong func_value; *part_id= get_part_id_linear_key(part_info, part_info->subpart_field_array, - part_info->no_subparts, &func_value); + part_info->num_subparts, &func_value); return FALSE; } @@ -3615,16 +3806,16 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, const key_range *key_spec, part_id_range *part_spec) { partition_info *part_info= table->part_info; - uint no_parts= part_info->get_tot_partitions(); + uint num_parts= part_info->get_tot_partitions(); uint i, part_id; - uint sub_part= no_parts; - uint32 part_part= no_parts; + uint sub_part= num_parts; + uint32 part_part= num_parts; KEY *key_info= NULL; bool found_part_field= FALSE; DBUG_ENTER("get_partition_set"); part_spec->start_part= 0; - part_spec->end_part= no_parts - 1; + part_spec->end_part= num_parts - 1; if ((index < MAX_KEY) && key_spec->flag == (uint)HA_READ_KEY_EXACT && part_info->some_fields_in_PF.is_set(index)) @@ -3661,7 +3852,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, { if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part)) { - part_spec->start_part= no_parts; + part_spec->start_part= num_parts; DBUG_VOID_RETURN; } } @@ -3675,7 +3866,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, allowed values. Thus it is certain that the result of this scan will be empty. 
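For orientation: the single get_partition_id_with_sub() above replaces the eight removed range/list versus (linear) hash/key combinations by asking the two stored function pointers for the top-level partition and the subpartition, then combining them exactly as get_part_id_for_sub() does. A worked example under assumed numbers: with num_subparts = 4, top-level partition 2 and subpartition 3 give global partition 2 * 4 + 3 = 11.

  /* Standalone restatement of the combination step (hypothetical helper). */
  static unsigned global_part_id(unsigned loc_part_id, unsigned sub_part_id,
                                 unsigned num_subparts)
  {
    /* Global ids are laid out p0s0, p0s1, ..., p0s(n-1), p1s0, ... */
    return loc_part_id * num_subparts + sub_part_id;
  }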
*/ - part_spec->start_part= no_parts; + part_spec->start_part= num_parts; DBUG_VOID_RETURN; } } @@ -3713,7 +3904,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, { if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part)) { - part_spec->start_part= no_parts; + part_spec->start_part= num_parts; clear_indicator_in_key_fields(key_info); DBUG_VOID_RETURN; } @@ -3722,7 +3913,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, { if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part)) { - part_spec->start_part= no_parts; + part_spec->start_part= num_parts; clear_indicator_in_key_fields(key_info); DBUG_VOID_RETURN; } @@ -3743,29 +3934,29 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, nothing or we have discovered a range of partitions with possible holes in it. We need a bitvector to further the work here. */ - if (!(part_part == no_parts && sub_part == no_parts)) + if (!(part_part == num_parts && sub_part == num_parts)) { /* We can only arrive here if we are using subpartitioning. */ - if (part_part != no_parts) + if (part_part != num_parts) { /* We know the top partition and need to scan all underlying subpartitions. This is a range without holes. */ - DBUG_ASSERT(sub_part == no_parts); - part_spec->start_part= part_part * part_info->no_subparts; - part_spec->end_part= part_spec->start_part+part_info->no_subparts - 1; + DBUG_ASSERT(sub_part == num_parts); + part_spec->start_part= part_part * part_info->num_subparts; + part_spec->end_part= part_spec->start_part+part_info->num_subparts - 1; } else { - DBUG_ASSERT(sub_part != no_parts); + DBUG_ASSERT(sub_part != num_parts); part_spec->start_part= sub_part; part_spec->end_part=sub_part+ - (part_info->no_subparts*(part_info->no_parts-1)); - for (i= 0, part_id= sub_part; i < part_info->no_parts; - i++, part_id+= part_info->no_subparts) + (part_info->num_subparts*(part_info->num_parts-1)); + for (i= 0, part_id= sub_part; i < part_info->num_parts; + i++, part_id+= part_info->num_subparts) ; //Set bit part_id in bit array } } @@ -3884,10 +4075,12 @@ bool mysql_unpack_partition(THD *thd, mem_alloc_error(sizeof(partition_info)); goto end; } - lex.part_info->part_state= part_state; - lex.part_info->part_state_len= part_state_len; + part_info= lex.part_info; + part_info->part_state= part_state; + part_info->part_state_len= part_state_len; DBUG_PRINT("info", ("Parse: %s", part_buf)); - if (parse_sql(thd, & parser_state, NULL)) + if (parse_sql(thd, & parser_state, NULL) || + part_info->fix_parser_data(thd)) { thd->free_items(); goto end; @@ -3908,7 +4101,6 @@ bool mysql_unpack_partition(THD *thd, */ DBUG_PRINT("info", ("Successful parse")); - part_info= lex.part_info; DBUG_PRINT("info", ("default engine = %s, default_db_type = %s", ha_resolve_storage_engine_name(part_info->default_engine_type), ha_resolve_storage_engine_name(default_db_type))); @@ -4029,9 +4221,9 @@ set_engine_all_partitions(partition_info *part_info, partition_element *sub_elem= sub_it++; sub_elem->engine_type= engine_type; - } while (++j < part_info->no_subparts); + } while (++j < part_info->num_subparts); } - } while (++i < part_info->no_parts); + } while (++i < part_info->num_parts); } /* SYNOPSIS @@ -4176,7 +4368,7 @@ uint set_part_state(Alter_info *alter_info, partition_info *tab_part_info, enum partition_state part_state) { uint part_count= 0; - uint no_parts_found= 0; + uint num_parts_found= 0; List_iterator<partition_element> part_it(tab_part_info->partitions); do @@ -4191,15 
+4383,15 @@ uint set_part_state(Alter_info *alter_info, partition_info *tab_part_info, I.e mark the partition as a partition to be "changed" by analyzing/optimizing/rebuilding/checking/repairing/... */ - no_parts_found++; + num_parts_found++; part_elem->part_state= part_state; DBUG_PRINT("info", ("Setting part_state to %u for partition %s", part_state, part_elem->partition_name)); } else part_elem->part_state= PART_NORMAL; - } while (++part_count < tab_part_info->no_parts); - return no_parts_found; + } while (++part_count < tab_part_info->num_parts); + return num_parts_found; } @@ -4266,6 +4458,11 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, partition_info *tab_part_info= table->part_info; partition_info *alt_part_info= thd->work_part_info; uint flags= 0; + bool is_last_partition_reorged; + part_elem_value *tab_max_elem_val= NULL; + part_elem_value *alt_max_elem_val= NULL; + longlong tab_max_range= 0, alt_max_range= 0; + if (!tab_part_info) { my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); @@ -4275,13 +4472,13 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, { uint new_part_no, curr_part_no; if (tab_part_info->part_type != HASH_PARTITION || - tab_part_info->use_default_no_partitions) + tab_part_info->use_default_num_partitions) { my_error(ER_REORG_NO_PARAM_ERROR, MYF(0)); DBUG_RETURN(TRUE); } new_part_no= table->file->get_default_no_partitions(create_info); - curr_part_no= tab_part_info->no_parts; + curr_part_no= tab_part_info->num_parts; if (new_part_no == curr_part_no) { /* @@ -4299,7 +4496,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, setting the flag for no default number of partitions */ alter_info->flags|= ALTER_ADD_PARTITION; - thd->work_part_info->no_parts= new_part_no - curr_part_no; + thd->work_part_info->num_parts= new_part_no - curr_part_no; } else { @@ -4308,7 +4505,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, without setting the flag for no default number of partitions */ alter_info->flags|= ALTER_COALESCE_PARTITION; - alter_info->no_parts= curr_part_no - new_part_no; + alter_info->num_parts= curr_part_no - new_part_no; } } if (!(flags= table->file->alter_table_flags(alter_info->flags))) @@ -4320,34 +4517,73 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0); DBUG_PRINT("info", ("*fast_alter_partition: %d flags: 0x%x", *fast_alter_partition, flags)); - if (((alter_info->flags & ALTER_ADD_PARTITION) || - (alter_info->flags & ALTER_REORGANIZE_PARTITION)) && - (thd->work_part_info->part_type != tab_part_info->part_type) && - (thd->work_part_info->part_type != NOT_A_PARTITION)) + if ((alter_info->flags & ALTER_ADD_PARTITION) || + (alter_info->flags & ALTER_REORGANIZE_PARTITION)) { - if (thd->work_part_info->part_type == RANGE_PARTITION) - { - my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), - "RANGE", "LESS THAN"); - } - else if (thd->work_part_info->part_type == LIST_PARTITION) + if (thd->work_part_info->part_type != tab_part_info->part_type) { - DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION); - my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), - "LIST", "IN"); + if (thd->work_part_info->part_type == NOT_A_PARTITION) + { + if (tab_part_info->part_type == RANGE_PARTITION) + { + my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "RANGE"); + DBUG_RETURN(TRUE); + } + else if (tab_part_info->part_type == LIST_PARTITION) + { + 
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "LIST"); + DBUG_RETURN(TRUE); + } + /* + Hash partitions can be altered without parser finds out about + that it is HASH partitioned. So no error here. + */ + } + else + { + if (thd->work_part_info->part_type == RANGE_PARTITION) + { + my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), + "RANGE", "LESS THAN"); + } + else if (thd->work_part_info->part_type == LIST_PARTITION) + { + DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION); + my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), + "LIST", "IN"); + } + else if (tab_part_info->part_type == RANGE_PARTITION) + { + my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), + "RANGE", "LESS THAN"); + } + else + { + DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION); + my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), + "LIST", "IN"); + } + DBUG_RETURN(TRUE); + } } - else if (tab_part_info->part_type == RANGE_PARTITION) + if ((tab_part_info->column_list && + alt_part_info->num_columns != tab_part_info->num_columns) || + (!tab_part_info->column_list && + (tab_part_info->part_type == RANGE_PARTITION || + tab_part_info->part_type == LIST_PARTITION) && + alt_part_info->num_columns != 1U) || + (!tab_part_info->column_list && + tab_part_info->part_type == HASH_PARTITION && + alt_part_info->num_columns != 0)) { - my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), - "RANGE", "LESS THAN"); + my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0)); + DBUG_RETURN(TRUE); } - else + alt_part_info->column_list= tab_part_info->column_list; + if (alt_part_info->fix_parser_data(thd)) { - DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION); - my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), - "LIST", "IN"); + DBUG_RETURN(TRUE); } - DBUG_RETURN(TRUE); } if (alter_info->flags & ALTER_ADD_PARTITION) { @@ -4357,9 +4593,9 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, partitioning scheme as currently set-up. Partitions are always added at the end in ADD PARTITION. 
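The consistency check added in prep_alter_part_table() above reduces to a simple rule on how many values each VALUES entry of the ALTER definition may carry. A standalone restatement, simplified from the condition in the patch (the helper name and parameters are hypothetical):

  /* TRUE when the ALTER definition supplies an acceptable number of columns. */
  static bool alter_num_columns_ok(bool column_list, bool is_range_or_list,
                                   unsigned table_num_columns,
                                   unsigned alter_num_columns)
  {
    if (column_list)                    /* RANGE COLUMNS / LIST COLUMNS table */
      return alter_num_columns == table_num_columns;
    if (is_range_or_list)               /* classic RANGE/LIST expression */
      return alter_num_columns == 1;
    return alter_num_columns == 0;      /* HASH/KEY: no VALUES lists at all */
  }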
*/ - uint no_new_partitions= alt_part_info->no_parts; - uint no_orig_partitions= tab_part_info->no_parts; - uint check_total_partitions= no_new_partitions + no_orig_partitions; + uint num_new_partitions= alt_part_info->num_parts; + uint num_orig_partitions= tab_part_info->num_parts; + uint check_total_partitions= num_new_partitions + num_orig_partitions; uint new_total_partitions= check_total_partitions; /* We allow quite a lot of values to be supplied by defaults, however we @@ -4376,22 +4612,22 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0)); DBUG_RETURN(TRUE); } - if (no_new_partitions == 0) + if (num_new_partitions == 0) { my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0)); DBUG_RETURN(TRUE); } if (tab_part_info->is_sub_partitioned()) { - if (alt_part_info->no_subparts == 0) - alt_part_info->no_subparts= tab_part_info->no_subparts; - else if (alt_part_info->no_subparts != tab_part_info->no_subparts) + if (alt_part_info->num_subparts == 0) + alt_part_info->num_subparts= tab_part_info->num_subparts; + else if (alt_part_info->num_subparts != tab_part_info->num_subparts) { my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0)); DBUG_RETURN(TRUE); } check_total_partitions= new_total_partitions* - alt_part_info->no_subparts; + alt_part_info->num_subparts; } if (check_total_partitions > MAX_PARTITIONS) { @@ -4401,8 +4637,8 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, alt_part_info->part_type= tab_part_info->part_type; alt_part_info->subpart_type= tab_part_info->subpart_type; if (alt_part_info->set_up_defaults_for_partitioning(table->file, - ULL(0), - tab_part_info->no_parts)) + ULL(0), + tab_part_info->num_parts)) { DBUG_RETURN(TRUE); } @@ -4477,7 +4713,7 @@ that are reorganised. uint lower_2n= upper_2n >> 1; bool all_parts= TRUE; if (tab_part_info->linear_hash_ind && - no_new_partitions < upper_2n) + num_new_partitions < upper_2n) { /* An analysis of which parts needs reorganisation shows that it is @@ -4486,7 +4722,7 @@ that are reorganised. onwards it starts again from partition 0 and goes on until it reaches p(upper_2n - 1). If the last new partition reaches beyond upper_2n - 1 then the first interval will end with - p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n). + p(lower_2n - 1) and start with p(num_orig_partitions - lower_2n). If lower_2n partitions are added then p0 to p(lower_2n - 1) will be reorganised which means that the two interval becomes one interval at this point. Thus only when adding less than @@ -4514,7 +4750,7 @@ that are reorganised. to TRUE. In this case we don't get into this if-part at all. */ all_parts= FALSE; - if (no_new_partitions >= lower_2n) + if (num_new_partitions >= lower_2n) { /* In this case there is only one interval since the two intervals @@ -4530,8 +4766,8 @@ that are reorganised. Also in this case there is only one interval since we are not going over a 2**n boundary */ - start_part= no_orig_partitions - lower_2n; - end_part= start_part + (no_new_partitions - 1); + start_part= num_orig_partitions - lower_2n; + end_part= start_part + (num_new_partitions - 1); } else { @@ -4540,7 +4776,7 @@ that are reorganised. new parts that would ensure that the intervals become overlapping. */ - start_part= no_orig_partitions - lower_2n; + start_part= num_orig_partitions - lower_2n; end_part= upper_2n - 1; start_sec_part= 0; end_sec_part= new_total_partitions - (upper_2n + 1); @@ -4557,7 +4793,7 @@ that are reorganised. 
{ p_elem->part_state= PART_CHANGED; } - } while (++part_no < no_orig_partitions); + } while (++part_no < num_orig_partitions); } /* Need to concatenate the lists here to make it possible to check the @@ -4580,8 +4816,8 @@ that are reorganised. mem_alloc_error(1); DBUG_RETURN(TRUE); } - } while (++part_count < no_new_partitions); - tab_part_info->no_parts+= no_new_partitions; + } while (++part_count < num_new_partitions); + tab_part_info->num_parts+= num_new_partitions; } /* If we specify partitions explicitly we don't use defaults anymore. @@ -4596,7 +4832,7 @@ that are reorganised. DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info)); tab_part_info->use_default_partitions= FALSE; } - tab_part_info->use_default_no_partitions= FALSE; + tab_part_info->use_default_num_partitions= FALSE; tab_part_info->is_auto_partitioned= FALSE; } } @@ -4610,8 +4846,8 @@ that are reorganised. command to drop the partition failed in the middle. */ uint part_count= 0; - uint no_parts_dropped= alter_info->partition_names.elements; - uint no_parts_found= 0; + uint num_parts_dropped= alter_info->partition_names.elements; + uint num_parts_found= 0; List_iterator<partition_element> part_it(tab_part_info->partitions); tab_part_info->is_auto_partitioned= FALSE; @@ -4621,7 +4857,7 @@ that are reorganised. my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP"); DBUG_RETURN(TRUE); } - if (no_parts_dropped >= tab_part_info->no_parts) + if (num_parts_dropped >= tab_part_info->num_parts) { my_error(ER_DROP_LAST_PARTITION, MYF(0)); DBUG_RETURN(TRUE); @@ -4635,11 +4871,11 @@ that are reorganised. /* Set state to indicate that the partition is to be dropped. */ - no_parts_found++; + num_parts_found++; part_elem->part_state= PART_TO_BE_DROPPED; } - } while (++part_count < tab_part_info->no_parts); - if (no_parts_found != no_parts_dropped) + } while (++part_count < tab_part_info->num_parts); + if (num_parts_found != num_parts_dropped) { my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP"); DBUG_RETURN(TRUE); @@ -4649,14 +4885,14 @@ that are reorganised. my_error(ER_ROW_IS_REFERENCED, MYF(0)); DBUG_RETURN(TRUE); } - tab_part_info->no_parts-= no_parts_dropped; + tab_part_info->num_parts-= num_parts_dropped; } else if (alter_info->flags & ALTER_REBUILD_PARTITION) { - uint no_parts_found; - uint no_parts_opt= alter_info->partition_names.elements; - no_parts_found= set_part_state(alter_info, tab_part_info, PART_CHANGED); - if (no_parts_found != no_parts_opt && + uint num_parts_found; + uint num_parts_opt= alter_info->partition_names.elements; + num_parts_found= set_part_state(alter_info, tab_part_info, PART_CHANGED); + if (num_parts_found != num_parts_opt && (!(alter_info->flags & ALTER_ALL_PARTITION))) { my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REBUILD"); @@ -4670,20 +4906,20 @@ that are reorganised. 
} else if (alter_info->flags & ALTER_COALESCE_PARTITION) { - uint no_parts_coalesced= alter_info->no_parts; - uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced; + uint num_parts_coalesced= alter_info->num_parts; + uint num_parts_remain= tab_part_info->num_parts - num_parts_coalesced; List_iterator<partition_element> part_it(tab_part_info->partitions); if (tab_part_info->part_type != HASH_PARTITION) { my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0)); DBUG_RETURN(TRUE); } - if (no_parts_coalesced == 0) + if (num_parts_coalesced == 0) { my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0)); DBUG_RETURN(TRUE); } - if (no_parts_coalesced >= tab_part_info->no_parts) + if (num_parts_coalesced >= tab_part_info->num_parts) { my_error(ER_DROP_LAST_PARTITION, MYF(0)); DBUG_RETURN(TRUE); @@ -4731,21 +4967,21 @@ state of p1. uint upper_2n= tab_part_info->linear_hash_mask + 1; uint lower_2n= upper_2n >> 1; all_parts= FALSE; - if (no_parts_coalesced >= lower_2n) + if (num_parts_coalesced >= lower_2n) { all_parts= TRUE; } - else if (no_parts_remain >= lower_2n) + else if (num_parts_remain >= lower_2n) { - end_part= tab_part_info->no_parts - (lower_2n + 1); - start_part= no_parts_remain - lower_2n; + end_part= tab_part_info->num_parts - (lower_2n + 1); + start_part= num_parts_remain - lower_2n; } else { start_part= 0; - end_part= tab_part_info->no_parts - (lower_2n + 1); + end_part= tab_part_info->num_parts - (lower_2n + 1); end_sec_part= (lower_2n >> 1) - 1; - start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1)); + start_sec_part= end_sec_part - (lower_2n - (num_parts_remain + 1)); } } do @@ -4756,19 +4992,19 @@ state of p1. (part_count >= start_part && part_count <= end_part) || (part_count >= start_sec_part && part_count <= end_sec_part))) p_elem->part_state= PART_CHANGED; - if (++part_count > no_parts_remain) + if (++part_count > num_parts_remain) { if (*fast_alter_partition) p_elem->part_state= PART_REORGED_DROPPED; else part_it.remove(); } - } while (part_count < tab_part_info->no_parts); - tab_part_info->no_parts= no_parts_remain; + } while (part_count < tab_part_info->num_parts); + tab_part_info->num_parts= num_parts_remain; } if (!(alter_info->flags & ALTER_TABLE_REORG)) { - tab_part_info->use_default_no_partitions= FALSE; + tab_part_info->use_default_num_partitions= FALSE; tab_part_info->is_auto_partitioned= FALSE; } } @@ -4785,33 +5021,32 @@ state of p1. range as those changed from. This command can be used on RANGE and LIST partitions. 
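The linear hash interval computation above, this time for COALESCE PARTITION, can be followed with a concrete example (numbers chosen for illustration only, not taken from the patch):

    /* Illustration: 6 LINEAR HASH partitions p0..p5,
       ALTER TABLE ... COALESCE PARTITION 1. */
    uint num_parts= 6, num_parts_coalesced= 1;
    uint num_parts_remain= num_parts - num_parts_coalesced;   /* 5 */
    uint upper_2n= 8, lower_2n= upper_2n >> 1;                /* 8 and 4 */
    /* num_parts_coalesced < lower_2n and num_parts_remain >= lower_2n,
       so one interval of changed partitions: */
    uint end_part= num_parts - (lower_2n + 1);                /* 1 */
    uint start_part= num_parts_remain - lower_2n;             /* 1 */
    /* p1 is marked PART_CHANGED (it absorbs the rows of the dropped p5,
       since 5 MOD 4 = 1) and p5 itself becomes PART_REORGED_DROPPED. */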
*/ - uint no_parts_reorged= alter_info->partition_names.elements; - uint no_parts_new= thd->work_part_info->partitions.elements; - partition_info *alt_part_info= thd->work_part_info; + uint num_parts_reorged= alter_info->partition_names.elements; + uint num_parts_new= thd->work_part_info->partitions.elements; uint check_total_partitions; tab_part_info->is_auto_partitioned= FALSE; - if (no_parts_reorged > tab_part_info->no_parts) + if (num_parts_reorged > tab_part_info->num_parts) { my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0)); DBUG_RETURN(TRUE); } if (!(tab_part_info->part_type == RANGE_PARTITION || tab_part_info->part_type == LIST_PARTITION) && - (no_parts_new != no_parts_reorged)) + (num_parts_new != num_parts_reorged)) { my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0)); DBUG_RETURN(TRUE); } if (tab_part_info->is_sub_partitioned() && - alt_part_info->no_subparts && - alt_part_info->no_subparts != tab_part_info->no_subparts) + alt_part_info->num_subparts && + alt_part_info->num_subparts != tab_part_info->num_subparts) { my_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR, MYF(0)); DBUG_RETURN(TRUE); } - check_total_partitions= tab_part_info->no_parts + no_parts_new; - check_total_partitions-= no_parts_reorged; + check_total_partitions= tab_part_info->num_parts + num_parts_new; + check_total_partitions-= num_parts_reorged; if (check_total_partitions > MAX_PARTITIONS) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -4819,7 +5054,7 @@ state of p1. } alt_part_info->part_type= tab_part_info->part_type; alt_part_info->subpart_type= tab_part_info->subpart_type; - alt_part_info->no_subparts= tab_part_info->no_subparts; + alt_part_info->num_subparts= tab_part_info->num_subparts; DBUG_ASSERT(!alt_part_info->use_default_partitions); if (alt_part_info->set_up_defaults_for_partitioning(table->file, ULL(0), @@ -4867,9 +5102,7 @@ the generated partition syntax in a correct manner. uint part_count= 0; bool found_first= FALSE; bool found_last= FALSE; - bool is_last_partition_reorged; uint drop_count= 0; - longlong tab_max_range= 0, alt_max_range= 0; do { partition_element *part_elem= tab_it++; @@ -4879,7 +5112,13 @@ the generated partition syntax in a correct manner. { is_last_partition_reorged= TRUE; drop_count++; - tab_max_range= part_elem->range_value; + if (tab_part_info->column_list) + { + List_iterator<part_elem_value> p(part_elem->list_val_list); + tab_max_elem_val= p++; + } + else + tab_max_range= part_elem->range_value; if (*fast_alter_partition && tab_part_info->temp_partitions.push_back(part_elem)) { @@ -4891,20 +5130,28 @@ the generated partition syntax in a correct manner. if (!found_first) { uint alt_part_count= 0; - found_first= TRUE; + partition_element *alt_part_elem; List_iterator<partition_element> alt_it(alt_part_info->partitions); + found_first= TRUE; do { - partition_element *alt_part_elem= alt_it++; - alt_max_range= alt_part_elem->range_value; + alt_part_elem= alt_it++; + if (tab_part_info->column_list) + { + List_iterator<part_elem_value> p(alt_part_elem->list_val_list); + alt_max_elem_val= p++; + } + else + alt_max_range= alt_part_elem->range_value; + if (*fast_alter_partition) alt_part_elem->part_state= PART_TO_BE_ADDED; if (alt_part_count == 0) tab_it.replace(alt_part_elem); else tab_it.after(alt_part_elem); - } while (++alt_part_count < no_parts_new); + } while (++alt_part_count < num_parts_new); } else if (found_last) { @@ -4919,32 +5166,13 @@ the generated partition syntax in a correct manner. 
if (found_first) found_last= TRUE; } - } while (++part_count < tab_part_info->no_parts); - if (drop_count != no_parts_reorged) + } while (++part_count < tab_part_info->num_parts); + if (drop_count != num_parts_reorged) { my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE"); DBUG_RETURN(TRUE); } - if (tab_part_info->part_type == RANGE_PARTITION && - ((is_last_partition_reorged && - alt_max_range < tab_max_range) || - (!is_last_partition_reorged && - alt_max_range != tab_max_range))) - { - /* - For range partitioning the total resulting range before and - after the change must be the same except in one case. This is - when the last partition is reorganised, in this case it is - acceptable to increase the total range. - The reason is that it is not allowed to have "holes" in the - middle of the ranges and thus we should not allow to reorganise - to create "holes". Also we should not allow using REORGANIZE - to drop data. - */ - my_error(ER_REORG_OUTSIDE_RANGE, MYF(0)); - DBUG_RETURN(TRUE); - } - tab_part_info->no_parts= check_total_partitions; + tab_part_info->num_parts= check_total_partitions; } } else @@ -4960,13 +5188,45 @@ the generated partition syntax in a correct manner. !alt_part_info->use_default_subpartitions) { tab_part_info->use_default_subpartitions= FALSE; - tab_part_info->use_default_no_subpartitions= FALSE; + tab_part_info->use_default_num_subpartitions= FALSE; } if (tab_part_info->check_partition_info(thd, (handlerton**)NULL, - table->file, ULL(0), FALSE)) + table->file, ULL(0), TRUE)) { DBUG_RETURN(TRUE); } + /* + The check below needs to be performed after check_partition_info + since this function "fixes" the item trees of the new partitions + to reorganize into + */ + if (alter_info->flags == ALTER_REORGANIZE_PARTITION && + tab_part_info->part_type == RANGE_PARTITION && + ((is_last_partition_reorged && + (tab_part_info->column_list ? + (tab_part_info->compare_column_values( + alt_max_elem_val->col_val_array, + tab_max_elem_val->col_val_array) < 0) : + alt_max_range < tab_max_range)) || + (!is_last_partition_reorged && + (tab_part_info->column_list ? + (tab_part_info->compare_column_values( + alt_max_elem_val->col_val_array, + tab_max_elem_val->col_val_array) != 0) : + alt_max_range != tab_max_range)))) + { + /* + For range partitioning the total resulting range before and + after the change must be the same except in one case. This is + when the last partition is reorganised, in this case it is + acceptable to increase the total range. + The reason is that it is not allowed to have "holes" in the + middle of the ranges and thus we should not allow to reorganise + to create "holes". + */ + my_error(ER_REORG_OUTSIDE_RANGE, MYF(0)); + DBUG_RETURN(TRUE); + } } } else @@ -5080,6 +5340,10 @@ the generated partition syntax in a correct manner. 
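The relocated range check boils down to a simple rule. The helper below is a sketch only (not part of the patch); for the new RANGE COLUMN_LIST form the same rule is applied through partition_info::compare_column_values() instead of comparing the integer bounds:

    /* Sketch: REORGANIZE on RANGE partitions must keep the total range,
       except that reorganising the last partition may extend it. */
    static bool reorg_keeps_total_range(bool is_last_partition_reorged,
                                        longlong tab_max_range,
                                        longlong alt_max_range)
    {
      if (is_last_partition_reorged)
        return alt_max_range >= tab_max_range;   /* may grow, must not shrink */
      return alt_max_range == tab_max_range;     /* must stay exactly equal */
    }

For example, reorganising a last partition declared VALUES LESS THAN (100) into VALUES LESS THAN (100) plus VALUES LESS THAN MAXVALUE is accepted, while any change to the upper bound of a reorganised middle range, and any shrinking of the last one, is rejected with ER_REORG_OUTSIDE_RANGE.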
{ DBUG_PRINT("info", ("partition changed")); *partition_changed= TRUE; + if (thd->work_part_info->fix_parser_data(thd)) + { + DBUG_RETURN(TRUE); + } } /* Set up partition default_engine_type either from the create_info @@ -5240,8 +5504,8 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) part_it.remove(); remove_count++; } - } while (++i < part_info->no_parts); - part_info->no_parts-= remove_count; + } while (++i < part_info->num_parts); + part_info->num_parts-= remove_count; DBUG_RETURN(FALSE); } @@ -5363,7 +5627,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, char normal_path[FN_REFLEN]; List_iterator<partition_element> part_it(part_info->partitions); uint temp_partitions= part_info->temp_partitions.elements; - uint no_elements= part_info->partitions.elements; + uint num_elements= part_info->partitions.elements; uint i= 0; DBUG_ENTER("write_log_changed_partitions"); @@ -5376,7 +5640,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, if (part_info->is_sub_partitioned()) { List_iterator<partition_element> sub_it(part_elem->subpartitions); - uint no_subparts= part_info->no_subparts; + uint num_subparts= part_info->num_subparts; uint j= 0; do { @@ -5405,7 +5669,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, *next_entry= log_entry->entry_pos; sub_elem->log_entry= log_entry; insert_part_info_log_entry_list(part_info, log_entry); - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -5433,7 +5697,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, insert_part_info_log_entry_list(part_info, log_entry); } } - } while (++i < no_elements); + } while (++i < num_elements); DBUG_RETURN(FALSE); } @@ -5459,14 +5723,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, char tmp_path[FN_LEN]; List_iterator<partition_element> part_it(part_info->partitions); List_iterator<partition_element> temp_it(part_info->temp_partitions); - uint no_temp_partitions= part_info->temp_partitions.elements; - uint no_elements= part_info->partitions.elements; + uint num_temp_partitions= part_info->temp_partitions.elements; + uint num_elements= part_info->partitions.elements; DBUG_ENTER("write_log_dropped_partitions"); ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION; if (temp_list) - no_elements= no_temp_partitions; - while (no_elements--) + num_elements= num_temp_partitions; + while (num_elements--) { partition_element *part_elem; if (temp_list) @@ -5480,14 +5744,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, uint name_variant; if (part_elem->part_state == PART_CHANGED || (part_elem->part_state == PART_TO_BE_ADDED && - no_temp_partitions)) + num_temp_partitions)) name_variant= TEMP_PART_NAME; else name_variant= NORMAL_PART_NAME; if (part_info->is_sub_partitioned()) { List_iterator<partition_element> sub_it(part_elem->subpartitions); - uint no_subparts= part_info->no_subparts; + uint num_subparts= part_info->num_subparts; uint j= 0; do { @@ -5507,7 +5771,7 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, *next_entry= log_entry->entry_pos; sub_elem->log_entry= log_entry; insert_part_info_log_entry_list(part_info, log_entry); - } while (++j < no_subparts); + } while (++j < num_subparts); } else { @@ -6605,16 +6869,19 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str) IMPLEMENTATION There are two available interval analyzer functions: (1) 
get_part_iter_for_interval_via_mapping - (2) get_part_iter_for_interval_via_walking + (2) get_part_iter_for_interval_cols_via_map + (3) get_part_iter_for_interval_via_walking They both have limited applicability: (1) is applicable for "PARTITION BY <RANGE|LIST>(func(t.field))", where func is a monotonic function. - - (2) is applicable for + + (2) is applicable for "PARTITION BY <RANGE|LIST> COLUMN_LIST (field_list) + + (3) is applicable for "[SUB]PARTITION BY <any-partitioning-type>(any_func(t.integer_field))" - If both are applicable, (1) is preferred over (2). + If both (1) and (3) are applicable, (1) is preferred over (3). This function sets part_info::get_part_iter_for_interval according to this criteria, and also sets some auxilary fields that the function @@ -6634,10 +6901,19 @@ static void set_up_range_analysis_info(partition_info *part_info) switch (part_info->part_type) { case RANGE_PARTITION: case LIST_PARTITION: - if (part_info->part_expr->get_monotonicity_info() != NON_MONOTONIC) + if (!part_info->column_list) + { + if (part_info->part_expr->get_monotonicity_info() != NON_MONOTONIC) + { + part_info->get_part_iter_for_interval= + get_part_iter_for_interval_via_mapping; + goto setup_subparts; + } + } + else { part_info->get_part_iter_for_interval= - get_part_iter_for_interval_via_mapping; + get_part_iter_for_interval_cols_via_map; goto setup_subparts; } default: @@ -6648,7 +6924,7 @@ static void set_up_range_analysis_info(partition_info *part_info) Check if get_part_iter_for_interval_via_walking() can be used for partitioning */ - if (part_info->no_part_fields == 1) + if (part_info->num_part_fields == 1) { Field *field= part_info->part_field_array[0]; switch (field->type()) { @@ -6670,7 +6946,7 @@ setup_subparts: Check if get_part_iter_for_interval_via_walking() can be used for subpartitioning */ - if (part_info->no_subpart_fields == 1) + if (part_info->num_subpart_fields == 1) { Field *field= part_info->subpart_field_array[0]; switch (field->type()) { @@ -6688,9 +6964,118 @@ setup_subparts: } +/* + This function takes a memory of packed fields in opt-range format + and stores it in record format. To avoid having to worry about how + the length of fields are calculated in opt-range format we send + an array of lengths used for each field in store_length_array. + + SYNOPSIS + store_tuple_to_record() + pfield Field array + store_length_array Array of field lengths + value Memory where fields are stored + value_end End of memory + + RETURN VALUE + nparts Number of fields assigned +*/ +uint32 store_tuple_to_record(Field **pfield, + uint32 *store_length_array, + uchar *value, + uchar *value_end) +{ + /* This function is inspired by store_key_image_rec. */ + uint32 nparts= 0; + uchar *loc_value; + while (value < value_end) + { + loc_value= value; + if ((*pfield)->real_maybe_null()) + { + if (*loc_value) + (*pfield)->set_null(); + else + (*pfield)->set_notnull(); + loc_value++; + } + uint len= (*pfield)->pack_length(); + (*pfield)->set_key_image(loc_value, len); + value+= *store_length_array; + store_length_array++; + nparts++; + pfield++; + } + return nparts; +} + +/* + RANGE(columns) partitioning: compare value bound and probe tuple. + + The value bound always is a full tuple (but may include the MAX_VALUE + special value). + + The probe tuple may be a prefix of partitioning tuple. 
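Concretely (illustrative examples, not taken from the patch): PARTITION BY RANGE (YEAR(d)) keeps using the mapping analyzer (1) because YEAR() is monotonic, the new PARTITION BY RANGE COLUMN_LIST (a, b) form is routed to the column analyzer (2), and a single-integer-column scheme such as PARTITION BY HASH (c) can only be handled by the walking analyzer (3). A sketch of the selection at the partition level, ignoring subpartitions (not part of the patch):

    /* Sketch: which analyzer set_up_range_analysis_info() picks for the
       partitioning function; illustrative only. */
    enum analyzer { VIA_MAPPING, COLS_VIA_MAP, VIA_WALKING, NONE };
    static analyzer pick_analyzer(const partition_info *pi)
    {
      if (pi->part_type == RANGE_PARTITION || pi->part_type == LIST_PARTITION)
      {
        if (pi->column_list)
          return COLS_VIA_MAP;                       /* COLUMN_LIST form */
        if (pi->part_expr->get_monotonicity_info() != NON_MONOTONIC)
          return VIA_MAPPING;                        /* monotonic expression */
      }
      /* otherwise only a single integer partitioning field can be walked */
      return pi->num_part_fields == 1 ? VIA_WALKING : NONE;
    }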
The tail_is_min + parameter specifies whether the suffix components should be assumed to + hold MAX_VALUE +*/ + +static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec) +{ + partition_info *part_info= val->part_info; + Field **field= part_info->part_field_array; + Field **fields_end= field + nvals_in_rec; + int res; + + for (; field != fields_end; field++, val++) + { + if (val->max_value) + return -1; + if ((*field)->is_null()) + { + if (val->null_value) + continue; + return -1; + } + if (val->null_value) + return +1; + res= (*field)->cmp((const uchar*)val->column_value); + if (res) + return res; + } + return 0; +} + + +static int cmp_rec_and_tuple_prune(part_column_list_val *val, + uint32 n_vals_in_rec, + bool tail_is_min) +{ + int cmp; + Field **field; + partition_info *part_info; + if ((cmp= cmp_rec_and_tuple(val, n_vals_in_rec))) + return cmp; + part_info= val->part_info; + field= part_info->part_field_array + n_vals_in_rec; + for (; *field; field++, val++) + { + if (tail_is_min) + return -1; + if (!tail_is_min && !val->max_value) + return +1; + } + return 0; +} + + typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint, bool include_endpoint); +typedef uint32 (*get_col_endpoint_func)(partition_info*, bool left_endpoint, + bool include_endpoint, + uint32 num_parts); + /* Partitioning Interval Analysis: Initialize the iterator for "mapping" case @@ -6726,18 +7111,145 @@ typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint, -1 - All partitions would match (iterator not initialized) */ +uint32 get_partition_id_cols_range_for_endpoint(partition_info *part_info, + bool left_endpoint, + bool include_endpoint, + uint32 nparts) +{ + uint max_partition= part_info->num_parts - 1; + uint min_part_id= 0, max_part_id= max_partition, loc_part_id; + part_column_list_val *range_col_array= part_info->range_col_array; + uint num_columns= part_info->part_field_list.elements; + bool tailf= !(left_endpoint ^ include_endpoint); + DBUG_ENTER("get_partition_id_cols_range_for_endpoint"); + + /* Get the partitioning function value for the endpoint */ + while (max_part_id > min_part_id) + { + loc_part_id= (max_part_id + min_part_id + 1) >> 1; + if (cmp_rec_and_tuple_prune(range_col_array + loc_part_id*num_columns, + nparts, tailf) >= 0) + min_part_id= loc_part_id + 1; + else + max_part_id= loc_part_id - 1; + } + loc_part_id= max_part_id; + if (loc_part_id < max_partition && + cmp_rec_and_tuple_prune(range_col_array + (loc_part_id+1)*num_columns, + nparts, tailf) >= 0 + ) + { + loc_part_id++; + } + if (left_endpoint) + { + if (cmp_rec_and_tuple_prune(range_col_array + loc_part_id*num_columns, + nparts, tailf) >= 0) + loc_part_id++; + } + else + { + if (loc_part_id < max_partition) + { + int res= cmp_rec_and_tuple_prune(range_col_array + + loc_part_id * num_columns, + nparts, tailf); + if (!res) + loc_part_id += test(include_endpoint); + else if (res > 0) + loc_part_id++; + } + loc_part_id++; + } + DBUG_RETURN(loc_part_id); +} + + +int get_part_iter_for_interval_cols_via_map(partition_info *part_info, + bool is_subpart, + uint32 *store_length_array, + uchar *min_value, uchar *max_value, + uint min_len, uint max_len, + uint flags, + PARTITION_ITERATOR *part_iter) +{ + uint32 nparts; + get_col_endpoint_func get_col_endpoint; + DBUG_ENTER("get_part_iter_for_interval_cols_via_map"); + + if (part_info->part_type == RANGE_PARTITION) + { + get_col_endpoint= get_partition_id_cols_range_for_endpoint; + part_iter->get_next= get_next_partition_id_range; + } + else if 
(part_info->part_type == LIST_PARTITION) + { + get_col_endpoint= get_partition_id_cols_list_for_endpoint; + part_iter->get_next= get_next_partition_id_list; + part_iter->part_info= part_info; + DBUG_ASSERT(part_info->num_list_values); + } + else + assert(0); + + if (flags & NO_MIN_RANGE) + part_iter->part_nums.start= part_iter->part_nums.cur= 0; + else + { + // Copy from min_value to record + nparts= store_tuple_to_record(part_info->part_field_array, + store_length_array, + min_value, + min_value + min_len); + part_iter->part_nums.start= part_iter->part_nums.cur= + get_col_endpoint(part_info, TRUE, !(flags & NEAR_MIN), + nparts); + } + if (flags & NO_MAX_RANGE) + { + if (part_info->part_type == RANGE_PARTITION) + part_iter->part_nums.end= part_info->num_parts; + else /* LIST_PARTITION */ + { + DBUG_ASSERT(part_info->part_type == LIST_PARTITION); + part_iter->part_nums.end= part_info->num_list_values; + } + } + else + { + // Copy from max_value to record + nparts= store_tuple_to_record(part_info->part_field_array, + store_length_array, + max_value, + max_value + max_len); + part_iter->part_nums.end= get_col_endpoint(part_info, FALSE, + !(flags & NEAR_MAX), + nparts); + } + if (part_iter->part_nums.start == part_iter->part_nums.end) + DBUG_RETURN(0); + DBUG_RETURN(1); +} + + int get_part_iter_for_interval_via_mapping(partition_info *part_info, bool is_subpart, + uint32 *store_length_array, /* ignored */ uchar *min_value, uchar *max_value, + uint min_len, uint max_len, /* ignored */ uint flags, PARTITION_ITERATOR *part_iter) { - DBUG_ASSERT(!is_subpart); Field *field= part_info->part_field_array[0]; uint32 max_endpoint_val; get_endpoint_func get_endpoint; bool can_match_multiple_values; /* is not '=' */ uint field_len= field->pack_length_in_rec(); + DBUG_ENTER("get_part_iter_for_interval_via_mapping"); + DBUG_ASSERT(!is_subpart); + (void) store_length_array; + (void)min_len; + (void)max_len; part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE; if (part_info->part_type == RANGE_PARTITION) @@ -6746,7 +7258,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, get_endpoint= get_partition_id_range_for_endpoint_charset; else get_endpoint= get_partition_id_range_for_endpoint; - max_endpoint_val= part_info->no_parts; + max_endpoint_val= part_info->num_parts; part_iter->get_next= get_next_partition_id_range; } else if (part_info->part_type == LIST_PARTITION) @@ -6756,7 +7268,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, get_endpoint= get_list_array_idx_for_endpoint_charset; else get_endpoint= get_list_array_idx_for_endpoint; - max_endpoint_val= part_info->no_list_values; + max_endpoint_val= part_info->num_list_values; part_iter->get_next= get_next_partition_id_list; part_iter->part_info= part_info; if (max_endpoint_val == 0) @@ -6769,7 +7281,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, part_iter->part_nums.start= part_iter->part_nums.end= 0; part_iter->part_nums.cur= 0; part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE; - return -1; + DBUG_RETURN(-1); } } else @@ -6805,7 +7317,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, { /* The right bound is X <= NULL, i.e. 
it is a "X IS NULL" interval */ part_iter->part_nums.end= 0; - return 1; + DBUG_RETURN(1); } } else @@ -6829,11 +7341,11 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, part_iter->part_nums.cur= part_iter->part_nums.start= 0; part_iter->part_nums.end= 0; part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE; - return 1; + DBUG_RETURN(1); } part_iter->part_nums.cur= part_iter->part_nums.start; if (part_iter->part_nums.start == max_endpoint_val) - return 0; /* No partitions */ + DBUG_RETURN(0); /* No partitions */ } } @@ -6847,9 +7359,9 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp); if (part_iter->part_nums.start >= part_iter->part_nums.end && !part_iter->ret_null_part) - return 0; /* No partitions */ + DBUG_RETURN(0); /* No partitions */ } - return 1; /* Ok, iterator initialized */ + DBUG_RETURN(1); /* Ok, iterator initialized */ } @@ -6897,25 +7409,32 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, */ int get_part_iter_for_interval_via_walking(partition_info *part_info, - bool is_subpart, - uchar *min_value, uchar *max_value, - uint flags, - PARTITION_ITERATOR *part_iter) + bool is_subpart, + uint32 *store_length_array, /* ignored */ + uchar *min_value, uchar *max_value, + uint min_len, uint max_len, /* ignored */ + uint flags, + PARTITION_ITERATOR *part_iter) { Field *field; uint total_parts; partition_iter_func get_next_func; + DBUG_ENTER("get_part_iter_for_interval_via_walking"); + (void)store_length_array; + (void)min_len; + (void)max_len; + part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE; if (is_subpart) { field= part_info->subpart_field_array[0]; - total_parts= part_info->no_subparts; + total_parts= part_info->num_subparts; get_next_func= get_next_subpartition_via_walking; } else { field= part_info->part_field_array[0]; - total_parts= part_info->no_parts; + total_parts= part_info->num_parts; get_next_func= get_next_partition_via_walking; } @@ -6935,7 +7454,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info, if (!part_info->get_subpartition_id(part_info, &part_id)) { init_single_partition_iterator(part_id, part_iter); - return 1; /* Ok, iterator initialized */ + DBUG_RETURN(1); /* Ok, iterator initialized */ } } else @@ -6948,10 +7467,10 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info, if (!res) { init_single_partition_iterator(part_id, part_iter); - return 1; /* Ok, iterator initialized */ + DBUG_RETURN(1); /* Ok, iterator initialized */ } } - return 0; /* No partitions match */ + DBUG_RETURN(0); /* No partitions match */ } if ((field->real_maybe_null() && @@ -6959,7 +7478,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info, (!(flags & NO_MAX_RANGE) && *max_value))) || // X <? NULL (flags & (NO_MIN_RANGE | NO_MAX_RANGE))) // -inf at any bound { - return -1; /* Can't handle this interval, have to use all partitions */ + DBUG_RETURN(-1); /* Can't handle this interval, have to use all partitions */ } /* Get integers for left and right interval bound */ @@ -6978,7 +7497,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info, an empty interval by "wrapping around" a + 4G-1 + 1 = a. 
*/ if ((ulonglong)b - (ulonglong)a == ~0ULL) - return -1; + DBUG_RETURN(-1); a += test(flags & NEAR_MIN); b += test(!(flags & NEAR_MAX)); @@ -7001,13 +7520,13 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info, - there are not many values to enumerate. */ if ((n_values > 2*total_parts) && n_values > MAX_RANGE_TO_WALK) - return -1; + DBUG_RETURN(-1); part_iter->field_vals.start= part_iter->field_vals.cur= a; part_iter->field_vals.end= b; part_iter->part_info= part_info; part_iter->get_next= get_next_func; - return 1; + DBUG_RETURN(1); } @@ -7055,8 +7574,9 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter) DESCRIPTION This implementation of PARTITION_ITERATOR::get_next() is special for - LIST partitioning: it enumerates partition ids in - part_info->list_array[i] where i runs over [min_idx, max_idx] interval. + LIST partitioning: it enumerates partition ids in + part_info->list_array[i] (list_col_array[i] for COLUMN_LIST LIST + partitioning) where i runs over [min_idx, max_idx] interval. The function conforms to partition_iter_func type. RETURN @@ -7078,8 +7598,13 @@ uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter) return NOT_A_PARTITION_ID; } else - return part_iter->part_info->list_array[part_iter-> - part_nums.cur++].partition_id; + { + partition_info *part_info= part_iter->part_info; + uint32 num_part= part_iter->part_nums.cur++; + return part_info->column_list ? + part_info->list_col_array[num_part].partition_id : + part_info->list_array[num_part].partition_id; + } } @@ -7220,5 +7745,17 @@ void create_subpartition_name(char *out, const char *in1, strxmov(out, in1, "#P#", transl_part_name, "#SP#", transl_subpart_name, "#REN#", NullS); } + +uint get_partition_field_store_length(Field *field) +{ + uint store_length; + + store_length= field->key_length(); + if (field->real_maybe_null()) + store_length+= HA_KEY_NULL_LENGTH; + if (field->real_type() == MYSQL_TYPE_VARCHAR) + store_length+= HA_KEY_BLOB_LENGTH; + return store_length; +} #endif diff --git a/sql/sql_partition.h b/sql/sql_partition.h index 0c47340016c..8d14a6f3ea4 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -1,7 +1,7 @@ #ifndef SQL_PARTITION_INCLUDED #define SQL_PARTITION_INCLUDED -/* Copyright (C) 2006 MySQL AB +/* Copyright (C) 2006-2008 MySQL AB, Sun Microsystems Inc. 
2008-2009 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -68,15 +68,13 @@ int get_part_for_delete(const uchar *buf, const uchar *rec0, void prune_partition_set(const TABLE *table, part_id_range *part_spec); bool check_partition_info(partition_info *part_info,handlerton **eng_type, TABLE *table, handler *file, HA_CREATE_INFO *info); -void set_linear_hash_mask(partition_info *part_info, uint no_parts); +void set_linear_hash_mask(partition_info *part_info, uint num_parts); bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind); -char *generate_partition_syntax(partition_info *part_info, - uint *buf_length, bool use_sql_alloc, - bool show_partition_options); bool partition_key_modified(TABLE *table, const MY_BITMAP *fields); void get_partition_set(const TABLE *table, uchar *buf, const uint index, const key_range *key_spec, part_id_range *part_spec); +uint get_partition_field_store_length(Field *field); void get_full_part_id_from_key(const TABLE *table, uchar *buf, KEY *key_info, const key_range *key_spec, @@ -99,6 +97,7 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, bool check_part_func_fields(Field **ptr, bool ok_with_charsets); bool field_is_partition_charset(Field *field); +Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs); /* A "Get next" function for partition iterator. @@ -176,11 +175,14 @@ typedef struct st_partition_iter SYNOPSIS get_partitions_in_range_iter() part_info Partitioning info - is_subpart - min_val Left edge, field value in opt_range_key format. - max_val Right edge, field value in opt_range_key format. + is_subpart + store_length_array Length of fields packed in opt_range_key format + min_val Left edge, field value in opt_range_key format + max_val Right edge, field value in opt_range_key format + min_len Length of minimum value + max_len Length of maximum value flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE, - NO_MAX_RANGE. 
+ NO_MAX_RANGE part_iter Iterator structure to be initialized DESCRIPTION @@ -194,8 +196,9 @@ typedef struct st_partition_iter The set of partitions is returned by initializing an iterator in *part_iter NOTES - There are currently two functions of this type: + There are currently three functions of this type: - get_part_iter_for_interval_via_walking + - get_part_iter_for_interval_cols_via_map - get_part_iter_for_interval_via_mapping RETURN @@ -206,7 +209,9 @@ typedef struct st_partition_iter typedef int (*get_partitions_in_range_iter)(partition_info *part_info, bool is_subpart, + uint32 *store_length_array, uchar *min_val, uchar *max_val, + uint min_len, uint max_len, uint flags, PARTITION_ITERATOR *part_iter); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 61fda3ec184..64674eb5c57 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1574,7 +1574,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, ((part_syntax= generate_partition_syntax(table->part_info, &part_syntax_len, FALSE, - show_table_options)))) + show_table_options, + NULL, NULL)))) { packet->append(STRING_WITH_LEN("\n/*!50100")); packet->append(part_syntax, part_syntax_len); @@ -4949,6 +4950,50 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table, return; } +static int +get_partition_column_description(partition_info *part_info, + part_elem_value *list_value, + String &tmp_str) +{ + uint num_elements= part_info->part_field_list.elements; + uint i; + DBUG_ENTER("get_partition_column_description"); + + for (i= 0; i < num_elements; i++) + { + part_column_list_val *col_val= &list_value->col_val_array[i]; + if (col_val->max_value) + tmp_str.append(partition_keywords[PKW_MAXVALUE].str); + else if (col_val->null_value) + tmp_str.append("NULL"); + else + { + char buffer[MAX_KEY_LENGTH]; + String str(buffer, sizeof(buffer), &my_charset_bin); + Item *item= col_val->item_expression; + + if (!(item= part_info->get_column_item(item, + part_info->part_field_array[i]))) + { + DBUG_RETURN(1); + } + String *res= item->val_str(&str); + if (!res) + { + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + DBUG_RETURN(1); + } + if (item->result_type() == STRING_RESULT) + tmp_str.append("'"); + tmp_str.append(*res); + if (item->result_type() == STRING_RESULT) + tmp_str.append("'"); + } + if (i != num_elements - 1) + tmp_str.append(","); + } + DBUG_RETURN(0); +} static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, TABLE *table, bool res, @@ -4959,6 +5004,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, char buff[61]; String tmp_res(buff, sizeof(buff), cs); String tmp_str; + uint num_elements; TABLE *show_table= tables->table; handler *file; #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -4991,12 +5037,18 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, /* Partition method*/ switch (part_info->part_type) { case RANGE_PARTITION: - table->field[7]->store(partition_keywords[PKW_RANGE].str, - partition_keywords[PKW_RANGE].length, cs); - break; case LIST_PARTITION: - table->field[7]->store(partition_keywords[PKW_LIST].str, - partition_keywords[PKW_LIST].length, cs); + tmp_res.length(0); + if (part_info->part_type == RANGE_PARTITION) + tmp_res.append(partition_keywords[PKW_RANGE].str, + partition_keywords[PKW_RANGE].length); + else + tmp_res.append(partition_keywords[PKW_LIST].str, + partition_keywords[PKW_LIST].length); + if (part_info->column_list) + tmp_res.append(partition_keywords[PKW_COLUMNS].str, + 
partition_keywords[PKW_COLUMNS].length); + table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs); break; case HASH_PARTITION: tmp_res.length(0); @@ -5074,36 +5126,68 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, /* Partition description */ if (part_info->part_type == RANGE_PARTITION) { - if (part_elem->range_value != LONGLONG_MAX) - table->field[11]->store((longlong) part_elem->range_value, FALSE); + if (part_info->column_list) + { + List_iterator<part_elem_value> list_val_it(part_elem->list_val_list); + part_elem_value *list_value= list_val_it++; + tmp_str.length(0); + if (get_partition_column_description(part_info, + list_value, + tmp_str)) + { + DBUG_RETURN(1); + } + table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs); + } else - table->field[11]->store(partition_keywords[PKW_MAXVALUE].str, + { + if (part_elem->range_value != LONGLONG_MAX) + table->field[11]->store((longlong) part_elem->range_value, FALSE); + else + table->field[11]->store(partition_keywords[PKW_MAXVALUE].str, partition_keywords[PKW_MAXVALUE].length, cs); + } table->field[11]->set_notnull(); } else if (part_info->part_type == LIST_PARTITION) { List_iterator<part_elem_value> list_val_it(part_elem->list_val_list); part_elem_value *list_value; - uint no_items= part_elem->list_val_list.elements; + uint num_items= part_elem->list_val_list.elements; tmp_str.length(0); tmp_res.length(0); if (part_elem->has_null_value) { tmp_str.append("NULL"); - if (no_items > 0) + if (num_items > 0) tmp_str.append(","); } while ((list_value= list_val_it++)) { - if (!list_value->unsigned_flag) - tmp_res.set(list_value->value, cs); + if (part_info->column_list) + { + if (part_info->part_field_list.elements > 1U) + tmp_str.append("("); + if (get_partition_column_description(part_info, + list_value, + tmp_str)) + { + DBUG_RETURN(1); + } + if (part_info->part_field_list.elements > 1U) + tmp_str.append(")"); + } else - tmp_res.set((ulonglong)list_value->value, cs); - tmp_str.append(tmp_res); - if (--no_items != 0) + { + if (!list_value->unsigned_flag) + tmp_res.set(list_value->value, cs); + else + tmp_res.set((ulonglong)list_value->value, cs); + tmp_str.append(tmp_res); + } + if (--num_items != 0) tmp_str.append(","); - }; + } table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs); table->field[11]->set_notnull(); } @@ -6606,7 +6690,7 @@ ST_FIELD_INFO partitions_fields_info[]= (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE}, {"SUBPARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONGLONG, 0, (MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE}, - {"PARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE}, + {"PARTITION_METHOD", 18, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE}, {"SUBPARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE}, {"PARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE}, {"SUBPARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 4d474738843..21a08383fa7 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1585,7 +1585,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) { if (!(part_syntax_buf= generate_partition_syntax(part_info, &syntax_len, - TRUE, TRUE))) + TRUE, TRUE, + lpt->create_info, + lpt->alter_info))) { DBUG_RETURN(TRUE); } @@ -1677,7 +1679,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) char *tmp_part_syntax_str; if (!(part_syntax_buf= generate_partition_syntax(part_info, &syntax_len, - TRUE, TRUE))) + TRUE, 
TRUE, + lpt->create_info, + lpt->alter_info))) { error= 1; goto err; @@ -2498,6 +2502,39 @@ int prepare_create_field(Create_field *sql_field, DBUG_RETURN(0); } + +/* + Get character set from field object generated by parser using + default values when not set. + + SYNOPSIS + get_sql_field_charset() + sql_field The sql_field object + create_info Info generated by parser + + RETURN VALUES + cs Character set +*/ + +CHARSET_INFO* get_sql_field_charset(Create_field *sql_field, + HA_CREATE_INFO *create_info) +{ + CHARSET_INFO *cs= sql_field->charset; + + if (!cs) + cs= create_info->default_table_charset; + /* + table_charset is set only in ALTER TABLE t1 CONVERT TO CHARACTER SET csname + if we want change character set for all varchar/char columns. + But the table charset must not affect the BLOB fields, so don't + allow to change my_charset_bin to somethig else. + */ + if (create_info->table_charset && cs != &my_charset_bin) + cs= create_info->table_charset; + return cs; +} + + /* Preparation for table creation @@ -2561,18 +2598,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, executing a prepared statement for the second time. */ sql_field->length= sql_field->char_length; - if (!sql_field->charset) - sql_field->charset= create_info->default_table_charset; - /* - table_charset is set in ALTER TABLE if we want change character set - for all varchar/char columns. - But the table charset must not affect the BLOB fields, so don't - allow to change my_charset_bin to somethig else. - */ - if (create_info->table_charset && sql_field->charset != &my_charset_bin) - sql_field->charset= create_info->table_charset; - - save_cs= sql_field->charset; + save_cs= sql_field->charset= get_sql_field_charset(sql_field, + create_info); if ((sql_field->flags & BINCMP_FLAG) && !(sql_field->charset= get_charset_by_csname(sql_field->charset->csname, MY_CS_BINSORT,MYF(0)))) @@ -3622,6 +3649,9 @@ bool mysql_create_table_no_lock(THD *thd, } if (check_engine(thd, table_name, create_info)) DBUG_RETURN(TRUE); + + set_table_default_charset(thd, create_info, (char*) db); + db_options= create_info->table_options; if (create_info->row_type == ROW_TYPE_DYNAMIC) db_options|=HA_OPTION_PACK_RECORD; @@ -3715,7 +3745,7 @@ bool mysql_create_table_no_lock(THD *thd, ha_resolve_storage_engine_name(part_info->default_engine_type), ha_resolve_storage_engine_name(create_info->db_type))); if (part_info->check_partition_info(thd, &engine_type, file, - create_info, TRUE)) + create_info, FALSE)) goto err; part_info->default_engine_type= engine_type; @@ -3725,7 +3755,9 @@ bool mysql_create_table_no_lock(THD *thd, */ if (!(part_syntax_buf= generate_partition_syntax(part_info, &syntax_len, - TRUE, TRUE))) + TRUE, TRUE, + create_info, + alter_info))) goto err; part_info->part_info_string= part_syntax_buf; part_info->part_info_len= syntax_len; @@ -3751,9 +3783,9 @@ bool mysql_create_table_no_lock(THD *thd, creates a proper .par file. The current part_info object is only used to create the frm-file and .par-file. 
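The charset resolution that mysql_prepare_create_table() used to open-code is now centralised in get_sql_field_charset() above. A minimal walk-through of its precedence, with invented parameter names (illustrative only, not the patch function itself):

    /* Sketch of the precedence: explicit column charset, else the table
       default; ALTER TABLE ... CONVERT TO CHARACTER SET then overrides
       everything except binary columns. */
    static CHARSET_INFO *resolve_cs(CHARSET_INFO *field_cs,   /* column clause */
                                    CHARSET_INFO *def_cs,     /* table default */
                                    CHARSET_INFO *convert_cs) /* CONVERT TO */
    {
      CHARSET_INFO *cs= field_cs ? field_cs : def_cs;
      if (convert_cs && cs != &my_charset_bin)   /* BLOBs stay binary */
        cs= convert_cs;
      return cs;
    }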
*/ - if (part_info->use_default_no_partitions && - part_info->no_parts && - (int)part_info->no_parts != + if (part_info->use_default_num_partitions && + part_info->num_parts && + (int)part_info->num_parts != file->get_default_no_partitions(create_info)) { uint i; @@ -3764,13 +3796,13 @@ bool mysql_create_table_no_lock(THD *thd, (part_it++)->part_state= PART_TO_BE_DROPPED; } else if (part_info->is_sub_partitioned() && - part_info->use_default_no_subpartitions && - part_info->no_subparts && - (int)part_info->no_subparts != + part_info->use_default_num_subpartitions && + part_info->num_subparts && + (int)part_info->num_subparts != file->get_default_no_partitions(create_info)) { DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE); - part_info->no_subparts= file->get_default_no_partitions(create_info); + part_info->num_subparts= file->get_default_no_partitions(create_info); } } else if (create_info->db_type != engine_type) @@ -3792,8 +3824,6 @@ bool mysql_create_table_no_lock(THD *thd, } #endif - set_table_default_charset(thd, create_info, (char*) db); - if (mysql_prepare_create_table(thd, create_info, alter_info, internal_tmp_table, &db_options, file, @@ -4583,11 +4613,11 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); DBUG_RETURN(TRUE); } - uint no_parts_found; - uint no_parts_opt= alter_info->partition_names.elements; - no_parts_found= set_part_state(alter_info, table->table->part_info, - PART_ADMIN); - if (no_parts_found != no_parts_opt && + uint num_parts_found; + uint num_parts_opt= alter_info->partition_names.elements; + num_parts_found= set_part_state(alter_info, table->table->part_info, + PART_ADMIN); + if (num_parts_found != num_parts_opt && (!(alter_info->flags & ALTER_ALL_PARTITION))) { char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE]; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index bf8f7a0ebe7..a8813fedd27 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -611,6 +611,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token COLLATE_SYM /* SQL-2003-R */ %token COLLATION_SYM /* SQL-2003-N */ %token COLUMNS +%token COLUMN_LIST_SYM %token COLUMN_SYM /* SQL-2003-R */ %token COLUMN_NAME_SYM /* SQL-2003-N */ %token COMMENT_SYM @@ -1187,9 +1188,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <ulonglong_number> ulonglong_num real_ulonglong_num size_number -%type <p_elem_value> - part_bit_expr - %type <lock_type> replace_lock_option opt_low_priority insert_lock_option load_data_lock @@ -1325,6 +1323,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); view_check_option trigger_tail sp_tail sf_tail udf_tail event_tail install uninstall partition_entry binlog_base64_event init_key_options key_options key_opts key_opt key_using_alg + part_column_list server_def server_options_list server_option definer_opt no_definer definer END_OF_INPUT @@ -4062,25 +4061,28 @@ partition_entry: ; partition: - BY part_type_def opt_no_parts opt_sub_part part_defs + BY part_type_def opt_num_parts opt_sub_part part_defs ; part_type_def: opt_linear KEY_SYM '(' part_field_list ')' { - LEX *lex= Lex; - lex->part_info->list_of_part_fields= TRUE; - lex->part_info->part_type= HASH_PARTITION; + partition_info *part_info= Lex->part_info; + part_info->list_of_part_fields= TRUE; + part_info->column_list= FALSE; + part_info->part_type= HASH_PARTITION; } | opt_linear HASH_SYM { Lex->part_info->part_type= HASH_PARTITION; } part_func {} - | RANGE_SYM + | RANGE_SYM part_func { Lex->part_info->part_type= RANGE_PARTITION; } - part_func {} - | LIST_SYM + | RANGE_SYM part_column_list + { Lex->part_info->part_type= RANGE_PARTITION; } + | LIST_SYM part_func + { Lex->part_info->part_type= LIST_PARTITION; } + | LIST_SYM part_column_list { Lex->part_info->part_type= LIST_PARTITION; } - part_func {} ; opt_linear: @@ -4102,59 +4104,66 @@ part_field_item_list: part_field_item: ident { - if (Lex->part_info->part_field_list.push_back($1.str)) + partition_info *part_info= Lex->part_info; + part_info->num_columns++; + if (part_info->part_field_list.push_back($1.str)) { mem_alloc_error(1); MYSQL_YYABORT; } + if (part_info->num_columns > MAX_REF_PARTS) + { + my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), + "list of partition fields"); + MYSQL_YYABORT; + } + } + ; + +part_column_list: + COLUMN_LIST_SYM '(' part_field_list ')' + { + partition_info *part_info= Lex->part_info; + part_info->column_list= TRUE; + part_info->list_of_part_fields= TRUE; } ; + part_func: '(' remember_name part_func_expr remember_end ')' { - LEX *lex= Lex; - uint expr_len= (uint)($4 - $2) - 1; - lex->part_info->list_of_part_fields= FALSE; - lex->part_info->part_expr= $3; - char *func_string= (char*) sql_memdup($2+1, expr_len); - if (func_string == NULL) - MYSQL_YYABORT; - lex->part_info->part_func_string= func_string; - lex->part_info->part_func_len= expr_len; + partition_info *part_info= Lex->part_info; + if (part_info->set_part_expr($2+1, $3, $4, FALSE)) + { MYSQL_YYABORT; } + part_info->num_columns= 1; + part_info->column_list= FALSE; } ; sub_part_func: '(' remember_name part_func_expr remember_end ')' { - LEX *lex= Lex; - uint expr_len= (uint)($4 - $2) - 1; - lex->part_info->list_of_subpart_fields= FALSE; - lex->part_info->subpart_expr= $3; - char *func_string= (char*) sql_memdup($2+1, expr_len); - if (func_string == 
NULL) - MYSQL_YYABORT; - lex->part_info->subpart_func_string= func_string; - lex->part_info->subpart_func_len= expr_len; + if (Lex->part_info->set_part_expr($2+1, $3, $4, TRUE)) + { MYSQL_YYABORT; } } ; -opt_no_parts: +opt_num_parts: /* empty */ {} | PARTITIONS_SYM real_ulong_num { - uint no_parts= $2; - LEX *lex= Lex; - if (no_parts == 0) + uint num_parts= $2; + partition_info *part_info= Lex->part_info; + if (num_parts == 0) { my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions"); MYSQL_YYABORT; } - lex->part_info->no_parts= no_parts; - lex->part_info->use_default_no_partitions= FALSE; + part_info->num_parts= num_parts; + part_info->use_default_num_partitions= FALSE; } ; @@ -4162,15 +4171,15 @@ opt_sub_part: /* empty */ {} | SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func { Lex->part_info->subpart_type= HASH_PARTITION; } - opt_no_subparts {} + opt_num_subparts {} | SUBPARTITION_SYM BY opt_linear KEY_SYM '(' sub_part_field_list ')' { - LEX *lex= Lex; - lex->part_info->subpart_type= HASH_PARTITION; - lex->part_info->list_of_subpart_fields= TRUE; + partition_info *part_info= Lex->part_info; + part_info->subpart_type= HASH_PARTITION; + part_info->list_of_subpart_fields= TRUE; } - opt_no_subparts {} + opt_num_subparts {} ; sub_part_field_list: @@ -4181,11 +4190,18 @@ sub_part_field_list: sub_part_field_item: ident { - if (Lex->part_info->subpart_field_list.push_back($1.str)) + partition_info *part_info= Lex->part_info; + if (part_info->subpart_field_list.push_back($1.str)) { mem_alloc_error(1); MYSQL_YYABORT; } + if (part_info->subpart_field_list.elements > MAX_REF_PARTS) + { + my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), + "list of subpartition fields"); + MYSQL_YYABORT; + } } ; @@ -4205,33 +4221,46 @@ part_func_expr: } ; -opt_no_subparts: +opt_num_subparts: /* empty */ {} | SUBPARTITIONS_SYM real_ulong_num { - uint no_parts= $2; + uint num_parts= $2; LEX *lex= Lex; - if (no_parts == 0) + if (num_parts == 0) { my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions"); MYSQL_YYABORT; } - lex->part_info->no_subparts= no_parts; - lex->part_info->use_default_no_subpartitions= FALSE; + lex->part_info->num_subparts= num_parts; + lex->part_info->use_default_num_subpartitions= FALSE; } ; part_defs: /* empty */ - {} + { + partition_info *part_info= Lex->part_info; + if (part_info->part_type == RANGE_PARTITION) + { + my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), + "RANGE"); + MYSQL_YYABORT; + } + else if (part_info->part_type == LIST_PARTITION) + { + my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), + "LIST"); + MYSQL_YYABORT; + } + } | '(' part_def_list ')' { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; + partition_info *part_info= Lex->part_info; uint count_curr_parts= part_info->partitions.elements; - if (part_info->no_parts != 0) + if (part_info->num_parts != 0) { - if (part_info->no_parts != + if (part_info->num_parts != count_curr_parts) { my_parse_error(ER(ER_PARTITION_WRONG_NO_PART_ERROR)); @@ -4240,7 +4269,7 @@ part_defs: } else if (count_curr_parts > 0) { - part_info->no_parts= count_curr_parts; + part_info->num_parts= count_curr_parts; } part_info->count_curr_subparts= 0; } @@ -4254,8 +4283,7 @@ part_def_list: part_definition: PARTITION_SYM { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; + partition_info *part_info= Lex->part_info; partition_element *p_elem= new partition_element(); if (!p_elem || part_info->partitions.push_back(p_elem)) @@ -4267,7 +4295,7 @@ part_definition: part_info->curr_part_elem= p_elem; 
part_info->current_partition= p_elem; part_info->use_default_partitions= FALSE; - part_info->use_default_no_partitions= FALSE; + part_info->use_default_num_partitions= FALSE; } part_name opt_part_values @@ -4279,8 +4307,7 @@ part_definition: part_name: ident { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; + partition_info *part_info= Lex->part_info; partition_element *p_elem= part_info->curr_part_elem; p_elem->partition_name= $1.str; } @@ -4290,15 +4317,16 @@ opt_part_values: /* empty */ { LEX *lex= Lex; + partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (lex->part_info->part_type == RANGE_PARTITION) + if (part_info->part_type == RANGE_PARTITION) { my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN"); MYSQL_YYABORT; } - if (lex->part_info->part_type == LIST_PARTITION) + if (part_info->part_type == LIST_PARTITION) { my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "LIST", "IN"); @@ -4306,14 +4334,15 @@ opt_part_values: } } else - lex->part_info->part_type= HASH_PARTITION; + part_info->part_type= HASH_PARTITION; } - | VALUES LESS_SYM THAN_SYM part_func_max + | VALUES LESS_SYM THAN_SYM { LEX *lex= Lex; + partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (Lex->part_info->part_type != RANGE_PARTITION) + if (part_info->part_type != RANGE_PARTITION) { my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN"); @@ -4321,153 +4350,183 @@ opt_part_values: } } else - lex->part_info->part_type= RANGE_PARTITION; + part_info->part_type= RANGE_PARTITION; } - | VALUES IN_SYM '(' part_list_func ')' + part_func_max {} + | VALUES IN_SYM { LEX *lex= Lex; + partition_info *part_info= lex->part_info; if (! lex->is_partition_management()) { - if (Lex->part_info->part_type != LIST_PARTITION) + if (part_info->part_type != LIST_PARTITION) { my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), - "LIST", "IN"); + "LIST", "IN"); MYSQL_YYABORT; } } else - lex->part_info->part_type= LIST_PARTITION; + part_info->part_type= LIST_PARTITION; } + part_values_in {} ; part_func_max: - max_value_sym + MAX_VALUE_SYM { - LEX *lex= Lex; - if (lex->part_info->defined_max_value) + partition_info *part_info= Lex->part_info; + + if (part_info->num_columns && + part_info->num_columns != 1U) { - my_parse_error(ER(ER_PARTITION_MAXVALUE_ERROR)); + part_info->print_debug("Kilroy II", NULL); + my_parse_error(ER(ER_PARTITION_COLUMN_LIST_ERROR)); MYSQL_YYABORT; } - lex->part_info->defined_max_value= TRUE; - lex->part_info->curr_part_elem->max_value= TRUE; - lex->part_info->curr_part_elem->range_value= LONGLONG_MAX; - } - | part_range_func - { - if (Lex->part_info->defined_max_value) + else + part_info->num_columns= 1U; + if (part_info->init_column_part()) { - my_parse_error(ER(ER_PARTITION_MAXVALUE_ERROR)); MYSQL_YYABORT; } - if (Lex->part_info->curr_part_elem->has_null_value) + if (part_info->add_max_value()) { - my_parse_error(ER(ER_NULL_IN_VALUES_LESS_THAN)); MYSQL_YYABORT; } } + | part_value_item {} ; -max_value_sym: - MAX_VALUE_SYM - | '(' MAX_VALUE_SYM ')' - ; - -part_range_func: - '(' part_bit_expr ')' +part_values_in: + part_value_item + { + LEX *lex= Lex; + partition_info *part_info= lex->part_info; + part_info->print_debug("part_values_in: part_value_item", NULL); + + if (part_info->num_columns != 1U) + { + if (!lex->is_partition_management() || + part_info->num_columns == 0 || + part_info->num_columns > MAX_REF_PARTS) + { + part_info->print_debug("Kilroy III", NULL); + 
my_parse_error(ER(ER_PARTITION_COLUMN_LIST_ERROR)); + MYSQL_YYABORT; + } + /* + Reorganize the current large array into a list of small + arrays with one entry in each array. This can happen + in the first partition of an ALTER TABLE statement where + we ADD or REORGANIZE partitions. Also can only happen + for LIST partitions. + */ + if (part_info->reorganize_into_single_field_col_val()) + { + MYSQL_YYABORT; + } + } + } + | '(' part_value_list ')' { partition_info *part_info= Lex->part_info; - if (!($2->unsigned_flag)) - part_info->curr_part_elem->signed_flag= TRUE; - part_info->curr_part_elem->range_value= $2->value; + if (part_info->num_columns < 2U) + { + my_parse_error(ER(ER_ROW_SINGLE_PARTITION_FIELD_ERROR)); + MYSQL_YYABORT; + } } ; -part_list_func: - part_list_item {} - | part_list_func ',' part_list_item {} +part_value_list: + part_value_item {} + | part_value_list ',' part_value_item {} ; -part_list_item: - part_bit_expr +part_value_item: + '(' { - part_elem_value *value_ptr= $1; partition_info *part_info= Lex->part_info; - if (!value_ptr->unsigned_flag) - part_info->curr_part_elem->signed_flag= TRUE; - if (!value_ptr->null_value && - part_info->curr_part_elem-> - list_val_list.push_back(value_ptr)) + part_info->print_debug("( part_value_item", NULL); + /* Initialisation code needed for each list of value expressions */ + if (!(part_info->part_type == LIST_PARTITION && + part_info->num_columns == 1U) && + part_info->init_column_part()) { - mem_alloc_error(sizeof(part_elem_value)); MYSQL_YYABORT; } } - ; - -part_bit_expr: - bit_expr + part_value_item_list {} + ')' { - Item *part_expr= $1; - THD *thd= YYTHD; - LEX *lex= thd->lex; - Name_resolution_context *context= &lex->current_select->context; - TABLE_LIST *save_list= context->table_list; - const char *save_where= thd->where; - - context->table_list= 0; - thd->where= "partition function"; - - part_elem_value *value_ptr= - (part_elem_value*)sql_alloc(sizeof(part_elem_value)); - if (!value_ptr) + LEX *lex= Lex; + partition_info *part_info= Lex->part_info; + part_info->print_debug(") part_value_item", NULL); + if (part_info->num_columns == 0) + part_info->num_columns= part_info->curr_list_object; + if (part_info->num_columns != part_info->curr_list_object) { - mem_alloc_error(sizeof(part_elem_value)); + /* + All value items lists must be of equal length, in some cases + which is covered by the above if-statement we don't know yet + how many columns is in the partition so the assignment above + ensures that we only report errors when we know we have an + error. 
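The closing-parenthesis action above enforces that every value list of a COLUMN_LIST partition has the same width: the first complete list fixes num_columns and later lists are checked against it. A minimal sketch of that rule (illustrative only, not the parser action itself):

    /* Sketch: first value list fixes the expected width, later lists
       must match it or ER_PARTITION_COLUMN_LIST_ERROR is raised. */
    static bool value_list_width_ok(uint *num_columns, uint curr_list_object)
    {
      if (*num_columns == 0)        /* width not known yet: fix it now */
        *num_columns= curr_list_object;
      return *num_columns == curr_list_object;
    }

So for PARTITION BY RANGE COLUMN_LIST (a, b), a first partition declared with VALUES LESS THAN (10, 20) fixes the width to two, and a later VALUES LESS THAN (30, 40, 50) is rejected.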
+ */ + part_info->print_debug("Kilroy I", NULL); + my_parse_error(ER(ER_PARTITION_COLUMN_LIST_ERROR)); MYSQL_YYABORT; } - if (part_expr->walk(&Item::check_partition_func_processor, 0, - NULL)) + part_info->curr_list_object= 0; + } + ; + +part_value_item_list: + part_value_expr_item {} + | part_value_item_list ',' part_value_expr_item {} + ; + +part_value_expr_item: + MAX_VALUE_SYM + { + partition_info *part_info= Lex->part_info; + part_column_list_val *col_val; + if (part_info->part_type == LIST_PARTITION) { - my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + my_parse_error(ER(ER_MAXVALUE_IN_VALUES_IN)); MYSQL_YYABORT; } - if (part_expr->fix_fields(YYTHD, (Item**)0) || - ((context->table_list= save_list), FALSE) || - (!part_expr->const_item()) || - (!lex->safe_to_cache_query)) + if (part_info->add_max_value()) { - my_error(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR, MYF(0)); MYSQL_YYABORT; } - thd->where= save_where; - value_ptr->value= part_expr->val_int(); - value_ptr->unsigned_flag= TRUE; - if (!part_expr->unsigned_flag && - value_ptr->value < 0) - value_ptr->unsigned_flag= FALSE; - if ((value_ptr->null_value= part_expr->null_value)) + } + | bit_expr + { + LEX *lex= Lex; + partition_info *part_info= lex->part_info; + Item *part_expr= $1; + + if (!lex->safe_to_cache_query) { - if (Lex->part_info->curr_part_elem->has_null_value) - { - my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0)); - MYSQL_YYABORT; - } - Lex->part_info->curr_part_elem->has_null_value= TRUE; + my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); + MYSQL_YYABORT; } - else if (part_expr->result_type() != INT_RESULT) + if (part_info->add_column_list_value(YYTHD, part_expr)) { - my_parse_error(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR)); MYSQL_YYABORT; } - $$= value_ptr; } ; + opt_sub_partition: /* empty */ { - if (Lex->part_info->no_subparts != 0 && - !Lex->part_info->use_default_subpartitions) + partition_info *part_info= Lex->part_info; + if (part_info->num_subparts != 0 && + !part_info->use_default_subpartitions) { /* We come here when we have defined subpartitions on the first @@ -4479,11 +4538,10 @@ opt_sub_partition: } | '(' sub_part_list ')' { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; - if (part_info->no_subparts != 0) + partition_info *part_info= Lex->part_info; + if (part_info->num_subparts != 0) { - if (part_info->no_subparts != + if (part_info->num_subparts != part_info->count_curr_subparts) { my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); @@ -4497,7 +4555,7 @@ opt_sub_partition: my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); MYSQL_YYABORT; } - part_info->no_subparts= part_info->count_curr_subparts; + part_info->num_subparts= part_info->count_curr_subparts; } part_info->count_curr_subparts= 0; } @@ -4511,8 +4569,7 @@ sub_part_list: sub_part_definition: SUBPARTITION_SYM { - LEX *lex= Lex; - partition_info *part_info= lex->part_info; + partition_info *part_info= Lex->part_info; partition_element *curr_part= part_info->current_partition; partition_element *sub_p_elem= new partition_element(curr_part); if (part_info->use_default_subpartitions && @@ -4540,7 +4597,7 @@ sub_part_definition: } part_info->curr_part_elem= sub_p_elem; part_info->use_default_subpartitions= FALSE; - part_info->use_default_no_subpartitions= FALSE; + part_info->use_default_num_subpartitions= FALSE; part_info->count_curr_subparts++; } sub_name opt_part_options {} @@ -4566,9 +4623,9 @@ opt_part_option: { Lex->part_info->curr_part_elem->tablespace_name= $3.str; } | opt_storage ENGINE_SYM 
opt_equal storage_engines { - LEX *lex= Lex; - lex->part_info->curr_part_elem->engine_type= $4; - lex->part_info->default_engine_type= $4; + partition_info *part_info= Lex->part_info; + part_info->curr_part_elem->engine_type= $4; + part_info->default_engine_type= $4; } | NODEGROUP_SYM opt_equal real_ulong_num { Lex->part_info->curr_part_elem->nodegroup_id= (uint16) $3; } @@ -5975,7 +6032,7 @@ alter_commands: LEX *lex= Lex; lex->alter_info.flags|= ALTER_COALESCE_PARTITION; lex->no_write_to_binlog= $3; - lex->alter_info.no_parts= $4; + lex->alter_info.num_parts= $4; } | TRUNCATE_SYM PARTITION_SYM all_or_alt_part_name_list { @@ -6024,12 +6081,11 @@ add_part_extra: | '(' part_def_list ')' { LEX *lex= Lex; - lex->part_info->no_parts= lex->part_info->partitions.elements; + lex->part_info->num_parts= lex->part_info->partitions.elements; } | PARTITIONS_SYM real_ulong_num { - LEX *lex= Lex; - lex->part_info->no_parts= $2; + Lex->part_info->num_parts= $2; } ; @@ -6059,8 +6115,8 @@ reorg_parts_rule: } INTO '(' part_def_list ')' { - LEX *lex= Lex; - lex->part_info->no_parts= lex->part_info->partitions.elements; + partition_info *part_info= Lex->part_info; + part_info->num_parts= part_info->partitions.elements; } ; @@ -11880,7 +11936,6 @@ keyword_sp: | MAX_SIZE_SYM {} | MAX_UPDATES_PER_HOUR {} | MAX_USER_CONNECTIONS_SYM {} - | MAX_VALUE_SYM {} | MEDIUM_SYM {} | MEMORY_SYM {} | MERGE_SYM {} |
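
For reference, the rewritten VALUES grammar above (part_func_max, part_values_in, part_value_item, part_value_expr_item) appears to accept statements along the following lines. This is only a sketch based on the rules visible in this hunk; the PARTITION BY ... COLUMNS spelling used here to enable multi-column partitioning is an assumption, since the clause that introduces it is defined elsewhere in the grammar and may have used a different keyword at this stage of WL#3352.

    -- Sketch only: the COLUMNS keyword is assumed, not shown in this hunk.
    CREATE TABLE t1 (a INT, b INT)
    PARTITION BY RANGE COLUMNS (a, b)
    (PARTITION p0 VALUES LESS THAN (10, 20),              -- part_value_item: '(' part_value_item_list ')'
     PARTITION p1 VALUES LESS THAN (MAXVALUE, MAXVALUE));  -- per-column MAXVALUE via part_value_expr_item

    CREATE TABLE t2 (a INT, b INT)
    PARTITION BY LIST COLUMNS (a, b)
    (PARTITION p0 VALUES IN ((1, 1), (2, 2)),              -- row lists via '(' part_value_list ')'
     PARTITION p1 VALUES IN ((3, 3)));

Per the new actions, MAXVALUE inside VALUES IN raises ER_MAXVALUE_IN_VALUES_IN, a row constructor with a single partitioning column raises ER_ROW_SINGLE_PARTITION_FIELD_ERROR, and value lists of unequal length raise ER_PARTITION_COLUMN_LIST_ERROR. The removal of MAX_VALUE_SYM from keyword_sp in the last hunk suggests that MAXVALUE also becomes a reserved word with this change.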