author     Mats Kindahl <mats@sun.com>  2008-10-23 21:27:09 +0200
committer  Mats Kindahl <mats@sun.com>  2008-10-23 21:27:09 +0200
commit     32c161f3ea7314051090fb02eca03ef347394010 (patch)
tree       09212d65be943c6c5dfd6d6a834841683d81f9f8 /sql
parent     3be6d967c5d04fa6dbeab1c25f28673d3abf8433 (diff)
parent     e291aab7da9587f742291e7cc5e10e568846066e (diff)
download   mariadb-git-32c161f3ea7314051090fb02eca03ef347394010.tar.gz
Merging 5.1 main into 5.1-rpl
Diffstat (limited to 'sql')
-rw-r--r--  sql/event_db_repository.cc    14
-rw-r--r--  sql/event_parse_data.cc       44
-rw-r--r--  sql/event_parse_data.h        10
-rw-r--r--  sql/field.cc                  15
-rw-r--r--  sql/field.h                   10
-rw-r--r--  sql/ha_ndbcluster.cc          25
-rw-r--r--  sql/ha_partition.cc          571
-rw-r--r--  sql/ha_partition.h            71
-rw-r--r--  sql/handler.cc               115
-rw-r--r--  sql/handler.h                 28
-rw-r--r--  sql/item.cc                   26
-rw-r--r--  sql/item.h                     2
-rw-r--r--  sql/item_func.cc              28
-rw-r--r--  sql/item_func.h               15
-rw-r--r--  sql/lock.cc                    2
-rw-r--r--  sql/log.cc                     7
-rw-r--r--  sql/log_event.cc              31
-rw-r--r--  sql/mysql_priv.h               5
-rw-r--r--  sql/mysqld.cc                 66
-rw-r--r--  sql/opt_range.cc              84
-rw-r--r--  sql/parse_file.cc             24
-rw-r--r--  sql/parse_file.h               5
-rw-r--r--  sql/partition_info.h           5
-rw-r--r--  sql/set_var.cc                17
-rw-r--r--  sql/sp_head.cc                13
-rw-r--r--  sql/sql_base.cc              115
-rw-r--r--  sql/sql_cache.cc               3
-rw-r--r--  sql/sql_class.cc              25
-rw-r--r--  sql/sql_class.h                4
-rw-r--r--  sql/sql_db.cc                 17
-rw-r--r--  sql/sql_insert.cc             27
-rw-r--r--  sql/sql_lex.cc                 2
-rw-r--r--  sql/sql_lex.h                  2
-rw-r--r--  sql/sql_load.cc                4
-rw-r--r--  sql/sql_parse.cc              35
-rw-r--r--  sql/sql_partition.cc         283
-rw-r--r--  sql/sql_select.cc             11
-rw-r--r--  sql/sql_show.cc               21
-rw-r--r--  sql/sql_table.cc             132
-rw-r--r--  sql/sql_union.cc               1
-rw-r--r--  sql/sql_update.cc             35
-rw-r--r--  sql/sql_view.cc                4
-rw-r--r--  sql/sql_yacc.yy               13
-rw-r--r--  sql/table.cc                  24
-rw-r--r--  sql/table.h                   11
45 files changed, 1497 insertions, 505 deletions
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 382fd024aa8..c26b740d24a 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -185,6 +185,8 @@ mysql_event_fill_row(THD *thd,
DBUG_PRINT("info", ("dbname=[%s]", et->dbname.str));
DBUG_PRINT("info", ("name =[%s]", et->name.str));
+ DBUG_ASSERT(et->on_completion != Event_parse_data::ON_COMPLETION_DEFAULT);
+
if (table->s->fields < ET_FIELD_COUNT)
{
/*
@@ -745,6 +747,18 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data,
store_record(table,record[1]);
+ /*
+ We check whether ALTER EVENT was given dates that are in the past.
+ However, to know how to react, we need the ON COMPLETION type. The
+ check is deferred to this point because by now we have the previous
+ setting (from the event-table) to fall back on if nothing was specified
+ in the ALTER EVENT-statement.
+ */
+
+ if (parse_data->check_dates(thd,
+ (int) table->field[ET_FIELD_ON_COMPLETION]->val_int()))
+ goto end;
+
/* Don't update create on row update. */
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index e87e4593f8f..df419e92d0d 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -45,7 +45,7 @@ Event_parse_data::new_instance(THD *thd)
*/
Event_parse_data::Event_parse_data()
- :on_completion(Event_parse_data::ON_COMPLETION_DROP),
+ :on_completion(Event_parse_data::ON_COMPLETION_DEFAULT),
status(Event_parse_data::ENABLED),
do_not_create(FALSE),
body_changed(FALSE),
@@ -114,6 +114,12 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc)
if (ltime_utc >= (my_time_t) thd->query_start())
return;
+ /*
+ We'll come back later when we have the real on_completion value
+ */
+ if (on_completion == Event_parse_data::ON_COMPLETION_DEFAULT)
+ return;
+
if (on_completion == Event_parse_data::ON_COMPLETION_DROP)
{
switch (thd->lex->sql_command) {
@@ -142,6 +148,42 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc)
/*
+ Check time/dates in ALTER EVENT
+
+ We check whether ALTER EVENT was given dates that are in the past.
+ However, to know how to react, we need the ON COMPLETION type. Hence,
+ the check is deferred until we have the previous ON COMPLETION type
+ from the event-db to fall back on if nothing was specified in the
+ ALTER EVENT-statement.
+
+ SYNOPSIS
+ Event_parse_data::check_dates()
+ thd Thread
+ on_completion ON COMPLETION value currently in event-db.
+ Will be overridden by value in ALTER EVENT if given.
+
+ RETURN VALUE
+ TRUE an error occurred, do not ALTER
+ FALSE OK
+*/
+
+bool
+Event_parse_data::check_dates(THD *thd, int previous_on_completion)
+{
+ if (on_completion == Event_parse_data::ON_COMPLETION_DEFAULT)
+ {
+ on_completion= previous_on_completion;
+ if (!ends_null)
+ check_if_in_the_past(thd, ends);
+ if (!execute_at_null)
+ check_if_in_the_past(thd, execute_at);
+ }
+ return do_not_create;
+}
+
+
+
+/*
Sets time for execution for one-time event.
SYNOPSIS
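
The new ON_COMPLETION_DEFAULT value makes this validation two-phase: check_if_in_the_past() bails out while the setting is still unresolved, and check_dates() repeats the check once the stored setting has been read from the event table. A minimal standalone sketch of the same deferral pattern (hypothetical names, not the server's classes):

#include <cstdio>
#include <ctime>

/* Hypothetical mirror of the three-state enum introduced above:
   OC_DEFAULT means "not specified in ALTER EVENT", so validation waits. */
enum OnCompletion { OC_DEFAULT = 0, OC_DROP, OC_PRESERVE };

struct EventData {
  OnCompletion on_completion;
  time_t ends;
  bool ends_null;
  bool do_not_create;

  void check_if_in_the_past(time_t now) {
    if (ends_null || ends >= now) return;
    if (on_completion == OC_DEFAULT) return;  /* come back with the real value */
    if (on_completion == OC_DROP) do_not_create = true;  /* refuse, as in ALTER */
  }

  /* Called once the previous setting is known from the event table. */
  bool check_dates(time_t now, OnCompletion previous) {
    if (on_completion == OC_DEFAULT) {
      on_completion = previous;          /* fall back to the stored value */
      check_if_in_the_past(now);
    }
    return do_not_create;                /* true => refuse the ALTER */
  }
};

int main() {
  EventData e = {OC_DEFAULT, (time_t)0, false, false};  /* ENDS in the past */
  bool refuse = e.check_dates(time(NULL), OC_DROP);
  std::printf("refuse ALTER: %d\n", (int)refuse);
  return 0;
}
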
diff --git a/sql/event_parse_data.h b/sql/event_parse_data.h
index 221bf92664f..87a800c2078 100644
--- a/sql/event_parse_data.h
+++ b/sql/event_parse_data.h
@@ -38,7 +38,12 @@ public:
enum enum_on_completion
{
- ON_COMPLETION_DROP = 1,
+ /*
+ On CREATE EVENT, DROP is the DEFAULT as per the docs.
+ On ALTER EVENT, "no change" is the DEFAULT.
+ */
+ ON_COMPLETION_DEFAULT = 0,
+ ON_COMPLETION_DROP,
ON_COMPLETION_PRESERVE
};
@@ -80,6 +85,9 @@ public:
bool
check_parse_data(THD *thd);
+ bool
+ check_dates(THD *thd, int previous_on_completion);
+
private:
void
diff --git a/sql/field.cc b/sql/field.cc
index 70cc14bda5f..16bf0fdb070 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -6610,7 +6610,8 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
uint length;
if (table->in_use->variables.sql_mode &
MODE_PAD_CHAR_TO_FULL_LENGTH)
- length= my_charpos(field_charset, ptr, ptr + field_length, field_length);
+ length= my_charpos(field_charset, ptr, ptr + field_length,
+ field_length / field_charset->mbmaxlen);
else
length= field_charset->cset->lengthsp(field_charset, (const char*) ptr,
field_length);
@@ -7698,8 +7699,18 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
return 0;
}
- if (from == value.ptr())
+ /*
+ If the 'from' address is in the range of the temporary 'value'-
+ object we need to copy the content to a different location or it will be
+ invalidated when the 'value'-object is reallocated to make room for
+ the new character set.
+ */
+ if (from >= value.ptr() && from <= value.ptr()+value.length())
{
+ /*
+ If content of the 'from'-address is cached in the 'value'-object
+ it is possible that the content needs a character conversion.
+ */
uint32 dummy_offset;
if (!String::needs_conversion(length, cs, field_charset, &dummy_offset))
{
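
The essential change above is that `from == value.ptr()` became a range test: the source pointer can reference the middle of the cached buffer, and reallocating 'value' invalidates such a pointer just as surely as it invalidates the start. A self-contained sketch of the aliasing test (simplified; the real code checks against the String object shown above):

#include <cstdio>
#include <cstring>

/* True when src points anywhere inside [buf, buf + len]; copying from such
   a src after buf has been reallocated would read freed memory. */
static bool aliases(const char *src, const char *buf, size_t len) {
  return src >= buf && src <= buf + len;   /* the old test was src == buf */
}

int main() {
  char value[32] = "cached blob content";
  char other[16] = "elsewhere";
  size_t len = std::strlen(value);
  std::printf("start:  %d\n", (int)aliases(value, value, len));     /* 1 */
  std::printf("middle: %d\n", (int)aliases(value + 7, value, len)); /* 1, missed by == */
  std::printf("other:  %d\n", (int)aliases(other, value, len));     /* 0 */
  return 0;
}
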
diff --git a/sql/field.h b/sql/field.h
index 0d0d7fba116..aa69fea6bdd 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1597,8 +1597,16 @@ private:
class Field_blob :public Field_longstr {
protected:
+ /**
+ The number of bytes used to represent the length of the blob.
+ */
uint packlength;
- String value; // For temporaries
+
+ /**
+ The 'value'-object is a cache fronting the storage engine.
+ */
+ String value;
+
public:
Field_blob(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index c97ed6fada8..3454f3558e8 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -9977,34 +9977,23 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info,
if (table_changes != IS_EQUAL_YES)
DBUG_RETURN(COMPATIBLE_DATA_NO);
- /**
- * Changing from/to primary key
- *
- * This is _not_ correct, but check_if_incompatible_data-interface
- * doesnt give more info, so I guess that we can't do any
- * online add index if not using primary key
- *
- * This as mysql will handle a unique not null index as primary
- * even wo/ user specifiying it... :-(
- *
- */
- if ((table_share->primary_key == MAX_KEY && pk) ||
- (table_share->primary_key != MAX_KEY && !pk) ||
- (table_share->primary_key == MAX_KEY && !pk && ai))
- {
- DBUG_RETURN(COMPATIBLE_DATA_NO);
- }
-
/* Check that auto_increment value was not changed */
if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
create_info->auto_increment_value != 0)
+ {
+ DBUG_PRINT("info", ("auto_increment value changed"));
DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
/* Check that row format didn't change */
if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
get_row_type() != create_info->row_type)
+ {
+ DBUG_PRINT("info", ("row format changed"));
DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
+ DBUG_PRINT("info", ("new table seems compatible"));
DBUG_RETURN(COMPATIBLE_DATA_YES);
}
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 34cd160e7e4..14e321218ca 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -160,7 +160,8 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF;
ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
:handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
- m_is_sub_partitioned(0), is_clone(FALSE)
+ m_is_sub_partitioned(0), is_clone(FALSE), auto_increment_lock(FALSE),
+ auto_increment_safe_stmt_log_lock(FALSE)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
init_handler_variables();
@@ -182,7 +183,8 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
:handler(hton, NULL), m_part_info(part_info),
m_create_handler(TRUE),
- m_is_sub_partitioned(m_part_info->is_sub_partitioned()), is_clone(FALSE)
+ m_is_sub_partitioned(m_part_info->is_sub_partitioned()), is_clone(FALSE),
+ auto_increment_lock(FALSE), auto_increment_safe_stmt_log_lock(FALSE)
{
DBUG_ENTER("ha_partition::ha_partition(part_info)");
init_handler_variables();
@@ -1248,7 +1250,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
assumes that external_lock() is last call that may fail here.
Otherwise see description for cleanup_new_partition().
*/
- if ((error= file->ha_external_lock(current_thd, m_lock_type)))
+ if ((error= file->ha_external_lock(ha_thd(), m_lock_type)))
goto error;
DBUG_RETURN(0);
@@ -1336,8 +1338,8 @@ void ha_partition::cleanup_new_partition(uint part_count)
int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
const char *path,
- ulonglong *copied,
- ulonglong *deleted,
+ ulonglong * const copied,
+ ulonglong * const deleted,
const uchar *pack_frm_data
__attribute__((unused)),
size_t pack_frm_len
@@ -1354,7 +1356,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
int error= 1;
bool first;
uint temp_partitions= m_part_info->temp_partitions.elements;
- THD *thd= current_thd;
+ THD *thd= ha_thd();
DBUG_ENTER("ha_partition::change_partitions");
/*
@@ -1628,7 +1630,8 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
partitions.
*/
-int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+int ha_partition::copy_partitions(ulonglong * const copied,
+ ulonglong * const deleted)
{
uint reorg_part= 0;
int result= 0;
@@ -1674,13 +1677,13 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
table since it doesn't fit into any partition any longer due to
changed partitioning ranges or list values.
*/
- deleted++;
+ (*deleted)++;
}
else
{
THD *thd= ha_thd();
/* Copy record to new handler */
- copied++;
+ (*copied)++;
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
result= m_new_file[new_part]->ha_write_row(m_rec0);
reenable_binlog(thd);
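
Two things changed in copy_partitions(): `copied++` and `deleted++` advanced the local pointer parameters instead of the counters they point to, so the caller always read back zero, and the signature gained `* const`, which makes the compiler reject the broken form from now on. A tiny demonstration with a hypothetical function:

#include <cstdio>

/* With 'unsigned long long * const', the pointer itself is immutable, so
   'copied++' no longer compiles and '(*copied)++' is the only option. */
static void copy_rows(unsigned long long *const copied) {
  for (int i = 0; i < 3; i++)
    (*copied)++;      /* bumps the caller's counter */
  /* copied++;           would now be a compile error */
}

int main() {
  unsigned long long copied = 0;
  copy_rows(&copied);
  std::printf("copied=%llu\n", copied);   /* prints 3 */
  return 0;
}
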
@@ -1714,6 +1717,14 @@ error:
void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
{
+ /*
+ Fix for bug#38751, some engines need info-calls in ALTER.
+ Archive needs this since it flushes in ::info.
+ HA_STATUS_AUTO is optimized so it will not always be forwarded
+ to all partitions, but HA_STATUS_VARIABLE will.
+ */
+ info(HA_STATUS_VARIABLE);
+
info(HA_STATUS_AUTO);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
@@ -1804,7 +1815,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
handler **file, **abort_file;
DBUG_ENTER("del_ren_cre_table()");
- if (get_from_handler_file(from, current_thd->mem_root))
+ if (get_from_handler_file(from, ha_thd()->mem_root))
DBUG_RETURN(TRUE);
DBUG_ASSERT(m_file_buffer);
DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to));
@@ -1931,7 +1942,7 @@ int ha_partition::set_up_table_before_create(TABLE *tbl,
{
int error= 0;
const char *partition_name;
- THD *thd= current_thd;
+ THD *thd= ha_thd();
DBUG_ENTER("set_up_table_before_create");
if (!part_elem)
@@ -2327,7 +2338,7 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
tot_partition_words= (m_tot_parts + 3) / 4;
engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*));
for (i= 0; i < m_tot_parts; i++)
- engine_array[i]= ha_resolve_by_legacy_type(current_thd,
+ engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
(enum legacy_db_type)
*(uchar *) ((file_buffer) + 12 + i));
address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
@@ -2398,8 +2409,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
uint alloc_len;
handler **file;
char name_buff[FN_REFLEN];
+ bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
DBUG_ENTER("ha_partition::open");
+ DBUG_ASSERT(table->s == table_share);
ref_length= 0;
m_mode= mode;
m_open_test_lock= test_if_locked;
@@ -2408,9 +2421,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(1);
m_start_key.length= 0;
m_rec0= table->record[0];
- m_rec_length= table->s->reclength;
+ m_rec_length= table_share->reclength;
alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
- alloc_len+= table->s->max_key_length;
+ alloc_len+= table_share->max_key_length;
if (!m_ordered_rec_buffer)
{
if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME))))
@@ -2483,6 +2496,30 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
goto err_handler;
/*
+ Use table_share->ha_data to share auto_increment_value among all handlers
+ for the same table.
+ */
+ if (is_not_tmp_table)
+ pthread_mutex_lock(&table_share->mutex);
+ if (!table_share->ha_data)
+ {
+ HA_DATA_PARTITION *ha_data;
+ /* currently only needed for auto_increment */
+ table_share->ha_data= ha_data= (HA_DATA_PARTITION*)
+ alloc_root(&table_share->mem_root,
+ sizeof(HA_DATA_PARTITION));
+ if (!ha_data)
+ {
+ if (is_not_tmp_table)
+ pthread_mutex_unlock(&table_share->mutex);
+ goto err_handler;
+ }
+ DBUG_PRINT("info", ("table_share->ha_data 0x%p", ha_data));
+ bzero(ha_data, sizeof(HA_DATA_PARTITION));
+ }
+ if (is_not_tmp_table)
+ pthread_mutex_unlock(&table_share->mutex);
+ /*
Some handlers update statistics as part of the open call. This will in
some cases corrupt the statistics of the partition handler and thus
to ensure we have correct statistics we call info from open after
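
The block above is a mutex-guarded lazy allocation: the first handler to open the table creates the shared HA_DATA_PARTITION on the share's MEM_ROOT, and temporary tables skip the lock because they belong to a single connection (their share mutex may not even be initialized). A stripped-down sketch of the same pattern, with plain pthreads and calloc() standing in for TABLE_SHARE and alloc_root():

#include <pthread.h>
#include <cstdio>
#include <cstdlib>

struct HaDataPartition {
  unsigned long long next_auto_inc_val;
  bool auto_inc_initialized;
};

struct Share {                        /* stand-in for TABLE_SHARE */
  pthread_mutex_t mutex;
  HaDataPartition *ha_data;
  bool is_tmp_table;                  /* tmp tables are connection-private */
};

/* Returns false on allocation failure, mirroring the goto err_handler path. */
static bool ensure_ha_data(Share *share) {
  if (!share->is_tmp_table) pthread_mutex_lock(&share->mutex);
  if (!share->ha_data) {              /* first opener allocates, zeroed */
    share->ha_data = (HaDataPartition *)calloc(1, sizeof(HaDataPartition));
    if (!share->ha_data) {
      if (!share->is_tmp_table) pthread_mutex_unlock(&share->mutex);
      return false;
    }
  }
  if (!share->is_tmp_table) pthread_mutex_unlock(&share->mutex);
  return true;
}

int main() {
  Share s = {PTHREAD_MUTEX_INITIALIZER, NULL, false};
  bool ok = ensure_ha_data(&s);
  std::printf("ok=%d initialized=%d\n", (int)ok,
              (int)s.ha_data->auto_inc_initialized);
  return 0;
}
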
@@ -2539,6 +2576,7 @@ int ha_partition::close(void)
handler **file;
DBUG_ENTER("ha_partition::close");
+ DBUG_ASSERT(table->s == table_share);
delete_queue(&m_queue);
if (!is_clone)
bitmap_free(&(m_part_info->used_partitions));
@@ -2607,6 +2645,7 @@ int ha_partition::external_lock(THD *thd, int lock_type)
handler **file;
DBUG_ENTER("ha_partition::external_lock");
+ DBUG_ASSERT(!auto_increment_lock && !auto_increment_safe_stmt_log_lock);
file= m_file;
m_lock_type= lock_type;
@@ -2825,8 +2864,9 @@ int ha_partition::write_row(uchar * buf)
uint32 part_id;
int error;
longlong func_value;
- bool autoincrement_lock= FALSE;
+ bool have_auto_increment= table->next_number_field && buf == table->record[0];
my_bitmap_map *old_map;
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
THD *thd= ha_thd();
timestamp_auto_set_type orig_timestamp_type= table->timestamp_field_type;
#ifdef NOT_NEEDED
@@ -2844,28 +2884,16 @@ int ha_partition::write_row(uchar * buf)
If we have an auto_increment column and we are writing a changed row
or a new row, then update the auto_increment value in the record.
*/
- if (table->next_number_field && buf == table->record[0])
+ if (have_auto_increment)
{
- /*
- Some engines (InnoDB for example) can change autoincrement
- counter only after 'table->write_row' operation.
- So if another thread gets inside the ha_partition::write_row
- before it is complete, it gets same auto_increment value,
- which means DUP_KEY error (bug #27405)
- Here we separate the access using table_share->mutex, and
- use autoincrement_lock variable to avoid unnecessary locks.
- Probably not an ideal solution.
- */
- if (table_share->tmp_table == NO_TMP_TABLE)
+ if (!ha_data->auto_inc_initialized &&
+ !table->s->next_number_keypart)
{
/*
- Bug#30878 crash when alter table from non partitioned table
- to partitioned.
- Checking if tmp table then there is no need to lock,
- and the table_share->mutex may not be initialised.
+ If auto_increment in table_share is not initialized, start by
+ initializing it.
*/
- autoincrement_lock= TRUE;
- pthread_mutex_lock(&table_share->mutex);
+ info(HA_STATUS_AUTO);
}
error= update_auto_increment();
@@ -2903,11 +2931,11 @@ int ha_partition::write_row(uchar * buf)
DBUG_PRINT("info", ("Insert in partition %d", part_id));
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[part_id]->ha_write_row(buf);
+ if (have_auto_increment && !table->s->next_number_keypart)
+ set_auto_increment_if_higher(table->next_number_field->val_int());
reenable_binlog(thd);
exit:
table->timestamp_field_type= orig_timestamp_type;
- if (autoincrement_lock)
- pthread_mutex_unlock(&table_share->mutex);
DBUG_RETURN(error);
}
@@ -2931,13 +2959,6 @@ exit:
Keep in mind that the server can do updates based on ordering if an
ORDER BY clause was used. Consecutive ordering is not guarenteed.
- Currently new_data will not have an updated auto_increament record, or
- and updated timestamp field. You can do these for partition by doing these:
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- if (table->next_number_field && record == table->record[0])
- update_auto_increment();
-
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
new_data is always record[0]
old_data is normally record[1] but may be anything
@@ -2969,17 +2990,23 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
goto exit;
}
- /*
- TODO:
- set_internal_auto_increment=
- max(set_internal_auto_increment, new_data->auto_increment)
- */
m_last_part= new_part_id;
if (new_part_id == old_part_id)
{
DBUG_PRINT("info", ("Update in partition %d", new_part_id));
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[new_part_id]->ha_update_row(old_data, new_data);
+ /*
+ if updating an auto_increment column, update
+ table_share->ha_data->next_auto_inc_val if needed.
+ (not to be used if auto_increment on secondary field in a multi-
+ column index)
+ mysql_update does not set table->next_number_field, so we use
+ table->found_next_number_field instead.
+ */
+ if (table->found_next_number_field && new_data == table->record[0] &&
+ !table->s->next_number_keypart)
+ set_auto_increment_if_higher(table->found_next_number_field->val_int());
reenable_binlog(thd);
goto exit;
}
@@ -2989,6 +3016,9 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
old_part_id, new_part_id));
tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
error= m_file[new_part_id]->ha_write_row(new_data);
+ if (table->found_next_number_field && new_data == table->record[0] &&
+ !table->s->next_number_keypart)
+ set_auto_increment_if_higher(table->found_next_number_field->val_int());
reenable_binlog(thd);
if (error)
goto exit;
@@ -3084,8 +3114,17 @@ int ha_partition::delete_all_rows()
{
int error;
handler **file;
+ THD *thd= ha_thd();
DBUG_ENTER("ha_partition::delete_all_rows");
+ if (thd->lex->sql_command == SQLCOM_TRUNCATE)
+ {
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+ lock_auto_increment();
+ ha_data->next_auto_inc_val= 0;
+ ha_data->auto_inc_initialized= FALSE;
+ unlock_auto_increment();
+ }
file= m_file;
do
{
@@ -3679,10 +3718,12 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
enum ha_rkey_function find_flag)
{
DBUG_ENTER("ha_partition::index_read_map");
-
end_range= 0;
m_index_scan_type= partition_index_read;
- DBUG_RETURN(common_index_read(buf, key, keypart_map, find_flag));
+ m_start_key.key= key;
+ m_start_key.keypart_map= keypart_map;
+ m_start_key.flag= find_flag;
+ DBUG_RETURN(common_index_read(buf, TRUE));
}
@@ -3690,41 +3731,63 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key,
Common routine for a number of index_read variants
SYNOPSIS
- common_index_read
-
- see index_read for rest
+ ha_partition::common_index_read()
+ buf Buffer where the record should be returned
+ have_start_key TRUE <=> the left endpoint is available, i.e.
+ we're in index_read call or in read_range_first
+ call and the range has left endpoint
+
+ FALSE <=> there is no left endpoint (we're in
+ read_range_first() call and the range has no left
+ endpoint)
+
+ DESCRIPTION
+ Start scanning the range (when invoked from read_range_first()) or doing
+ an index lookup (when invoked from index_read_XXX):
+ - If possible, perform partition selection
+ - Find the set of partitions we're going to use
+ - Depending on whether we need ordering:
+ NO: Get the first record from first used partition (see
+ handle_unordered_scan_next_partition)
+ YES: Fill the priority queue and get the record that is the first in
+ the ordering
+
+ RETURN
+ 0 OK
+ other HA_ERR_END_OF_FILE or other error code.
*/
-int ha_partition::common_index_read(uchar *buf, const uchar *key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag)
+int ha_partition::common_index_read(uchar *buf, bool have_start_key)
{
int error;
+ uint key_len;
bool reverse_order= FALSE;
- uint key_len= calculate_key_len(table, active_index, key, keypart_map);
DBUG_ENTER("ha_partition::common_index_read");
+ LINT_INIT(key_len); /* used if have_start_key==TRUE */
- memcpy((void*)m_start_key.key, key, key_len);
- m_start_key.keypart_map= keypart_map;
- m_start_key.length= key_len;
- m_start_key.flag= find_flag;
-
- if ((error= partition_scan_set_up(buf, TRUE)))
+ if (have_start_key)
+ {
+ m_start_key.length= key_len= calculate_key_len(table, active_index,
+ m_start_key.key,
+ m_start_key.keypart_map);
+ }
+ if ((error= partition_scan_set_up(buf, have_start_key)))
{
DBUG_RETURN(error);
}
- if (find_flag == HA_READ_PREFIX_LAST ||
- find_flag == HA_READ_PREFIX_LAST_OR_PREV ||
- find_flag == HA_READ_BEFORE_KEY)
+
+ if (have_start_key &&
+ (m_start_key.flag == HA_READ_PREFIX_LAST ||
+ m_start_key.flag == HA_READ_PREFIX_LAST_OR_PREV ||
+ m_start_key.flag == HA_READ_BEFORE_KEY))
{
reverse_order= TRUE;
m_ordered_scan_ongoing= TRUE;
}
if (!m_ordered_scan_ongoing ||
- (find_flag == HA_READ_KEY_EXACT &&
- (key_len >= m_curr_key_info->key_length ||
- key_len == 0)))
- {
+ (have_start_key && m_start_key.flag == HA_READ_KEY_EXACT &&
+ (key_len >= m_curr_key_info->key_length || key_len == 0)))
+ {
/*
We use unordered index scan either when read_range is used and flag
is set to not use ordered or when an exact key is used and in this
@@ -3815,7 +3878,7 @@ int ha_partition::index_last(uchar * buf)
Common routine for index_first/index_last
SYNOPSIS
- common_index_first_last
+ ha_partition::common_first_last()
see index_first for rest
*/
@@ -3859,7 +3922,10 @@ int ha_partition::index_read_last_map(uchar *buf, const uchar *key,
m_ordered= TRUE; // Safety measure
end_range= 0;
m_index_scan_type= partition_index_read_last;
- DBUG_RETURN(common_index_read(buf, key, keypart_map, HA_READ_PREFIX_LAST));
+ m_start_key.key= key;
+ m_start_key.keypart_map= keypart_map;
+ m_start_key.flag= HA_READ_PREFIX_LAST;
+ DBUG_RETURN(common_index_read(buf, TRUE));
}
@@ -3990,23 +4056,15 @@ int ha_partition::read_range_first(const key_range *start_key,
((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
(end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
}
- range_key_part= m_curr_key_info->key_part;
- if (!start_key) // Read first record
- {
- if (m_ordered)
- m_index_scan_type= partition_index_first;
- else
- m_index_scan_type= partition_index_first_unordered;
- error= common_first_last(m_rec0);
- }
+ range_key_part= m_curr_key_info->key_part;
+ if (start_key)
+ m_start_key= *start_key;
else
- {
- m_index_scan_type= partition_index_read;
- error= common_index_read(m_rec0,
- start_key->key,
- start_key->keypart_map, start_key->flag);
- }
+ m_start_key.key= NULL;
+
+ m_index_scan_type= partition_read_range;
+ error= common_index_read(m_rec0, test(start_key));
DBUG_RETURN(error);
}
@@ -4028,26 +4086,36 @@ int ha_partition::read_range_next()
if (m_ordered)
{
- DBUG_RETURN(handler::read_range_next());
+ DBUG_RETURN(handle_ordered_next(table->record[0], eq_range));
}
- DBUG_RETURN(handle_unordered_next(m_rec0, eq_range));
+ DBUG_RETURN(handle_unordered_next(table->record[0], eq_range));
}
/*
- Common routine to set up scans
+ Common routine to set up index scans
SYNOPSIS
- buf Buffer to later return record in
- idx_read_flag Is it index scan
+ ha_partition::partition_scan_set_up()
+ buf Buffer to later return record in (this function
+ needs it to calculcate partitioning function
+ values)
+
+ idx_read_flag TRUE <=> m_start_key has range start endpoint which
+ probably can be used to determine the set of partitions
+ to scan.
+ FALSE <=> there is no start endpoint.
+
+ DESCRIPTION
+ Find out which partitions we'll need to read when scanning the specified
+ range.
+
+ If we need to scan only one partition, set m_ordered_scan_ongoing=FALSE
+ as we will not need to do merge ordering.
RETURN VALUE
>0 Error code
0 Success
-
- DESCRIPTION
- This is where we check which partitions to actually scan if not all
- of them
*/
int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
@@ -4138,10 +4206,19 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
DBUG_ENTER("ha_partition::handle_unordered_next");
/*
- We should consider if this should be split into two functions as
- next_same is alwas a local constant
+ We should consider if this should be split into three functions as
+ partition_read_range and is_next_same are always local constants
*/
- if (is_next_same)
+
+ if (m_index_scan_type == partition_read_range)
+ {
+ if (!(error= file->read_range_next()))
+ {
+ m_last_part= m_part_spec.start_part;
+ DBUG_RETURN(0);
+ }
+ }
+ else if (is_next_same)
{
if (!(error= file->index_next_same(buf, m_start_key.key,
m_start_key.length)))
@@ -4150,15 +4227,13 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
DBUG_RETURN(0);
}
}
- else if (!(error= file->index_next(buf)))
+ else
{
- if (!(file->index_flags(active_index, 0, 1) & HA_READ_ORDER) ||
- compare_key(end_range) <= 0)
+ if (!(error= file->index_next(buf)))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0); // Row was in range
}
- error= HA_ERR_END_OF_FILE;
}
if (error == HA_ERR_END_OF_FILE)
@@ -4202,6 +4277,11 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
file= m_file[i];
m_part_spec.start_part= i;
switch (m_index_scan_type) {
+ case partition_read_range:
+ DBUG_PRINT("info", ("read_range_first on partition %d", i));
+ error= file->read_range_first(m_start_key.key? &m_start_key: NULL,
+ end_range, eq_range, FALSE);
+ break;
case partition_index_read:
DBUG_PRINT("info", ("index_read on partition %d", i));
error= file->index_read_map(buf, m_start_key.key,
@@ -4210,6 +4290,17 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
break;
case partition_index_first:
DBUG_PRINT("info", ("index_first on partition %d", i));
+ /* MyISAM engine can fail if we call index_first() when indexes disabled */
+ /* that happens if the table is empty. */
+ /* Here we use file->stats.records instead of file->records() because */
+ /* file->records() is supposed to return an EXACT count, and it can be */
+ /* possibly slow. We don't need an exact number; an approximate one (from */
+ /* the last ::info() call) is sufficient. */
+ if (file->stats.records == 0)
+ {
+ error= HA_ERR_END_OF_FILE;
+ break;
+ }
error= file->index_first(buf);
break;
case partition_index_first_unordered:
@@ -4230,13 +4321,8 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
}
if (!error)
{
- if (!(file->index_flags(active_index, 0, 1) & HA_READ_ORDER) ||
- compare_key(end_range) <= 0)
- {
- m_last_part= i;
- DBUG_RETURN(0);
- }
- error= HA_ERR_END_OF_FILE;
+ m_last_part= i;
+ DBUG_RETURN(0);
}
if ((error != HA_ERR_END_OF_FILE) && (error != HA_ERR_KEY_NOT_FOUND))
DBUG_RETURN(error);
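
The stats.records == 0 guard added above (and twice more in handle_ordered_index_scan() below) works around engines such as MyISAM that can fail in index_first()/index_last() on an empty table with disabled indexes, and it deliberately consults the cached, approximate row count rather than the exact but potentially slow records() call. The guard in isolation, with a hypothetical handler stand-in:

#include <cstdio>

struct Handler {                       /* minimal stand-in for class handler */
  unsigned long long cached_records;   /* stats.records from the last info() */
  int index_first() { return 0; }      /* may misbehave on empty MyISAM tables */
};

static const int kEndOfFile = -1;      /* stand-in for HA_ERR_END_OF_FILE */

static int first_row(Handler *file) {
  /* The approximate count is enough here: we only need "certainly empty",
     and an exact count could be expensive to compute on some engines. */
  if (file->cached_records == 0)
    return kEndOfFile;                 /* skip the engine call entirely */
  return file->index_first();
}

int main() {
  Handler empty = {0}, loaded = {42};
  std::printf("%d %d\n", first_row(&empty), first_row(&loaded));
  return 0;
}
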
@@ -4302,10 +4388,32 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
m_start_key.flag);
break;
case partition_index_first:
+ /* MyISAM engine can fail if we call index_first() when indexes disabled */
+ /* that happens if the table is empty. */
+ /* Here we use file->stats.records instead of file->records() because */
+ /* file->records() is supposed to return an EXACT count, and it can be */
+ /* possibly slow. We don't need an exact number; an approximate one (from */
+ /* the last ::info() call) is sufficient. */
+ if (file->stats.records == 0)
+ {
+ error= HA_ERR_END_OF_FILE;
+ break;
+ }
error= file->index_first(rec_buf_ptr);
reverse_order= FALSE;
break;
case partition_index_last:
+ /* MyISAM engine can fail if we call index_last() when indexes disabled */
+ /* that happens if the table is empty. */
+ /* Here we use file->stats.records instead of file->records() because */
+ /* file->records() is supposed to return an EXACT count, and it can be */
+ /* possibly slow. We don't need an exact number; an approximate one (from */
+ /* the last ::info() call) is sufficient. */
+ if (file->stats.records == 0)
+ {
+ error= HA_ERR_END_OF_FILE;
+ break;
+ }
error= file->index_last(rec_buf_ptr);
reverse_order= TRUE;
break;
@@ -4315,6 +4423,17 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
m_start_key.keypart_map);
reverse_order= TRUE;
break;
+ case partition_read_range:
+ {
+ /*
+ This can only read record to table->record[0], as it was set when
+ the table was being opened. We have to memcpy data ourselves.
+ */
+ error= file->read_range_first(&m_start_key, end_range, eq_range, TRUE);
+ memcpy(rec_buf_ptr, table->record[0], m_rec_length);
+ reverse_order= FALSE;
+ break;
+ }
default:
DBUG_ASSERT(FALSE);
DBUG_RETURN(HA_ERR_END_OF_FILE);
@@ -4395,8 +4514,13 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
uint part_id= m_top_entry;
handler *file= m_file[part_id];
DBUG_ENTER("ha_partition::handle_ordered_next");
-
- if (!is_next_same)
+
+ if (m_index_scan_type == partition_read_range)
+ {
+ error= file->read_range_next();
+ memcpy(rec_buf(part_id), table->record[0], m_rec_length);
+ }
+ else if (!is_next_same)
error= file->index_next(rec_buf(part_id));
else
error= file->index_next_same(rec_buf(part_id), m_start_key.key,
@@ -4544,21 +4668,54 @@ int ha_partition::handle_ordered_prev(uchar *buf)
int ha_partition::info(uint flag)
{
- handler *file, **file_array;
- DBUG_ENTER("ha_partition:info");
+ DBUG_ENTER("ha_partition::info");
if (flag & HA_STATUS_AUTO)
{
- ulonglong auto_increment_value= 0;
+ bool auto_inc_is_first_in_idx= (table_share->next_number_keypart == 0);
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
- file_array= m_file;
- do
+ if (!table->found_next_number_field)
+ stats.auto_increment_value= 0;
+ else if (ha_data->auto_inc_initialized)
{
- file= *file_array;
- file->info(HA_STATUS_AUTO);
- set_if_bigger(auto_increment_value, file->stats.auto_increment_value);
- } while (*(++file_array));
- stats.auto_increment_value= auto_increment_value;
+ lock_auto_increment();
+ stats.auto_increment_value= ha_data->next_auto_inc_val;
+ unlock_auto_increment();
+ }
+ else
+ {
+ lock_auto_increment();
+ /* to avoid two concurrent initializations, check again when locked */
+ if (ha_data->auto_inc_initialized)
+ stats.auto_increment_value= ha_data->next_auto_inc_val;
+ else
+ {
+ handler *file, **file_array;
+ ulonglong auto_increment_value= 0;
+ file_array= m_file;
+ DBUG_PRINT("info",
+ ("checking all partitions for auto_increment_value"));
+ do
+ {
+ file= *file_array;
+ file->info(HA_STATUS_AUTO);
+ set_if_bigger(auto_increment_value,
+ file->stats.auto_increment_value);
+ } while (*(++file_array));
+
+ DBUG_ASSERT(auto_increment_value);
+ stats.auto_increment_value= auto_increment_value;
+ if (auto_inc_is_first_in_idx)
+ {
+ set_if_bigger(ha_data->next_auto_inc_val, auto_increment_value);
+ ha_data->auto_inc_initialized= TRUE;
+ DBUG_PRINT("info", ("initializing next_auto_inc_val to %lu",
+ (ulong) ha_data->next_auto_inc_val));
+ }
+ }
+ unlock_auto_increment();
+ }
}
if (flag & HA_STATUS_VARIABLE)
{
@@ -4582,6 +4739,7 @@ int ha_partition::info(uint flag)
check_time: Time of last check (only applicable to MyISAM)
We report last time of all underlying handlers
*/
+ handler *file, **file_array;
stats.records= 0;
stats.deleted= 0;
stats.data_file_length= 0;
@@ -4663,6 +4821,7 @@ int ha_partition::info(uint flag)
So we calculate these constants by using the variables on the first
handler.
*/
+ handler *file;
file= m_file[0];
file->info(HA_STATUS_CONST);
@@ -4684,6 +4843,7 @@ int ha_partition::info(uint flag)
}
if (flag & HA_STATUS_TIME)
{
+ handler *file, **file_array;
DBUG_PRINT("info", ("info: HA_STATUS_TIME"));
/*
This flag is used to set the latest update time of the table.
@@ -5744,19 +5904,33 @@ int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
MODULE auto increment
****************************************************************************/
-void ha_partition::restore_auto_increment(ulonglong)
-{
- DBUG_ENTER("ha_partition::restore_auto_increment");
- DBUG_VOID_RETURN;
+int ha_partition::reset_auto_increment(ulonglong value)
+{
+ handler **file= m_file;
+ int res;
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+ DBUG_ENTER("ha_partition::reset_auto_increment");
+ lock_auto_increment();
+ ha_data->auto_inc_initialized= FALSE;
+ ha_data->next_auto_inc_val= 0;
+ do
+ {
+ if ((res= (*file)->ha_reset_auto_increment(value)) != 0)
+ break;
+ } while (*(++file));
+ unlock_auto_increment();
+ DBUG_RETURN(res);
}
-/*
+/**
This method is called by update_auto_increment which in turn is called
- by the individual handlers as part of write_row. We will always let
- the first handler keep track of the auto increment value for all
- partitions.
+ by the individual handlers as part of write_row. We use
+ table_share->ha_data->next_auto_inc_val, or search all partitions for the
+ highest auto_increment_value if it is not yet initialized. If the
+ auto_increment field is a secondary part of a key, we must search every
+ partition while holding a mutex to be sure of correctness.
*/
void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
@@ -5764,59 +5938,88 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong *first_value,
ulonglong *nb_reserved_values)
{
- ulonglong first_value_part, last_value_part, nb_reserved_values_part,
- last_value= ~ (ulonglong) 0;
- handler **pos, **end;
- bool retry= TRUE;
DBUG_ENTER("ha_partition::get_auto_increment");
-
-again:
- for (pos=m_file, end= m_file+ m_tot_parts; pos != end ; pos++)
+ DBUG_PRINT("info", ("offset: %lu inc: %lu desired_values: %lu "
+ "first_value: %lu", (ulong) offset, (ulong) increment,
+ (ulong) nb_desired_values, (ulong) *first_value));
+ DBUG_ASSERT(increment && nb_desired_values);
+ *first_value= 0;
+ if (table->s->next_number_keypart)
{
- first_value_part= *first_value;
- (*pos)->get_auto_increment(offset, increment, nb_desired_values,
- &first_value_part, &nb_reserved_values_part);
- if (first_value_part == ~(ulonglong)(0)) // error in one partition
- {
- *first_value= first_value_part;
- sql_print_error("Partition failed to reserve auto_increment value");
- DBUG_VOID_RETURN;
- }
/*
- Partition has reserved an interval. Intersect it with the intervals
- already reserved for the previous partitions.
+ next_number_keypart is != 0 if the auto_increment column is a secondary
+ column in the index (it is allowed in MyISAM)
*/
- last_value_part= (nb_reserved_values_part == ULONGLONG_MAX) ?
- ULONGLONG_MAX : (first_value_part + nb_reserved_values_part * increment);
- set_if_bigger(*first_value, first_value_part);
- set_if_smaller(last_value, last_value_part);
+ DBUG_PRINT("info", ("next_number_keypart != 0"));
+ ulonglong nb_reserved_values_part;
+ ulonglong first_value_part, max_first_value;
+ handler **file= m_file;
+ first_value_part= max_first_value= *first_value;
+ /* Must lock and find highest value among all partitions. */
+ lock_auto_increment();
+ do
+ {
+ /* Only nb_desired_values = 1 makes sense */
+ (*file)->get_auto_increment(offset, increment, 1,
+ &first_value_part, &nb_reserved_values_part);
+ if (first_value_part == ~(ulonglong)(0)) // error in one partition
+ {
+ *first_value= first_value_part;
+ /* log that the error was between table/partition handler */
+ sql_print_error("Partition failed to reserve auto_increment value");
+ unlock_auto_increment();
+ DBUG_VOID_RETURN;
+ }
+ DBUG_PRINT("info", ("first_value_part: %lu", (ulong) first_value_part));
+ set_if_bigger(max_first_value, first_value_part);
+ } while (*(++file));
+ *first_value= max_first_value;
+ *nb_reserved_values= 1;
+ unlock_auto_increment();
}
- if (last_value < *first_value) /* empty intersection, error */
+ else
{
+ THD *thd= ha_thd();
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
/*
- When we have an empty intersection, it means that one or more
- partitions may have a significantly different autoinc next value.
- We should not fail here - it just means that we should try to
- find a new reservation making use of the current *first_value
- wbich should now be compatible with all partitions.
+ This is initialized in the beginning of the first write_row call.
*/
- if (retry)
- {
- retry= FALSE;
- last_value= ~ (ulonglong) 0;
- release_auto_increment();
- goto again;
- }
+ DBUG_ASSERT(ha_data->auto_inc_initialized);
/*
- We should not get here.
+ Get a lock for handling the auto_increment in table_share->ha_data
+ for avoiding two concurrent statements getting the same number.
+ */
+
+ lock_auto_increment();
+
+ /*
+ In a multi-row insert statement like INSERT SELECT and LOAD DATA,
+ where the number of candidate rows to insert is not known in advance,
+ we must hold a lock/mutex for the whole statement if we have
+ statement-based replication. Because the statement-based binary log
+ contains only the first generated value used by the statement, and slaves
+ assume all other generated values used by this statement were consecutive
+ to this first one, we must exclusively lock the generator until the
+ statement is done.
*/
- sql_print_error("Failed to calculate auto_increment value for partition");
-
- *first_value= ~(ulonglong)(0);
+ if (!auto_increment_safe_stmt_log_lock &&
+ thd->lex->sql_command != SQLCOM_INSERT &&
+ mysql_bin_log.is_open() &&
+ !thd->current_stmt_binlog_row_based &&
+ (thd->options & OPTION_BIN_LOG))
+ {
+ DBUG_PRINT("info", ("locking auto_increment_safe_stmt_log_lock"));
+ auto_increment_safe_stmt_log_lock= TRUE;
+ }
+
+ /* this gets corrected (for offset/increment) in update_auto_increment */
+ *first_value= ha_data->next_auto_inc_val;
+ ha_data->next_auto_inc_val+= nb_desired_values * increment;
+
+ unlock_auto_increment();
+ DBUG_PRINT("info", ("*first_value: %lu", (ulong) *first_value));
+ *nb_reserved_values= nb_desired_values;
}
- if (increment) // If not check for values
- *nb_reserved_values= (last_value == ULONGLONG_MAX) ?
- ULONGLONG_MAX : ((last_value - *first_value) / increment);
DBUG_VOID_RETURN;
}
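
For the common case where the auto-increment column is the first key part, the rewritten get_auto_increment() no longer intersects per-partition intervals: it hands out one block from the shared counter under the lock and reports the whole block as reserved. A reduced sketch of that reservation step (a plain pthread mutex standing in for the lock helpers):

#include <pthread.h>
#include <cstdio>

/* Shared per-table generator, as in HA_DATA_PARTITION. */
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long next_val = 1;

/* Reserve 'n' values with step 'inc'; returns the first value of the block.
   Mirrors the first-keypart branch: one lock, one bump, no partition scan. */
static unsigned long long reserve(unsigned long long n, unsigned long long inc) {
  pthread_mutex_lock(&mtx);
  unsigned long long first = next_val;
  next_val += n * inc;                 /* the whole interval is now ours */
  pthread_mutex_unlock(&mtx);
  return first;
}

int main() {
  std::printf("%llu\n", reserve(10, 1));   /* 1,  counter advances to 11 */
  std::printf("%llu\n", reserve(10, 1));   /* 11, counter advances to 21 */
  return 0;
}
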
@@ -5824,9 +6027,31 @@ void ha_partition::release_auto_increment()
{
DBUG_ENTER("ha_partition::release_auto_increment");
- for (uint i= 0; i < m_tot_parts; i++)
+ if (table->s->next_number_keypart)
{
- m_file[i]->ha_release_auto_increment();
+ for (uint i= 0; i < m_tot_parts; i++)
+ m_file[i]->ha_release_auto_increment();
+ }
+ else if (next_insert_id)
+ {
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+ ulonglong next_auto_inc_val;
+ lock_auto_increment();
+ next_auto_inc_val= ha_data->next_auto_inc_val;
+ if (next_insert_id < next_auto_inc_val &&
+ auto_inc_interval_for_cur_row.maximum() >= next_auto_inc_val)
+ ha_data->next_auto_inc_val= next_insert_id;
+ DBUG_PRINT("info", ("ha_data->next_auto_inc_val: %lu",
+ (ulong) ha_data->next_auto_inc_val));
+
+ /* Unlock the multi row statement lock taken in get_auto_increment */
+ if (auto_increment_safe_stmt_log_lock)
+ {
+ auto_increment_safe_stmt_log_lock= FALSE;
+ DBUG_PRINT("info", ("unlocking auto_increment_safe_stmt_log_lock"));
+ }
+
+ unlock_auto_increment();
}
DBUG_VOID_RETURN;
}
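
release_auto_increment() can now hand back the unused tail of the last reserved block, but only when no later reservation exists; comparing the interval's maximum() (here taken to be the excluded upper bound of the block) against the shared counter detects that. A worked example of the rewind rule as a hypothetical free function (the real code holds the auto-increment lock around it):

#include <cstdio>

/* Shrink the shared counter back to the first unused value, but only when
   our block is still the newest reservation; otherwise someone reserved
   after us and rewinding would hand out their values again. */
static void give_back(unsigned long long *next_auto_inc_val,
                      unsigned long long next_insert_id,   /* first unused */
                      unsigned long long interval_max) {   /* excluded bound */
  if (next_insert_id < *next_auto_inc_val &&
      interval_max >= *next_auto_inc_val)
    *next_auto_inc_val = next_insert_id;
}

int main() {
  /* We reserved values 11..20: counter is 21, excluded bound is 21. */
  unsigned long long counter = 21;
  give_back(&counter, 15, 21);          /* only 11..14 used: rewind */
  std::printf("%llu\n", counter);       /* 15 */
  counter = 31;                         /* someone else reserved 21..30 */
  give_back(&counter, 15, 21);          /* must not rewind their block */
  std::printf("%llu\n", counter);       /* 31 */
  return 0;
}
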
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 97f5624608f..685f057dfce 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -37,6 +37,15 @@ typedef struct st_partition_share
} PARTITION_SHARE;
#endif
+/**
+ Partition specific ha_data struct.
+ @todo: move all partition specific data from TABLE_SHARE here.
+*/
+typedef struct st_ha_data_partition
+{
+ ulonglong next_auto_inc_val; /**< first non reserved value */
+ bool auto_inc_initialized;
+} HA_DATA_PARTITION;
#define PARTITION_BYTES_IN_POS 2
class ha_partition :public handler
@@ -49,7 +58,8 @@ private:
partition_index_first_unordered= 2,
partition_index_last= 3,
partition_index_read_last= 4,
- partition_no_index_scan= 5
+ partition_read_range = 5,
+ partition_no_index_scan= 6
};
/* Data for the partition handler */
int m_mode; // Open mode
@@ -63,8 +73,6 @@ private:
handler **m_reorged_file; // Reorganised partitions
handler **m_added_file; // Added parts kept for errors
partition_info *m_part_info; // local reference to partition
- uchar *m_start_key_ref; // Reference of start key in current
- // index scan info
Field **m_part_field_array; // Part field array locally to save acc
uchar *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
KEY *m_curr_key_info; // Current index
@@ -141,6 +149,12 @@ private:
"own" the m_part_info structure.
*/
bool is_clone;
+ bool auto_increment_lock; /**< lock reading/updating auto_inc */
+ /**
+ Flag to keep the auto_increment lock through out the statement.
+ This to ensure it will work with statement based replication.
+ */
+ bool auto_increment_safe_stmt_log_lock;
public:
handler *clone(MEM_ROOT *mem_root);
virtual void set_part_info(partition_info *part_info)
@@ -197,8 +211,8 @@ public:
virtual char *update_table_comment(const char *comment);
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
- ulonglong *copied,
- ulonglong *deleted,
+ ulonglong * const copied,
+ ulonglong * const deleted,
const uchar *pack_frm_data,
size_t pack_frm_len);
virtual int drop_partitions(const char *path);
@@ -212,7 +226,7 @@ public:
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
private:
int prepare_for_rename();
- int copy_partitions(ulonglong *copied, ulonglong *deleted);
+ int copy_partitions(ulonglong * const copied, ulonglong * const deleted);
void cleanup_new_partition(uint part_count);
int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
handler *file, const char *part_name,
@@ -429,9 +443,7 @@ public:
virtual int read_range_next();
private:
- int common_index_read(uchar * buf, const uchar * key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag);
+ int common_index_read(uchar * buf, bool have_start_key);
int common_first_last(uchar * buf);
int partition_scan_set_up(uchar * buf, bool idx_read_flag);
int handle_unordered_next(uchar * buf, bool next_same);
@@ -829,12 +841,51 @@ public:
auto_increment_column_changed
-------------------------------------------------------------------------
*/
- virtual void restore_auto_increment(ulonglong prev_insert_id);
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values);
virtual void release_auto_increment();
+private:
+ virtual int reset_auto_increment(ulonglong value);
+ virtual void lock_auto_increment()
+ {
+ /* lock already taken */
+ if (auto_increment_safe_stmt_log_lock)
+ return;
+ DBUG_ASSERT(table_share->ha_data && !auto_increment_lock);
+ if(table_share->tmp_table == NO_TMP_TABLE)
+ {
+ auto_increment_lock= TRUE;
+ pthread_mutex_lock(&table_share->mutex);
+ }
+ }
+ virtual void unlock_auto_increment()
+ {
+ DBUG_ASSERT(table_share->ha_data);
+ /*
+ If auto_increment_safe_stmt_log_lock is true, we have to keep the lock.
+ It will be set to false and thus unlocked at the end of the statement by
+ ha_partition::release_auto_increment.
+ */
+ if(auto_increment_lock && !auto_increment_safe_stmt_log_lock)
+ {
+ pthread_mutex_unlock(&table_share->mutex);
+ auto_increment_lock= FALSE;
+ }
+ }
+ virtual void set_auto_increment_if_higher(const ulonglong nr)
+ {
+ HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+ lock_auto_increment();
+ /* must check when the mutex is taken */
+ if (nr >= ha_data->next_auto_inc_val)
+ ha_data->next_auto_inc_val= nr + 1;
+ ha_data->auto_inc_initialized= TRUE;
+ unlock_auto_increment();
+ }
+
+public:
/*
-------------------------------------------------------------------------
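
set_auto_increment_if_higher() is what write_row() and update_row() call after storing a row that carries an explicit auto-increment value: the shared counter is pushed past that value so later generated values cannot collide with it. Its effect in isolation (locking elided, a plain global standing in for ha_data):

#include <cstdio>

static unsigned long long next_auto_inc_val = 1;

/* After writing a row with explicit auto-inc value 'nr', keep the generator
   above it (the real helper does this under the auto-increment lock). */
static void set_if_higher(unsigned long long nr) {
  if (nr >= next_auto_inc_val)
    next_auto_inc_val = nr + 1;
}

int main() {
  set_if_higher(100);   /* e.g. INSERT ... VALUES (100, ...) */
  std::printf("%llu\n", next_auto_inc_val);   /* 101 */
  set_if_higher(50);    /* lower explicit value: no effect */
  std::printf("%llu\n", next_auto_inc_val);   /* still 101 */
  return 0;
}
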
diff --git a/sql/handler.cc b/sql/handler.cc
index e7cc5b82347..a988c34b7ca 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -373,6 +373,10 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
handlerton *hton= (handlerton *)plugin->data;
DBUG_ENTER("ha_finalize_handlerton");
+ /* hton can be NULL here, if ha_initialize_handlerton() failed. */
+ if (!hton)
+ goto end;
+
switch (hton->state)
{
case SHOW_OPTION_NO:
@@ -401,8 +405,16 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
}
}
+ /*
+ In case a plugin is uninstalled and re-installed later, it should
+ reuse an array slot. Otherwise the number of uninstall/install
+ cycles would be limited.
+ */
+ hton2plugin[hton->slot]= NULL;
+
my_free((uchar*)hton, MYF(0));
+ end:
DBUG_RETURN(0);
}
@@ -437,6 +449,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
case SHOW_OPTION_YES:
{
uint tmp;
+ ulong fslot;
/* now check the db_type for conflict */
if (hton->db_type <= DB_TYPE_UNKNOWN ||
hton->db_type >= DB_TYPE_DEFAULT ||
@@ -461,7 +474,31 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
tmp= hton->savepoint_offset;
hton->savepoint_offset= savepoint_alloc_size;
savepoint_alloc_size+= tmp;
- hton->slot= total_ha++;
+
+ /*
+ In case a plugin is uninstalled and re-installed later, it should
+ reuse an array slot. Otherwise the number of uninstall/install
+ cycles would be limited. So look for a free slot.
+ */
+ DBUG_PRINT("plugin", ("total_ha: %lu", total_ha));
+ for (fslot= 0; fslot < total_ha; fslot++)
+ {
+ if (!hton2plugin[fslot])
+ break;
+ }
+ if (fslot < total_ha)
+ hton->slot= fslot;
+ else
+ {
+ if (total_ha >= MAX_HA)
+ {
+ sql_print_error("Too many plugins loaded. Limit is %lu. "
+ "Failed on '%s'", (ulong) MAX_HA, plugin->name.str);
+ goto err;
+ }
+ hton->slot= total_ha++;
+ }
+
hton2plugin[hton->slot]=plugin;
if (hton->prepare)
total_ha_2pc++;
@@ -2165,7 +2202,12 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
- In both cases, the reserved intervals are remembered in
thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based
binlogging; the last reserved interval is remembered in
- auto_inc_interval_for_cur_row.
+ auto_inc_interval_for_cur_row. The number of reserved intervals is
+ remembered in auto_inc_intervals_count. It differs from the number of
+ elements in thd->auto_inc_intervals_in_cur_stmt_for_binlog() because the
+ latter list is cumulative over all statements forming one binlog event
+ (when stored functions and triggers are used), and collapses two
+ contiguous intervals in one (see its append() method).
The idea is that generated auto_increment values are predictable and
independent of the column values in the table. This is needed to be
@@ -2249,8 +2291,6 @@ int handler::update_auto_increment()
handler::estimation_rows_to_insert was set by
handler::ha_start_bulk_insert(); if 0 it means "unknown".
*/
- uint nb_already_reserved_intervals=
- thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements();
ulonglong nb_desired_values;
/*
If an estimation was given to the engine:
@@ -2262,17 +2302,17 @@ int handler::update_auto_increment()
start, starting from AUTO_INC_DEFAULT_NB_ROWS.
Don't go beyond a max to not reserve "way too much" (because
reservation means potentially losing unused values).
+ Note that in prelocked mode no estimation is given.
*/
- if (nb_already_reserved_intervals == 0 &&
- (estimation_rows_to_insert > 0))
+ if ((auto_inc_intervals_count == 0) && (estimation_rows_to_insert > 0))
nb_desired_values= estimation_rows_to_insert;
else /* go with the increasing defaults */
{
/* avoid overflow in formula, with this if() */
- if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+ if (auto_inc_intervals_count <= AUTO_INC_DEFAULT_NB_MAX_BITS)
{
- nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
- (1 << nb_already_reserved_intervals);
+ nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
+ (1 << auto_inc_intervals_count);
set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
}
else
@@ -2285,7 +2325,7 @@ int handler::update_auto_increment()
&nb_reserved_values);
if (nr == ~(ulonglong) 0)
DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure
-
+
/*
That rounding below should not be needed when all engines actually
respect offset and increment in get_auto_increment(). But they don't
@@ -2296,7 +2336,7 @@ int handler::update_auto_increment()
*/
nr= compute_next_insert_id(nr-1, variables);
}
-
+
if (table->s->next_number_keypart == 0)
{
/* We must defer the appending until "nr" has been possibly truncated */
@@ -2340,8 +2380,9 @@ int handler::update_auto_increment()
{
auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values,
variables->auto_increment_increment);
+ auto_inc_intervals_count++;
/* Row-based replication does not need to store intervals in binlog */
- if (!thd->current_stmt_binlog_row_based)
+ if (mysql_bin_log.is_open() && !thd->current_stmt_binlog_row_based)
thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(),
auto_inc_interval_for_cur_row.values(),
variables->auto_increment_increment);
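
Swapping nb_already_reserved_intervals for the handler-local auto_inc_intervals_count keeps the doubling heuristic intact: when no row estimate is available, each successive reservation within the statement doubles in size up to a cap. A worked sketch of the growth schedule (the constants are illustrative; the server takes them from the AUTO_INC_DEFAULT_NB_* macros):

#include <cstdio>

/* Size of the next auto-increment reservation, given how many intervals the
   statement has already reserved.  Mirrors the doubling in the hunk above. */
static unsigned long long next_reservation(unsigned int intervals_so_far) {
  const unsigned int max_bits = 16;                  /* illustrative cap */
  const unsigned long long nb_rows = 1;
  const unsigned long long nb_max = (1ULL << max_bits) - 1;
  if (intervals_so_far > max_bits)                   /* avoid shift overflow */
    return nb_max;
  unsigned long long n = nb_rows * (1ULL << intervals_so_far);
  return n < nb_max ? n : nb_max;
}

int main() {
  /* A long INSERT ... SELECT with unknown row count reserves 1, 2, 4, ... */
  for (unsigned int i = 0; i < 6; i++)
    std::printf("interval %u: %llu values\n", i, next_reservation(i));
  return 0;
}
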
@@ -2461,6 +2502,7 @@ void handler::ha_release_auto_increment()
release_auto_increment();
insert_id_for_cur_row= 0;
auto_inc_interval_for_cur_row.replace(0, 0, 0);
+ auto_inc_intervals_count= 0;
if (next_insert_id > 0)
{
next_insert_id= 0;
@@ -2710,8 +2752,53 @@ bool handler::get_error_message(int error, String* buf)
}
+/**
+ Check for incompatible collation changes.
+
+ @retval
+ HA_ADMIN_NEEDS_UPGRADE Table may have data requiring upgrade.
+ @retval
+ 0 No upgrade required.
+*/
+
+int handler::check_collation_compatibility()
+{
+ ulong mysql_version= table->s->mysql_version;
+
+ if (mysql_version < 50048)
+ {
+ KEY *key= table->key_info;
+ KEY *key_end= key + table->s->keys;
+ for (; key < key_end; key++)
+ {
+ KEY_PART_INFO *key_part= key->key_part;
+ KEY_PART_INFO *key_part_end= key_part + key->key_parts;
+ for (; key_part < key_part_end; key_part++)
+ {
+ if (!key_part->fieldnr)
+ continue;
+ Field *field= table->field[key_part->fieldnr - 1];
+ uint cs_number= field->charset()->number;
+ if (mysql_version < 50048 &&
+ (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */
+ cs_number == 41 || /* latin7_general_ci - bug #29461 */
+ cs_number == 42 || /* latin7_general_cs - bug #29461 */
+ cs_number == 20 || /* latin7_estonian_cs - bug #29461 */
+ cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */
+ cs_number == 22 || /* koi8u_general_ci - bug #29461 */
+ cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */
+ cs_number == 26)) /* cp1250_general_ci - bug #29461 */
+ return HA_ADMIN_NEEDS_UPGRADE;
+ }
+ }
+ }
+ return 0;
+}
+
+
int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
{
+ int error;
KEY *keyinfo, *keyend;
KEY_PART_INFO *keypart, *keypartend;
@@ -2740,6 +2827,10 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
}
if (table->s->frm_version != FRM_VER_TRUE_VARCHAR)
return HA_ADMIN_NEEDS_ALTER;
+
+ if ((error= check_collation_compatibility()))
+ return error;
+
return check_for_upgrade(check_opt);
}
diff --git a/sql/handler.h b/sql/handler.h
index df6157f80b4..b7d4d689d40 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1129,6 +1129,13 @@ public:
inserter.
*/
Discrete_interval auto_inc_interval_for_cur_row;
+ /**
+ Number of reserved auto-increment intervals. Serves as a heuristic
+ when we have no estimation of how many records the statement will insert:
+ the more intervals we have reserved, the bigger the next one. Reset in
+ handler::ha_release_auto_increment().
+ */
+ uint auto_inc_intervals_count;
handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), table(0),
@@ -1137,7 +1144,8 @@ public:
ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE),
locked(FALSE), implicit_emptied(0),
- pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0)
+ pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
+ auto_inc_intervals_count(0)
{}
virtual ~handler(void)
{
@@ -1203,6 +1211,7 @@ public:
int ha_delete_row(const uchar * buf);
void ha_release_auto_increment();
+ int check_collation_compatibility();
int ha_check_for_upgrade(HA_CHECK_OPT *check_opt);
/** to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
@@ -1241,8 +1250,8 @@ public:
int ha_change_partitions(HA_CREATE_INFO *create_info,
const char *path,
- ulonglong *copied,
- ulonglong *deleted,
+ ulonglong * const copied,
+ ulonglong * const deleted,
const uchar *pack_frm_data,
size_t pack_frm_len);
int ha_drop_partitions(const char *path);
@@ -1724,6 +1733,12 @@ public:
but we don't have a primary key
*/
virtual void use_hidden_primary_key();
+ virtual uint alter_table_flags(uint flags)
+ {
+ if (ht->alter_table_flags)
+ return ht->alter_table_flags(flags);
+ return 0;
+ }
protected:
/* Service methods for use by storage engines. */
@@ -1859,7 +1874,8 @@ private:
This is called to delete all rows in a table
If the handler don't support this, then this function will
return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
- by one.
+ by one. It should reset auto_increment if
+ thd->lex->sql_command == SQLCOM_TRUNCATE.
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
@@ -1898,8 +1914,8 @@ private:
virtual int change_partitions(HA_CREATE_INFO *create_info,
const char *path,
- ulonglong *copied,
- ulonglong *deleted,
+ ulonglong * const copied,
+ ulonglong * const deleted,
const uchar *pack_frm_data,
size_t pack_frm_len)
{ return HA_ERR_WRONG_COMMAND; }
diff --git a/sql/item.cc b/sql/item.cc
index 951336cce24..93d00c287ae 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1248,10 +1248,12 @@ Item_name_const::Item_name_const(Item *name_arg, Item *val):
if (!(valid_args= name_item->basic_const_item() &&
(value_item->basic_const_item() ||
((value_item->type() == FUNC_ITEM) &&
- (((Item_func *) value_item)->functype() ==
- Item_func::NEG_FUNC) &&
+ ((((Item_func *) value_item)->functype() ==
+ Item_func::COLLATE_FUNC) ||
+ ((((Item_func *) value_item)->functype() ==
+ Item_func::NEG_FUNC) &&
(((Item_func *) value_item)->key_item()->type() !=
- FUNC_ITEM)))))
+ FUNC_ITEM)))))))
my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST");
Item::maybe_null= TRUE;
}
@@ -1336,6 +1338,7 @@ public:
else
Item_ident::print(str, query_type);
}
+ virtual Ref_Type ref_type() { return AGGREGATE_REF; }
};
@@ -1801,14 +1804,16 @@ Item_field::Item_field(THD *thd, Name_resolution_context *context_arg,
We need to copy db_name, table_name and field_name because they must
be allocated in the statement memory, not in table memory (the table
structure can go away and pop up again between subsequent executions
- of a prepared statement).
+ of a prepared statement or after the close_tables_for_reopen() call
+ in mysql_multi_update_prepare()).
*/
- if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
{
if (db_name)
orig_db_name= thd->strdup(db_name);
- orig_table_name= thd->strdup(table_name);
- orig_field_name= thd->strdup(field_name);
+ if (table_name)
+ orig_table_name= thd->strdup(table_name);
+ if (field_name)
+ orig_field_name= thd->strdup(field_name);
/*
We don't restore 'name' in cleanup because it's not changed
during execution. Still we need it to point to persistent
@@ -4342,7 +4347,12 @@ Item *Item_field::equal_fields_propagator(uchar *arg)
item= this;
else if (field && (field->flags & ZEROFILL_FLAG) && IS_NUM(field->type()))
{
- if (item && cmp_context != INT_RESULT)
+ /*
+        We don't need to zero-fill timestamp columns here because they will
+        first be converted to a string (in date/time format) and compared as
+        such when compared with another string.
+ */
+ if (item && field->type() != FIELD_TYPE_TIMESTAMP && cmp_context != INT_RESULT)
convert_zerofill_number_to_string(&item, (Field_num *)field);
else
item= this;
diff --git a/sql/item.h b/sql/item.h
index be343e25d3f..00a2759a739 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -2126,7 +2126,7 @@ class Item_ref :public Item_ident
protected:
void set_properties();
public:
- enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF };
+ enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF, AGGREGATE_REF };
Field *result_field; /* Save result here */
Item **ref;
Item_ref(Name_resolution_context *context_arg,
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 8bb6bb30117..d7e6fc1f8f2 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -3805,6 +3805,25 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
return entry;
}
+
+bool Item_func_set_user_var::set_entry(THD *thd, bool create_if_not_exists)
+{
+ if (thd == entry_thd && entry)
+ goto end; // update entry->update_query_id for PS
+ entry_thd= thd;
+ if (!(entry= get_variable(&thd->user_vars, name, create_if_not_exists)))
+ return TRUE;
+ /*
+ Remember the last query which updated it, this way a query can later know
+ if this variable is a constant item in the query (it is if update_query_id
+ is different from query_id).
+ */
+end:
+ entry->update_query_id= thd->query_id;
+ return FALSE;
+}
+
+
/*
When a user variable is updated (in a SET command or a query like
SELECT @a:= ).
@@ -3814,15 +3833,8 @@ bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref)
{
DBUG_ASSERT(fixed == 0);
/* fix_fields will call Item_func_set_user_var::fix_length_and_dec */
- if (Item_func::fix_fields(thd, ref) ||
- !(entry= get_variable(&thd->user_vars, name, 1)))
+ if (Item_func::fix_fields(thd, ref) || set_entry(thd, TRUE))
return TRUE;
- /*
- Remember the last query which updated it, this way a query can later know
- if this variable is a constant item in the query (it is if update_query_id
- is different from query_id).
- */
- entry->update_query_id= thd->query_id;
/*
As it is wrong and confusing to associate any
character set with NULL, @a should be latin2
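
The new set_entry() splits entry resolution from query-id stamping: the hash lookup
runs once per thread, and later calls from the same THD only refresh
update_query_id. A standalone sketch of that caching shape (Session, VarEntry and
the std::map are illustrative stand-ins for THD, user_var_entry and the user_vars
hash):

    #include <map>
    #include <string>

    struct VarEntry { unsigned long long update_query_id; };

    struct Session
    {
      unsigned long long query_id;
      std::map<std::string, VarEntry> user_vars;   // stand-in for the hash
    };

    struct SetUserVarSketch
    {
      Session  *entry_session;
      VarEntry *entry;
      SetUserVarSketch() : entry_session(0), entry(0) {}

      bool set_entry(Session *s, const std::string &name)
      {
        if (s != entry_session || !entry)          // resolve once per thread
        {
          entry_session= s;
          entry= &s->user_vars[name];              // creates entry if missing
        }
        entry->update_query_id= s->query_id;       // always re-stamp
        return false;                              // FALSE == success
      }
    };
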
diff --git a/sql/item_func.h b/sql/item_func.h
index 02631d7643d..d84abdb6e56 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1294,6 +1294,17 @@ class Item_func_set_user_var :public Item_func
{
enum Item_result cached_result_type;
user_var_entry *entry;
+ /*
+ The entry_thd variable is used:
+ 1) to skip unnecessary updates of the entry field (see above);
+    2) to reset the entry field that was initialized in another thread
+       (for example, the item tree of a trigger that updates user variables
+       may be shared between several connections, and the entry_thd field
+       prevents a concurrent connection calling the same trigger from
+       updating the user variables that were initially set in the first
+       connection's context).
+ */
+ THD *entry_thd;
char buffer[MAX_FIELD_WIDTH];
String value;
my_decimal decimal_buff;
@@ -1309,7 +1320,8 @@ class Item_func_set_user_var :public Item_func
public:
LEX_STRING name; // keep it public
Item_func_set_user_var(LEX_STRING a,Item *b)
- :Item_func(b), cached_result_type(INT_RESULT), name(a)
+ :Item_func(b), cached_result_type(INT_RESULT),
+ entry(NULL), entry_thd(NULL), name(a)
{}
enum Functype functype() const { return SUSERVAR_FUNC; }
double val_real();
@@ -1340,6 +1352,7 @@ public:
}
void save_org_in_field(Field *field) { (void)save_in_field(field, 1, 0); }
bool register_field_in_read_map(uchar *arg);
+ bool set_entry(THD *thd, bool create_if_not_exists);
};
diff --git a/sql/lock.cc b/sql/lock.cc
index 675b94c2175..faddb8c586c 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -854,7 +854,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
if ((table=table_ptr[i])->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE)
continue;
lock_type= table->reginfo.lock_type;
- DBUG_ASSERT (lock_type != TL_WRITE_DEFAULT);
+ DBUG_ASSERT(lock_type != TL_WRITE_DEFAULT && lock_type != TL_READ_DEFAULT);
if (lock_type >= TL_WRITE_ALLOW_WRITE)
{
*write_lock_used=table;
diff --git a/sql/log.cc b/sql/log.cc
index 8f46728bcbd..6536be1b6ef 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -3778,7 +3778,7 @@ THD::binlog_set_pending_rows_event(Rows_log_event* ev)
int
MYSQL_BIN_LOG::remove_pending_rows_event(THD *thd)
{
- DBUG_ENTER(__FUNCTION__);
+ DBUG_ENTER("MYSQL_BIN_LOG::remove_pending_rows_event");
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd_get_ha_data(thd, binlog_hton);
@@ -4010,11 +4010,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
DBUG_PRINT("info",("number of auto_inc intervals: %u",
thd->auto_inc_intervals_in_cur_stmt_for_binlog.
nb_elements()));
- /*
- If the auto_increment was second in a table's index (possible with
- MyISAM or BDB) (table->next_number_keypart != 0), such event is
- in fact not necessary. We could avoid logging it.
- */
Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
thd->auto_inc_intervals_in_cur_stmt_for_binlog.
minimum());
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 1547c999eea..1e7b6d1854d 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -3349,6 +3349,17 @@ int Start_log_event_v3::do_apply_event(Relay_log_info const *rli)
close_temporary_tables(thd);
cleanup_load_tmpdir();
}
+ else
+ {
+ /*
+        Set the thread reference of all temporary tables to the current
+        thread, as they may still point to the "old" SQL slave thread in
+        case of its restart.
+ */
+ TABLE *table;
+ for (table= thd->temporary_tables; table; table= table->next)
+ table->in_use= thd;
+ }
break;
/*
@@ -8061,7 +8072,6 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability
*/
}
- m_table->file->ha_start_bulk_insert(0);
/*
We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill
any TIMESTAMP column with data from the row but instead will use
@@ -8200,7 +8210,16 @@ Rows_log_event::write_row(const Relay_log_info *const rli,
/* unpack row into table->record[0] */
error= unpack_current_row(rli); // TODO: how to handle errors?
-
+ if (m_curr_row == m_rows_buf)
+ {
+      /* This is the first row to be inserted; we estimate the row count
         from the size of the first row and use that value to initialize the
         storage engine for bulk insertion */
+ ulong estimated_rows= (m_rows_end - m_curr_row) / (m_curr_row_end - m_curr_row);
+ m_table->file->ha_start_bulk_insert(estimated_rows);
+ }
+
+
#ifndef DBUG_OFF
DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);
@@ -8605,10 +8624,10 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
the necessary bits on the bytes and don't set the filler bits
correctly.
*/
- my_ptrdiff_t const pos=
- table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
- table->record[0][pos]= 0xFF;
-
+ if (table->s->null_bytes > 0)
+ table->record[0][table->s->null_bytes - 1]|=
+ 256U - (1U << table->s->last_null_bit_pos);
+
if ((error= table->file->index_read_map(table->record[0], m_key,
HA_WHOLE_KEY,
HA_READ_KEY_EXACT)))
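
The find_row() change above is subtle: the old code forced the whole last null byte
to 0xFF, clobbering genuine NULL flags, while the new code ORs in only the unused
filler bits above last_null_bit_pos. A standalone sketch of the mask arithmetic
(values chosen purely for illustration):

    #include <cstdio>

    int main()
    {
      unsigned last_null_bit_pos= 3;        // example: 3 NULL flags in use
      unsigned char filler_mask=
        (unsigned char) (256U - (1U << last_null_bit_pos));
      unsigned char last_null_byte= 0x05;   // two columns really are NULL
      last_null_byte|= filler_mask;         // fillers set, real flags kept
      printf("mask=0x%02X byte=0x%02X\n",
             (unsigned) filler_mask, (unsigned) last_null_byte);
      return 0;                             // prints mask=0xF8 byte=0xFD
    }
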
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 3a52c5c0130..d11f2838e3a 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -803,6 +803,7 @@ bool check_string_byte_length(LEX_STRING *str, const char *err_msg,
bool check_string_char_length(LEX_STRING *str, const char *err_msg,
uint max_char_length, CHARSET_INFO *cs,
bool no_error);
+bool check_host_name(LEX_STRING *str);
bool parse_sql(THD *thd,
Parser_state *parser_state,
@@ -1265,6 +1266,7 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last,
TABLE_LIST *new_child_list, TABLE_LIST **new_last);
bool reopen_table(TABLE *table);
bool reopen_tables(THD *thd,bool get_locks,bool in_refresh);
+thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table);
void close_data_files_and_morph_locks(THD *thd, const char *db,
const char *table_name);
void close_handle_and_leave_table_as_lock(TABLE *table);
@@ -1938,7 +1940,7 @@ extern bool opt_using_transactions;
extern bool mysqld_embedded;
#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
#ifdef MYSQL_SERVER
-extern bool using_update_log, opt_large_files, server_id_supplied;
+extern bool opt_large_files, server_id_supplied;
extern bool opt_update_log, opt_bin_log, opt_error_log;
extern my_bool opt_log, opt_slow_log;
extern ulong log_output_options;
@@ -2240,6 +2242,7 @@ uint build_table_shadow_filename(char *buff, size_t bufflen,
#define FN_TO_IS_TMP (1 << 1)
#define FN_IS_TMP (FN_FROM_IS_TMP | FN_TO_IS_TMP)
#define NO_FRM_RENAME (1 << 2)
+#define FRM_ONLY (1 << 3)
/* from hostname.cc */
struct in_addr;
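
FRM_ONLY extends an existing bit-flag set, so callers can combine it freely with
the temporary-name flags. A standalone sketch of how such flags compose and are
tested (FN_FROM_IS_TMP is assumed to be bit 0; it is not shown in the hunk):

    #include <cstdio>

    #define FN_FROM_IS_TMP (1 << 0)   // assumed value, not in this hunk
    #define FN_TO_IS_TMP   (1 << 1)
    #define FN_IS_TMP      (FN_FROM_IS_TMP | FN_TO_IS_TMP)
    #define NO_FRM_RENAME  (1 << 2)
    #define FRM_ONLY       (1 << 3)

    int main()
    {
      unsigned flags= FN_FROM_IS_TMP | FRM_ONLY;
      if (flags & FRM_ONLY)
        printf("touch only the .frm file, not the engine data\n");
      if ((flags & FN_IS_TMP) == FN_IS_TMP)
        printf("both source and target names are temporary\n"); // not hit
      return 0;
    }
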
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index a3b0123ee4a..d59ea8eca45 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -382,7 +382,7 @@ my_bool opt_character_set_client_handshake= 1;
bool server_id_supplied = 0;
bool opt_endinfo, using_udf_functions;
my_bool locked_in_memory;
-bool opt_using_transactions, using_update_log;
+bool opt_using_transactions;
bool volatile abort_loop;
bool volatile shutdown_in_progress;
/**
@@ -3384,7 +3384,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
if (opt_slow_log && opt_slow_logname && !(log_output_options & LOG_FILE)
&& !(log_output_options & LOG_NONE))
sql_print_warning("Although a path was specified for the "
- "--log-slow-queries option, log tables are used. "
+ "--log_slow_queries option, log tables are used. "
"To enable logging to files use the --log-output=file option.");
s= opt_logname ? opt_logname : make_default_log_name(buff, ".log");
@@ -3753,23 +3753,25 @@ with --log-bin instead.");
unireg_abort(1);
}
if (!opt_bin_log)
- if (opt_binlog_format_id != BINLOG_FORMAT_UNSPEC)
{
- sql_print_error("You need to use --log-bin to make "
- "--binlog-format work.");
- unireg_abort(1);
- }
+ if (opt_binlog_format_id != BINLOG_FORMAT_UNSPEC)
+ {
+ sql_print_error("You need to use --log-bin to make "
+ "--binlog-format work.");
+ unireg_abort(1);
+ }
else
- {
- global_system_variables.binlog_format= BINLOG_FORMAT_MIXED;
+ {
+ global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
}
+ }
else
if (opt_binlog_format_id == BINLOG_FORMAT_UNSPEC)
- global_system_variables.binlog_format= BINLOG_FORMAT_MIXED;
+ global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
else
{
DBUG_ASSERT(global_system_variables.binlog_format != BINLOG_FORMAT_UNSPEC);
- }
+ }
/* Check that we have not let the format to unspecified at this point */
DBUG_ASSERT((uint)global_system_variables.binlog_format <=
@@ -3815,12 +3817,6 @@ server.");
{
unireg_abort(1);
}
-
- /*
- Used to specify which type of lock we need to use for queries of type
- INSERT ... SELECT. This will change when we have row level logging.
- */
- using_update_log=1;
}
/* call ha_init_key_cache() on all key caches to init them */
@@ -5536,7 +5532,9 @@ enum options_mysqld
OPT_MIN_EXAMINED_ROW_LIMIT,
OPT_LOG_SLOW_SLAVE_STATEMENTS,
OPT_OLD_MODE,
- OPT_SLAVE_EXEC_MODE
+ OPT_SLAVE_EXEC_MODE,
+ OPT_GENERAL_LOG_FILE,
+ OPT_SLOW_QUERY_LOG_FILE
};
@@ -5733,7 +5731,7 @@ struct my_option my_long_options[] =
"Set up signals usable for debugging",
(uchar**) &opt_debugging, (uchar**) &opt_debugging,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"general-log", OPT_GENERAL_LOG,
+ {"general_log", OPT_GENERAL_LOG,
"Enable|disable general log", (uchar**) &opt_log,
(uchar**) &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef HAVE_LARGE_PAGES
@@ -5769,8 +5767,12 @@ Disable with --skip-large-pages.",
(uchar**) &opt_local_infile,
(uchar**) &opt_local_infile, 0, GET_BOOL, OPT_ARG,
1, 0, 0, 0, 0, 0},
- {"log", 'l', "Log connections and queries to file.", (uchar**) &opt_logname,
+ {"log", 'l', "Log connections and queries to file (deprecated option, use "
+ "--general_log/--general_log_file instead).", (uchar**) &opt_logname,
(uchar**) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+ {"general_log_file", OPT_GENERAL_LOG_FILE,
+ "Log connections and queries to given file.", (uchar**) &opt_logname,
+ (uchar**) &opt_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"log-bin", OPT_BIN_LOG,
"Log update queries in binary format. Optional (but strongly recommended "
"to avoid replication problems if server's hostname changes) argument "
@@ -5844,10 +5846,17 @@ Disable with --skip-large-pages.",
(uchar**) &opt_log_slow_slave_statements,
(uchar**) &opt_log_slow_slave_statements,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"log-slow-queries", OPT_SLOW_QUERY_LOG,
- "Log slow queries to a table or log file. Defaults logging to table mysql.slow_log or hostname-slow.log if --log-output=file is used. Must be enabled to activate other slow log options.",
+ {"log_slow_queries", OPT_SLOW_QUERY_LOG,
+ "Log slow queries to a table or log file. Defaults logging to table "
+ "mysql.slow_log or hostname-slow.log if --log-output=file is used. "
+ "Must be enabled to activate other slow log options. "
+ "(deprecated option, use --slow_query_log/--slow_query_log_file instead)",
(uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
+ {"slow_query_log_file", OPT_SLOW_QUERY_LOG_FILE,
+ "Log slow queries to given log file. Defaults logging to hostname-slow.log. Must be enabled to activate other slow log options.",
+ (uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR,
+ REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"log-tc", OPT_LOG_TC,
"Path to transaction coordinator log (used for transactions that affect "
"more than one storage engine, when binary log is disabled)",
@@ -6229,7 +6238,7 @@ Can't be set to 1 if --log-slave-updates is used.",
{"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. Use --skip-symbolic-links instead.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip-thread-priority", OPT_SKIP_PRIOR,
- "Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG,
+ "Don't give threads different priorities. Deprecated option.", 0, 0, 0, GET_NO_ARG, NO_ARG,
DEFAULT_SKIP_THREAD_PRIORITY, 0, 0, 0, 0, 0},
#ifdef HAVE_REPLICATION
{"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR,
@@ -7431,7 +7440,7 @@ static void mysql_init_variables(void)
slave_open_temp_tables= 0;
cached_thread_count= 0;
opt_endinfo= using_udf_functions= 0;
- opt_using_transactions= using_update_log= 0;
+ opt_using_transactions= 0;
abort_loop= select_thread_in_use= signal_thread_in_use= 0;
ready_to_exit= shutdown_in_progress= grant_option= 0;
aborted_threads= aborted_connects= 0;
@@ -7541,13 +7550,13 @@ static void mysql_init_variables(void)
have_community_features = SHOW_OPTION_YES;
#else
have_community_features = SHOW_OPTION_NO;
+#endif
global_system_variables.ndb_index_stat_enable=FALSE;
max_system_variables.ndb_index_stat_enable=TRUE;
global_system_variables.ndb_index_stat_cache_entries=32;
max_system_variables.ndb_index_stat_cache_entries=~0L;
global_system_variables.ndb_index_stat_update_freq=20;
max_system_variables.ndb_index_stat_update_freq=~0L;
-#endif
#ifdef HAVE_OPENSSL
have_ssl=SHOW_OPTION_YES;
#else
@@ -7655,6 +7664,7 @@ mysqld_get_one_option(int optid,
default_collation_name= 0;
break;
case 'l':
+ WARN_DEPRECATED(NULL, "7.0", "--log", "'--general_log'/'--general_log_file'");
opt_log=1;
break;
case 'h':
@@ -7824,6 +7834,7 @@ mysqld_get_one_option(int optid,
}
#endif /* HAVE_REPLICATION */
case (int) OPT_SLOW_QUERY_LOG:
+ WARN_DEPRECATED(NULL, "7.0", "--log_slow_queries", "'--slow_query_log'/'--slow_query_log_file'");
opt_slow_log= 1;
break;
#ifdef WITH_CSV_STORAGE_ENGINE
@@ -7871,6 +7882,9 @@ mysqld_get_one_option(int optid,
break;
case (int) OPT_SKIP_PRIOR:
opt_specialflag|= SPECIAL_NO_PRIOR;
+ sql_print_warning("The --skip-thread-priority startup option is deprecated "
+ "and will be removed in MySQL 7.0. MySQL 6.0 and up do not "
+ "give threads different priorities.");
break;
case (int) OPT_SKIP_LOCK:
opt_external_locking=0;
@@ -8213,7 +8227,7 @@ static void get_options(int *argc,char **argv)
if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes ||
opt_log_slow_slave_statements) &&
!opt_slow_log)
- sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log-slow-queries is not set");
+ sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set");
#if defined(HAVE_BROKEN_REALPATH)
my_use_symdir=0;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 28ee8af0699..f228551b586 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -574,6 +574,7 @@ public:
keys_map.clear_all();
bzero((char*) keys,sizeof(keys));
}
+ SEL_TREE(SEL_TREE *arg, RANGE_OPT_PARAM *param);
/*
Note: there may exist SEL_TREE objects with sel_tree->type=KEY and
keys[i]=0 for all i. (SergeyP: it is not clear whether there is any
@@ -767,6 +768,7 @@ public:
trees_next(trees),
trees_end(trees + PREALLOCED_TREES)
{}
+ SEL_IMERGE (SEL_IMERGE *arg, RANGE_OPT_PARAM *param);
int or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree);
int or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree);
int or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge);
@@ -883,6 +885,61 @@ int SEL_IMERGE::or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* im
}
+SEL_TREE::SEL_TREE(SEL_TREE *arg, RANGE_OPT_PARAM *param): Sql_alloc()
+{
+ keys_map= arg->keys_map;
+ type= arg->type;
+ for (int idx= 0; idx < MAX_KEY; idx++)
+ {
+ if ((keys[idx]= arg->keys[idx]))
+ keys[idx]->increment_use_count(1);
+ }
+
+ List_iterator<SEL_IMERGE> it(arg->merges);
+ for (SEL_IMERGE *el= it++; el; el= it++)
+ {
+ SEL_IMERGE *merge= new SEL_IMERGE(el, param);
+ if (!merge || merge->trees == merge->trees_next)
+ {
+ merges.empty();
+ return;
+ }
+ merges.push_back (merge);
+ }
+}
+
+
+SEL_IMERGE::SEL_IMERGE (SEL_IMERGE *arg, RANGE_OPT_PARAM *param) : Sql_alloc()
+{
+ uint elements= (arg->trees_end - arg->trees);
+ if (elements > PREALLOCED_TREES)
+ {
+ uint size= elements * sizeof (SEL_TREE **);
+ if (!(trees= (SEL_TREE **)alloc_root(param->mem_root, size)))
+ goto mem_err;
+ }
+ else
+ trees= &trees_prealloced[0];
+
+ trees_next= trees;
+ trees_end= trees + elements;
+
+ for (SEL_TREE **tree = trees, **arg_tree= arg->trees; tree < trees_end;
+ tree++, arg_tree++)
+ {
+ if (!(*tree= new SEL_TREE(*arg_tree, param)))
+ goto mem_err;
+ }
+
+ return;
+
+mem_err:
+ trees= &trees_prealloced[0];
+ trees_next= trees;
+ trees_end= trees;
+}
+
+
/*
Perform AND operation on two index_merge lists and store result in *im1.
*/
@@ -942,10 +999,23 @@ int imerge_list_or_tree(RANGE_OPT_PARAM *param,
{
SEL_IMERGE *imerge;
List_iterator<SEL_IMERGE> it(*im1);
+ bool tree_used= FALSE;
while ((imerge= it++))
{
- if (imerge->or_sel_tree_with_checks(param, tree))
+ SEL_TREE *or_tree;
+ if (tree_used)
+ {
+ or_tree= new SEL_TREE (tree, param);
+ if (!or_tree ||
+ (or_tree->keys_map.is_clear_all() && or_tree->merges.is_empty()))
+ return FALSE;
+ }
+ else
+ or_tree= tree;
+
+ if (imerge->or_sel_tree_with_checks(param, or_tree))
it.remove();
+ tree_used= TRUE;
}
return im1->is_empty();
}
@@ -3145,10 +3215,12 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
ppar->subpart_fields););
/* Find the subpartition (it's HASH/KEY so we always have one) */
partition_info *part_info= ppar->part_info;
- uint32 subpart_id= part_info->get_subpartition_id(part_info);
-
+ uint32 part_id, subpart_id;
+
+ if (part_info->get_subpartition_id(part_info, &subpart_id))
+ return 0;
+
/* Mark this partition as used in each subpartition. */
- uint32 part_id;
while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
NOT_A_PARTITION_ID)
{
@@ -5547,7 +5619,9 @@ get_mm_parts(RANGE_OPT_PARAM *param, COND *cond_func, Field *field,
tree->keys_map.set_bit(key_part->key);
}
}
-
+
+ if (tree && tree->merges.is_empty() && tree->keys_map.is_clear_all())
+ tree= NULL;
DBUG_RETURN(tree);
}
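
The imerge_list_or_tree() fix encodes a clone-on-reuse rule: the first merge may
consume tree in place, but every later merge must get a fresh copy, because
or_sel_tree_with_checks() can destructively link its argument into its own
structures. A standalone sketch of the rule (types are simplified; the copies are
deliberately not freed here because in the server they come from a mem_root arena
that is released wholesale):

    #include <vector>

    struct TreeSketch
    {
      int data;
      TreeSketch(int d) : data(d) {}
      TreeSketch(const TreeSketch &other) : data(other.data) {}
    };

    struct MergeSketch
    {
      void or_with(TreeSketch *) { /* may keep pointers into the tree */ }
    };

    void list_or_tree(std::vector<MergeSketch> &merges, TreeSketch *tree)
    {
      bool tree_used= false;
      for (size_t i= 0; i < merges.size(); i++)
      {
        TreeSketch *or_tree= tree_used ? new TreeSketch(*tree) : tree;
        merges[i].or_with(or_tree);        // first call may consume 'tree'
        tree_used= true;                   // later calls work on copies
      }
    }
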
diff --git a/sql/parse_file.cc b/sql/parse_file.cc
index 1f0a45edd5e..d8cbc7ff174 100644
--- a/sql/parse_file.cc
+++ b/sql/parse_file.cc
@@ -26,6 +26,9 @@
#include <my_sys.h>
#include <my_dir.h>
+/* from sql_db.cc */
+extern long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path);
+
/**
Write string with escaping.
@@ -282,8 +285,9 @@ sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name,
DBUG_RETURN(TRUE);
}
- // archive copies management
path[path_end]='\0';
+#ifdef FRM_ARCHIVE
+ // archive copies management: disabled unused feature (see bug #17823).
if (!access(path, F_OK))
{
if (old_version != ULONGLONG_MAX && max_versions != 0)
@@ -330,6 +334,7 @@ sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name,
}
}
}
+#endif//FRM_ARCHIVE
{
// rename temporary file
@@ -352,6 +357,7 @@ err_w_file:
/**
Renames a frm file (including backups) in same schema.
+  @param thd        thread handler
@param schema name of given schema
@param old_name original file name
@param new_name new file name
@@ -363,7 +369,8 @@ err_w_file:
@retval
1 Error (only if renaming of frm failed)
*/
-my_bool rename_in_schema_file(const char *schema, const char *old_name,
+my_bool rename_in_schema_file(THD *thd,
+ const char *schema, const char *old_name,
const char *new_name, ulonglong revision,
uint num_view_backups)
{
@@ -377,9 +384,10 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
if (my_rename(old_path, new_path, MYF(MY_WME)))
return 1;
- /* check if arc_dir exists */
+ /* check if arc_dir exists: disabled unused feature (see bug #17823). */
build_table_filename(arc_path, sizeof(arc_path) - 1, schema, "arc", "", 0);
+#ifdef FRM_ARCHIVE
if (revision > 0 && !access(arc_path, F_OK))
{
char old_name_buf[FN_REFLEN], new_name_buf[FN_REFLEN];
@@ -400,6 +408,16 @@ my_bool rename_in_schema_file(const char *schema, const char *old_name,
my_rename(old_path, new_path, MYF(0));
}
}
+#else//FRM_ARCHIVE
+ { // remove obsolete 'arc' directory and files if any
+ MY_DIR *new_dirp;
+ if ((new_dirp = my_dir(arc_path, MYF(MY_DONT_SORT))))
+ {
+ DBUG_PRINT("my",("Archive subdir found: %s", arc_path));
+ (void) mysql_rm_arc_files(thd, new_dirp, arc_path);
+ }
+ }
+#endif//FRM_ARCHIVE
return 0;
}
diff --git a/sql/parse_file.h b/sql/parse_file.h
index 30c902478b8..c05b2853b9a 100644
--- a/sql/parse_file.h
+++ b/sql/parse_file.h
@@ -82,8 +82,9 @@ my_bool
sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name,
const LEX_STRING *type,
uchar* base, File_option *parameters, uint versions);
-my_bool rename_in_schema_file(const char *schema, const char *old_name,
- const char *new_name, ulonglong revision,
+my_bool rename_in_schema_file(THD *thd,
+ const char *schema, const char *old_name,
+ const char *new_name, ulonglong revision,
uint num_view_backups);
class File_parser: public Sql_alloc
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 2af7fa1717c..703b92305b1 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -25,8 +25,9 @@ class partition_info;
typedef int (*get_part_id_func)(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
-typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
-
+typedef int (*get_subpart_id_func)(partition_info *part_info,
+ uint32 *part_id);
+
struct st_ddl_log_memory_entry;
class partition_info : public Sql_alloc
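
The typedef change converts the subpartition callback from returning the id to an
error-code convention: the id travels through an out parameter, and a non-zero
return means the partition function failed. A standalone sketch of the calling
convention (part_info_sketch and subpart_id_ok are illustrative names, not server
API):

    #include <cstdint>

    struct part_info_sketch;
    typedef int (*get_subpart_id_func_sketch)(part_info_sketch *info,
                                              uint32_t *part_id);

    struct part_info_sketch
    {
      get_subpart_id_func_sketch get_subpartition_id;
    };

    static int subpart_id_ok(part_info_sketch *, uint32_t *part_id)
    {
      *part_id= 0;   // this toy always picks the first subpartition
      return 0;      // 0 == success; non-zero would be a handler error code
    }

    static int lookup(part_info_sketch *info, uint32_t *id)
    {
      return info->get_subpartition_id(info, id);  // propagate errors
    }
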
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 4259c3a6aaf..a62cafc3873 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -2391,6 +2391,12 @@ end:
bool sys_var_log_state::update(THD *thd, set_var *var)
{
bool res;
+
+ if (this == &sys_var_log)
+ WARN_DEPRECATED(thd, "7.0", "@@log", "'@@general_log'");
+ else if (this == &sys_var_log_slow)
+ WARN_DEPRECATED(thd, "7.0", "@@log_slow_queries", "'@@slow_query_log'");
+
pthread_mutex_lock(&LOCK_global_system_variables);
if (!var->save_result.ulong_value)
{
@@ -2405,6 +2411,11 @@ bool sys_var_log_state::update(THD *thd, set_var *var)
void sys_var_log_state::set_default(THD *thd, enum_var_type type)
{
+ if (this == &sys_var_log)
+ WARN_DEPRECATED(thd, "7.0", "@@log", "'@@general_log'");
+ else if (this == &sys_var_log_slow)
+ WARN_DEPRECATED(thd, "7.0", "@@log_slow_queries", "'@@slow_query_log'");
+
pthread_mutex_lock(&LOCK_global_system_variables);
logger.deactivate_log_handler(thd, log_type);
pthread_mutex_unlock(&LOCK_global_system_variables);
@@ -3711,7 +3722,7 @@ bool sys_var_thd_storage_engine::update(THD *thd, set_var *var)
void sys_var_thd_table_type::warn_deprecated(THD *thd)
{
- WARN_DEPRECATED(thd, "5.2", "table_type", "'storage_engine'");
+ WARN_DEPRECATED(thd, "5.2", "@@table_type", "'@@storage_engine'");
}
void sys_var_thd_table_type::set_default(THD *thd, enum_var_type type)
@@ -3973,8 +3984,8 @@ bool process_key_caches(process_key_cache_t func)
void sys_var_trust_routine_creators::warn_deprecated(THD *thd)
{
- WARN_DEPRECATED(thd, "5.2", "log_bin_trust_routine_creators",
- "'log_bin_trust_function_creators'");
+ WARN_DEPRECATED(thd, "5.2", "@@log_bin_trust_routine_creators",
+ "'@@log_bin_trust_function_creators'");
}
void sys_var_trust_routine_creators::set_default(THD *thd, enum_var_type type)
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index d1f920fd3a5..534cd0a7ca1 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -126,6 +126,9 @@ sp_get_item_value(THD *thd, Item *item, String *str)
if (cs->escape_with_backslash_is_dangerous)
buf.append(' ');
append_query_string(cs, result, &buf);
+ buf.append(" COLLATE '");
+ buf.append(item->collation.collation->name);
+ buf.append('\'');
str->copy(buf);
return str;
@@ -1940,10 +1943,14 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
we'll leave it here.
*/
if (!thd->in_sub_stmt)
- close_thread_tables(thd);
+ {
+ thd->lex->unit.cleanup();
+ close_thread_tables(thd);
+ thd->rollback_item_tree_changes();
+ }
- DBUG_PRINT("info",(" %.*s: eval args done",
- (int) m_name.length, m_name.str));
+ DBUG_PRINT("info",(" %.*s: eval args done", (int) m_name.length,
+ m_name.str));
}
if (!(m_flags & LOG_SLOW_STATEMENTS) && thd->enable_slow_log)
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 39030462f0b..f18ee0ec562 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3720,6 +3720,20 @@ void assign_new_table_id(TABLE_SHARE *share)
DBUG_VOID_RETURN;
}
+#ifndef DBUG_OFF
+/* Cause a spurious statement reprepare for debug purposes. */
+static bool inject_reprepare(THD *thd)
+{
+ if (thd->m_reprepare_observer && thd->stmt_arena->is_reprepared == FALSE)
+ {
+ thd->m_reprepare_observer->report_error(thd);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+#endif
+
/**
Compare metadata versions of an element obtained from the table
definition cache and its corresponding node in the parse tree.
@@ -3773,15 +3787,7 @@ check_and_update_table_version(THD *thd,
tables->set_table_ref_id(table_share);
}
-#ifndef DBUG_OFF
- /* Spuriously reprepare each statement. */
- if (_db_strict_keyword_("reprepare_each_statement") &&
- thd->m_reprepare_observer && thd->stmt_arena->is_reprepared == FALSE)
- {
- thd->m_reprepare_observer->report_error(thd);
- return TRUE;
- }
-#endif
+ DBUG_EXECUTE_IF("reprepare_each_statement", return inject_reprepare(thd););
return FALSE;
}
@@ -4356,6 +4362,38 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last,
/*
+  Return an appropriate read lock type for the given table object.
+
+ @param thd Thread context
+ @param table TABLE object for table to be locked
+
+ @remark Due to a statement-based replication limitation, statements such as
+ INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need
+ to grab a TL_READ_NO_INSERT lock on the source table in order to
+ prevent the replication of a concurrent statement that modifies the
+ source table. If such a statement gets applied on the slave before
+          the INSERT .. SELECT statement finishes, data on the master could
+          differ from data on the slave, ending up with a discrepancy between
+          the binary log and the table state. This does not apply to I_S and
+          log tables, as it is always unsafe to replicate such tables under
+          statement-based replication: the table on the slave might contain
+          other data (e.g., general_log is enabled on the slave). Such a
+          statement will be marked as unsafe for SBR in decide_logging_format().
+*/
+
+thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table)
+{
+ bool log_on= mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG);
+ ulong binlog_format= thd->variables.binlog_format;
+ if ((log_on == FALSE) || (binlog_format == BINLOG_FORMAT_ROW) ||
+ (table->s->table_category == TABLE_CATEGORY_PERFORMANCE))
+ return TL_READ;
+ else
+ return TL_READ_NO_INSERT;
+}
+
+
+/*
Open all tables in list
SYNOPSIS
@@ -4629,6 +4667,9 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
{
if (tables->lock_type == TL_WRITE_DEFAULT)
tables->table->reginfo.lock_type= thd->update_lock_default;
+ else if (tables->lock_type == TL_READ_DEFAULT)
+ tables->table->reginfo.lock_type=
+ read_lock_type_for_table(thd, tables->table);
else if (tables->table->s->tmp_table == NO_TMP_TABLE)
tables->table->reginfo.lock_type= tables->lock_type;
}
@@ -5036,7 +5077,11 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
void* prev_ht= NULL;
for (TABLE_LIST *table= tables; table; table= table->next_global)
{
- if (!table->placeholder() && table->lock_type >= TL_WRITE_ALLOW_WRITE)
+ if (table->placeholder())
+ continue;
+ if (table->table->s->table_category == TABLE_CATEGORY_PERFORMANCE)
+ thd->lex->set_stmt_unsafe();
+ if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
{
ulonglong const flags= table->table->file->ha_table_flags();
DBUG_PRINT("info", ("table: %s; ha_table_flags: %s%s",
@@ -5706,8 +5751,21 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
{
/* This is a base table. */
DBUG_ASSERT(nj_col->view_field == NULL);
- DBUG_ASSERT(nj_col->table_ref->table == nj_col->table_field->table);
- found_field= nj_col->table_field;
+ /*
+        This fix_fields() call is not necessary (initially this item is
+        fixed by the Item_field constructor; after reopen_tables the
+        Item_func_eq calls fix_fields on that item); it is just a check,
+        during table reopening, for columns that were dropped by a
+        concurrent connection.
+ */
+ if (!nj_col->table_field->fixed &&
+ nj_col->table_field->fix_fields(thd, (Item **)&nj_col->table_field))
+ {
+ DBUG_PRINT("info", ("column '%s' was dropped by the concurrent connection",
+ nj_col->table_field->name));
+ DBUG_RETURN(NULL);
+ }
+ DBUG_ASSERT(nj_col->table_ref->table == nj_col->table_field->field->table);
+ found_field= nj_col->table_field->field;
update_field_dependencies(thd, found_field, nj_col->table_ref->table);
}
@@ -6632,7 +6690,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
const char *field_name_1;
/* true if field_name_1 is a member of using_fields */
bool is_using_column_1;
- if (!(nj_col_1= it_1.get_or_create_column_ref(leaf_1)))
+ if (!(nj_col_1= it_1.get_or_create_column_ref(thd, leaf_1)))
goto err;
field_name_1= nj_col_1->name();
is_using_column_1= using_fields &&
@@ -6653,7 +6711,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
{
Natural_join_column *cur_nj_col_2;
const char *cur_field_name_2;
- if (!(cur_nj_col_2= it_2.get_or_create_column_ref(leaf_2)))
+ if (!(cur_nj_col_2= it_2.get_or_create_column_ref(thd, leaf_2)))
goto err;
cur_field_name_2= cur_nj_col_2->name();
DBUG_PRINT ("info", ("cur_field_name_2=%s.%s",
@@ -7143,15 +7201,24 @@ static bool setup_natural_join_row_types(THD *thd,
TABLE_LIST *left_neighbor;
/* Table reference to the right of the current. */
TABLE_LIST *right_neighbor= NULL;
+ bool save_first_natural_join_processing=
+ context->select_lex->first_natural_join_processing;
+
+ context->select_lex->first_natural_join_processing= FALSE;
/* Note that tables in the list are in reversed order */
for (left_neighbor= table_ref_it++; left_neighbor ; )
{
table_ref= left_neighbor;
left_neighbor= table_ref_it++;
- /* For stored procedures do not redo work if already done. */
- if (context->select_lex->first_execution)
+ /*
+ Do not redo work if already done:
+ 1) for stored procedures,
+ 2) for multitable update after lock failure and table reopening.
+ */
+ if (save_first_natural_join_processing)
{
+ context->select_lex->first_natural_join_processing= FALSE;
if (store_top_level_join_columns(thd, table_ref,
left_neighbor, right_neighbor))
return TRUE;
@@ -7296,6 +7363,22 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
if (ref_pointer_array)
bzero(ref_pointer_array, sizeof(Item *) * fields.elements);
+ /*
+    We call set_entry() here (before fix_fields() of the whole list of field
+    items) because:
+    1) the list of field items has the same order as in the query, and an
+    Item_func_get_user_var item may precede the Item_func_set_user_var:
+    SELECT @a, @a := 10 FROM t;
+    2) the entry->update_query_id value controls the constantness of
+    Item_func_get_user_var items, so in the presence of
+    Item_func_set_user_var items we have to refresh their entries before
+    fixing the Item_func_get_user_var items.
+ */
+ List_iterator<Item_func_set_user_var> li(thd->lex->set_var_list);
+ Item_func_set_user_var *var;
+ while ((var= li++))
+ var->set_entry(thd, FALSE);
+
Item **ref= ref_pointer_array;
thd->lex->current_select->cur_pos_in_select_list= 0;
while ((item= it++))
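
read_lock_type_for_table() above reduces to a small decision function: a plain
TL_READ suffices unless statement-based binlogging is actually in effect for a
non-log table. A standalone restatement of that decision (the enums and bools are
stand-ins for the server's types and option checks):

    enum lock_type_sketch  { TL_READ_SKETCH, TL_READ_NO_INSERT_SKETCH };
    enum binlog_fmt_sketch { FMT_STMT, FMT_MIXED, FMT_ROW };

    lock_type_sketch read_lock_type(bool binlog_open, bool binlog_option,
                                    binlog_fmt_sketch fmt, bool is_log_table)
    {
      bool log_on= binlog_open && binlog_option;
      if (!log_on || fmt == FMT_ROW || is_log_table)
        return TL_READ_SKETCH;            // no SBR ordering hazard
      return TL_READ_NO_INSERT_SKETCH;    // keep concurrent inserts out
    }
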
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index f5566acbc6f..81d0d447ac1 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1542,10 +1542,9 @@ void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used)
for (; tables_used; tables_used= tables_used->next_local)
{
thd_proc_info(thd, "invalidating query cache entries (table)");
- if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) &&
+ if (tables_used->lock_type >= TL_WRITE_ALLOW_WRITE &&
tables_used->table)
{
- THD *thd= current_thd;
invalidate_table(thd, tables_used->table);
}
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 98da93385cb..c0f54753ac0 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1481,6 +1481,12 @@ sql_exchange::sql_exchange(char *name,bool flag)
cs= NULL;
}
+bool sql_exchange::escaped_given(void)
+{
+ return escaped != &default_escaped;
+}
+
+
bool select_send::send_fields(List<Item> &list, uint flags)
{
bool res;
@@ -1766,8 +1772,11 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
exchange->line_term=exchange->field_term; // Use this if it exists
field_sep_char= (exchange->enclosed->length() ?
(int) (uchar) (*exchange->enclosed)[0] : field_term_char);
- escape_char= (exchange->escaped->length() ?
- (int) (uchar) (*exchange->escaped)[0] : -1);
+ if (exchange->escaped->length() && (exchange->escaped_given() ||
+ !(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
+ escape_char= (int) (uchar) (*exchange->escaped)[0];
+ else
+ escape_char= -1;
is_ambiguous_field_sep= test(strchr(ESCAPE_CHARS, field_sep_char));
is_unsafe_field_sep= test(strchr(NUMERIC_CHARS, field_sep_char));
line_sep_char= (exchange->line_term->length() ?
@@ -3504,7 +3513,7 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
int THD::binlog_remove_pending_rows_event(bool clear_maps)
{
- DBUG_ENTER(__FUNCTION__);
+ DBUG_ENTER("THD::binlog_remove_pending_rows_event");
if (!mysql_bin_log.is_open())
DBUG_RETURN(0);
@@ -3549,22 +3558,24 @@ int THD::binlog_flush_pending_rows_event(bool stmt_end)
}
-#ifndef DBUG_OFF
+#if !defined(DBUG_OFF) && !defined(_lint)
static const char *
show_query_type(THD::enum_binlog_query_type qtype)
{
switch (qtype) {
- static char buf[64];
case THD::ROW_QUERY_TYPE:
return "ROW";
case THD::STMT_QUERY_TYPE:
return "STMT";
case THD::MYSQL_QUERY_TYPE:
return "MYSQL";
+ case THD::QUERY_TYPE_COUNT:
default:
- sprintf(buf, "UNKNOWN#%d", qtype);
- return buf;
+ DBUG_ASSERT(0 <= qtype && qtype < THD::QUERY_TYPE_COUNT);
}
+ static char buf[64];
+ sprintf(buf, "UNKNOWN#%d", qtype);
+ return buf;
}
#endif
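
The escape-character logic added to select_export::prepare() (and mirrored in
mysql_load() below) boils down to one predicate: the implicit '\' default is
honoured only when sql_mode does not contain NO_BACKSLASH_ESCAPES, while an
explicit ESCAPED BY clause always wins. A standalone restatement of the predicate
(pick_escape_char is an illustrative helper, not server API):

    // Returns the escape character to use, or -1 for "no escaping".
    int pick_escape_char(const char *escaped, unsigned length,
                         bool escaped_given, bool no_backslash_escapes)
    {
      if (length && (escaped_given || !no_backslash_escapes))
        return (int) (unsigned char) escaped[0];
      return -1;
    }
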
diff --git a/sql/sql_class.h b/sql/sql_class.h
index be386adb8b1..d79863c6048 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1543,6 +1543,9 @@ public:
then the latter INSERT will insert no rows
(first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
in the binlog is still needed; the list's minimum will contain 3.
+    This variable is cumulative: if several statements are written to the
+    binlog as one (when stored functions or triggers are used), this list is
+    the concatenation of the intervals reserved by all of those statements.
*/
Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
/* Used by replication and SET INSERT_ID */
@@ -2273,6 +2276,7 @@ public:
ulong skip_lines;
CHARSET_INFO *cs;
sql_exchange(char *name,bool dumpfile_flag);
+ bool escaped_given(void);
};
#include "log_event.h"
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 00476e2fc38..4571ceba93f 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -37,7 +37,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp,
const char *db, const char *path, uint level,
TABLE_LIST **dropped_tables);
-static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path);
+long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path);
static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error);
static void mysql_change_db_impl(THD *thd,
LEX_STRING *new_db_name,
@@ -1095,7 +1095,11 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db,
else if (file->name[0] == 'a' && file->name[1] == 'r' &&
file->name[2] == 'c' && file->name[3] == '\0')
{
- /* .frm archive */
+ /* .frm archive:
+ Those archives are obsolete, but following code should
+ exist to remove existent "arc" directories.
+ See #ifdef FRM_ARCHIVE directives for obsolete code.
+ */
char newpath[FN_REFLEN];
MY_DIR *new_dirp;
strxmov(newpath, org_path, "/", "arc", NullS);
@@ -1259,9 +1263,13 @@ static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error)
RETURN
> 0 number of removed files
-1 error
+
+ NOTE
+    Support for "arc" directories is obsolete; however, this
+    function must remain to remove existing "arc" directories.
+ See #ifdef FRM_ARCHIVE directives for obsolete code.
*/
-static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp,
- const char *org_path)
+long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path)
{
long deleted= 0;
ulong found_other_files= 0;
@@ -1303,6 +1311,7 @@ static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp,
{
goto err;
}
+ deleted++;
}
if (thd->killed)
goto err;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 5225dfe221e..8762d3dc8fa 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1545,6 +1545,17 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
}
}
+
+ /*
+    If the above while loop was iterated more than once, then from the
+    second iteration on the row being inserted has an explicit value in the
+    autoinc field, set by the first call to
+    handler::update_auto_increment(). That value was saved to keep
+    thd->insert_id_for_cur_row from becoming 0; use the saved value here.
+ */
+ if (table->file->insert_id_for_cur_row == 0)
+ table->file->insert_id_for_cur_row= insert_id_for_cur_row;
+
thd->record_first_successful_insert_id_in_cur_stmt(table->file->insert_id_for_cur_row);
/*
Restore column maps if they where replaced during an duplicate key
@@ -2532,8 +2543,13 @@ bool Delayed_insert::handle_inserts(void)
thd_proc_info(&thd, "upgrading lock");
if (thr_upgrade_write_delay_lock(*thd.lock->locks))
{
- /* This can only happen if thread is killed by shutdown */
- sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name.str);
+ /*
+        This can happen if the thread is killed either by a shutdown
+        or because another thread is removing the current table definition
+ from the table cache.
+ */
+ my_error(ER_DELAYED_CANT_CHANGE_LOCK,MYF(ME_FATALERROR),
+ table->s->table_name.str);
goto err;
}
@@ -2688,9 +2704,10 @@ bool Delayed_insert::handle_inserts(void)
query_cache_invalidate3(&thd, table, 1);
if (thr_reschedule_write_lock(*thd.lock->locks))
{
- /* This should never happen */
- sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),
- table->s->table_name.str);
+ /* This is not known to happen. */
+ my_error(ER_DELAYED_CANT_CHANGE_LOCK,MYF(ME_FATALERROR),
+ table->s->table_name.str);
+ goto err;
}
if (!using_bin_log)
table->file->extra(HA_EXTRA_WRITE_CACHE);
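
The write_record() fix keeps the statement's first insert id from degrading to 0 on
duplicate-key retries: after the first retry the row carries an explicit autoinc
value, so the id captured from the first handler::update_auto_increment() call is
restored. A standalone sketch of the restore step (file_sketch stands in for the
handler):

    struct file_sketch { unsigned long long insert_id_for_cur_row; };

    void restore_insert_id(file_sketch *file,
                           unsigned long long saved_insert_id)
    {
      // Zero only when the retry loop overwrote the generated value.
      if (file->insert_id_for_cur_row == 0)
        file->insert_id_for_cur_row= saved_insert_id;
    }
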
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 1822176f00a..4ac73baa992 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -293,6 +293,7 @@ void lex_start(THD *thd)
lex->select_lex.init_query();
lex->value_list.empty();
lex->update_list.empty();
+ lex->set_var_list.empty();
lex->param_list.empty();
lex->view_list.empty();
lex->prepared_stmt_params.empty();
@@ -1552,6 +1553,7 @@ void st_select_lex::init_query()
subquery_in_having= explicit_limit= 0;
is_item_list_lookup= 0;
first_execution= 1;
+ first_natural_join_processing= 1;
first_cond_optimization= 1;
parsing_place= NO_MATTER;
exclude_from_table_unique_test= no_wrap_view_item= FALSE;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index bb3dc00fc8d..53ae984e795 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -667,6 +667,7 @@ public:
case of an error during prepare the PS is not created.
*/
bool first_execution;
+ bool first_natural_join_processing;
bool first_cond_optimization;
/* do not wrap view fields with Item_ref */
bool no_wrap_view_item;
@@ -1549,6 +1550,7 @@ typedef struct st_lex : public Query_tables_list
List<Item> *insert_list,field_list,value_list,update_list;
List<List_item> many_values;
List<set_var_base> var_list;
+ List<Item_func_set_user_var> set_var_list; // in-query assignment list
List<Item_param> param_list;
List<LEX_STRING> view_list; // view list (list of field names in view)
/*
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 255d8e5813d..239fb1d49f3 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -327,7 +327,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
bzero((char*) &info,sizeof(info));
info.ignore= ignore;
info.handle_duplicates=handle_duplicates;
- info.escape_char=escaped->length() ? (*escaped)[0] : INT_MAX;
+ info.escape_char= (escaped->length() && (ex->escaped_given() ||
+ !(thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)))
+ ? (*escaped)[0] : INT_MAX;
READ_INFO read_info(file,tot_length,
ex->cs ? ex->cs : thd->variables.collation_database,
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 8f7f8cfd9c4..b083d93a801 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -5632,7 +5632,7 @@ void mysql_init_multi_delete(LEX *lex)
lex->select_lex.select_limit= 0;
lex->unit.select_limit_cnt= HA_POS_ERROR;
lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list);
- lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
+ lex->lock_option= TL_READ_DEFAULT;
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
}
@@ -7507,6 +7507,39 @@ int test_if_data_home_dir(const char *dir)
C_MODE_END
+/**
+ Check that host name string is valid.
+
+ @param[in] str string to be checked
+
+ @return Operation status
+ @retval FALSE host name is ok
+  @retval TRUE  host name string is longer than HOSTNAME_LENGTH or
+                contains invalid symbols
+*/
+
+bool check_host_name(LEX_STRING *str)
+{
+ const char *name= str->str;
+ const char *end= str->str + str->length;
+ if (check_string_byte_length(str, ER(ER_HOSTNAME), HOSTNAME_LENGTH))
+ return TRUE;
+
+ while (name != end)
+ {
+ if (*name == '@')
+ {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "Malformed hostname (illegal symbol: '%c')", MYF(0),
+ *name);
+ return TRUE;
+ }
+ name++;
+ }
+ return FALSE;
+}
+
+
extern int MYSQLparse(void *thd); // from sql_yacc.cc
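
check_host_name() above rejects over-long names and any '@', which would otherwise
be read as a user/host separator. A standalone sketch of the same two checks (the
60-character limit mirrors HOSTNAME_LENGTH but is hard-coded here purely for
illustration):

    #include <cstring>
    #include <cstdio>

    static bool check_host(const char *name)
    {
      if (strlen(name) > 60)        // stand-in for HOSTNAME_LENGTH
        return true;                // TRUE == invalid, as in the patch
      if (strchr(name, '@'))
        return true;                // '@' is a user/host separator
      return false;
    }

    int main()
    {
      printf("%d %d\n", (int) check_host("db.example.com"),
             (int) check_host("bad@host"));    // prints "0 1"
      return 0;
    }
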
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 6419d336b9f..a45664a9767 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -73,10 +73,8 @@ static int get_part_id_charset_func_subpart(partition_info *part_info,
static int get_part_part_id_charset_func(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
-static uint32 get_subpart_id_charset_func(partition_info *part_info);
-int get_partition_id_list(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
+static int get_subpart_id_charset_func(partition_info *part_info,
+ uint32 *part_id);
int get_partition_id_list(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
@@ -119,10 +117,14 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
int get_partition_id_list_sub_linear_key(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
-uint32 get_partition_id_hash_sub(partition_info *part_info);
-uint32 get_partition_id_key_sub(partition_info *part_info);
-uint32 get_partition_id_linear_hash_sub(partition_info *part_info);
-uint32 get_partition_id_linear_key_sub(partition_info *part_info);
+int get_partition_id_hash_sub(partition_info *part_info,
+ uint32 *part_id);
+int get_partition_id_key_sub(partition_info *part_info,
+ uint32 *part_id);
+int get_partition_id_linear_hash_sub(partition_info *part_info,
+ uint32 *part_id);
+int get_partition_id_linear_key_sub(partition_info *part_info,
+ uint32 *part_id);
static uint32 get_next_partition_via_walking(PARTITION_ITERATOR*);
static void set_up_range_analysis_info(partition_info *part_info);
static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR*);
@@ -2232,17 +2234,24 @@ bool partition_key_modified(TABLE *table, const MY_BITMAP *fields)
SYNOPSIS
part_val_int()
item_expr The item expression to evaluate
+ out:result The value of the partition function,
+                        LONGLONG_MIN if the function evaluates to NULL
RETURN VALUES
- The value of the partition function, LONGLONG_MIN if any null value
- in function
+ TRUE Error in val_int()
+ FALSE ok
*/
-static inline longlong part_val_int(Item *item_expr)
+static inline int part_val_int(Item *item_expr, longlong *result)
{
- longlong value= item_expr->val_int();
+ *result= item_expr->val_int();
if (item_expr->null_value)
- value= LONGLONG_MIN;
- return value;
+ {
+ if (current_thd->is_error())
+ return TRUE;
+ else
+ *result= LONGLONG_MIN;
+ }
+ return FALSE;
}
@@ -2319,24 +2328,29 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
get_part_id_hash()
no_parts Number of hash partitions
part_expr Item tree of hash function
- out:func_value Value of hash function
+ out:part_id The returned partition id
+ out:func_value Value of hash function
RETURN VALUE
- Calculated partition id
+ != 0 Error code
+    0                              Success
*/
-inline
-static uint32 get_part_id_hash(uint no_parts,
- Item *part_expr,
- longlong *func_value)
+static int get_part_id_hash(uint no_parts,
+ Item *part_expr,
+ uint32 *part_id,
+ longlong *func_value)
{
longlong int_hash_id;
DBUG_ENTER("get_part_id_hash");
- *func_value= part_val_int(part_expr);
+ if (part_val_int(part_expr, func_value))
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
int_hash_id= *func_value % no_parts;
- DBUG_RETURN(int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id);
+ *part_id= int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id;
+ DBUG_RETURN(FALSE);
}
@@ -2349,24 +2363,29 @@ static uint32 get_part_id_hash(uint no_parts,
desired information is given
no_parts Number of hash partitions
part_expr Item tree of hash function
+ out:part_id The returned partition id
out:func_value Value of hash function
RETURN VALUE
- Calculated partition id
+ != 0 Error code
+ 0 OK
*/
-inline
-static uint32 get_part_id_linear_hash(partition_info *part_info,
- uint no_parts,
- Item *part_expr,
- longlong *func_value)
+static int get_part_id_linear_hash(partition_info *part_info,
+ uint no_parts,
+ Item *part_expr,
+ uint32 *part_id,
+ longlong *func_value)
{
DBUG_ENTER("get_part_id_linear_hash");
- *func_value= part_val_int(part_expr);
- DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
- part_info->linear_hash_mask,
- no_parts));
+ if (part_val_int(part_expr, func_value))
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ *part_id= get_part_id_from_linear_hash(*func_value,
+ part_info->linear_hash_mask,
+ no_parts);
+ DBUG_RETURN(FALSE);
}
@@ -2503,49 +2522,7 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
}
return;
}
-/*
- This function is used to calculate the partition id where all partition
- fields have been prepared to point to a record where the partition field
- values are bound.
- SYNOPSIS
- get_partition_id()
- part_info A reference to the partition_info struct where all the
- desired information is given
- out:part_id The partition id is returned through this pointer
- out: func_value Value of partition function (longlong)
-
- RETURN VALUE
- part_id Partition id of partition that would contain
- row with given values of PF-fields
- HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't
- fit into any partition and thus the values of
- the PF-fields are not allowed.
-
- DESCRIPTION
- A routine used from write_row, update_row and delete_row from any
- handler supporting partitioning. It is also a support routine for
- get_partition_set used to find the set of partitions needed to scan
- for a certain index scan or full table scan.
-
- It is actually 14 different variants of this function which are called
- through a function pointer.
-
- get_partition_id_list
- get_partition_id_range
- get_partition_id_hash_nosub
- get_partition_id_key_nosub
- get_partition_id_linear_hash_nosub
- get_partition_id_linear_key_nosub
- get_partition_id_range_sub_hash
- get_partition_id_range_sub_key
- get_partition_id_range_sub_linear_hash
- get_partition_id_range_sub_linear_key
- get_partition_id_list_sub_hash
- get_partition_id_list_sub_key
- get_partition_id_list_sub_linear_hash
- get_partition_id_list_sub_linear_key
-*/
/*
This function is used to calculate the main partition to use in the case of
@@ -2557,14 +2534,13 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
part_info A reference to the partition_info struct where all the
desired information is given
out:part_id The partition id is returned through this pointer
- out: func_value The value calculated by partition function
+ out:func_value The value calculated by partition function
RETURN VALUE
- part_id Partition id of partition that would contain
- row with given values of PF-fields
HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't
fit into any partition and thus the values of
the PF-fields are not allowed.
+ 0 OK
DESCRIPTION
@@ -2640,13 +2616,14 @@ static int get_part_part_id_charset_func(partition_info *part_info,
}
-static uint32 get_subpart_id_charset_func(partition_info *part_info)
+static int get_subpart_id_charset_func(partition_info *part_info,
+ uint32 *part_id)
{
int res;
copy_to_part_field_buffers(part_info->subpart_charset_field_array,
part_info->subpart_field_buffers,
part_info->restore_subpart_field_ptrs);
- res= part_info->get_subpartition_id_charset(part_info);
+ res= part_info->get_subpartition_id_charset(part_info, part_id);
restore_part_field_pointers(part_info->subpart_charset_field_array,
part_info->restore_subpart_field_ptrs);
return res;
@@ -2661,11 +2638,15 @@ int get_partition_id_list(partition_info *part_info,
int list_index;
int min_list_index= 0;
int max_list_index= part_info->no_list_values - 1;
- longlong part_func_value= part_val_int(part_info->part_expr);
+ longlong part_func_value;
+ int error= part_val_int(part_info->part_expr, &part_func_value);
longlong list_value;
bool unsigned_flag= part_info->part_expr->unsigned_flag;
DBUG_ENTER("get_partition_id_list");
+ if (error)
+ goto notfound;
+
if (part_info->part_expr->null_value)
{
if (part_info->has_null_value)
@@ -2809,10 +2790,14 @@ int get_partition_id_range(partition_info *part_info,
uint min_part_id= 0;
uint max_part_id= max_partition;
uint loc_part_id;
- longlong part_func_value= part_val_int(part_info->part_expr);
+ longlong part_func_value;
+ int error= part_val_int(part_info->part_expr, &part_func_value);
bool unsigned_flag= part_info->part_expr->unsigned_flag;
DBUG_ENTER("get_partition_id_range");
+ if (error)
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
if (part_info->part_expr->null_value)
{
*part_id= 0;
@@ -2970,9 +2955,8 @@ int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr,
- func_value);
- return 0;
+ return get_part_id_hash(part_info->no_parts, part_info->part_expr,
+ part_id, func_value);
}
@@ -2980,9 +2964,8 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- *part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
- part_info->part_expr, func_value);
- return 0;
+ return get_part_id_linear_hash(part_info, part_info->no_parts,
+ part_info->part_expr, part_id, func_value);
}
@@ -3016,6 +2999,8 @@ int get_partition_id_range_sub_hash(partition_info *part_info,
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_hash");
+ LINT_INIT(loc_part_id);
+ LINT_INIT(sub_part_id);
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
@@ -3023,8 +3008,12 @@ int get_partition_id_range_sub_hash(partition_info *part_info,
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
- &local_func_value);
+ if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
+ &sub_part_id, &local_func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
@@ -3039,6 +3028,8 @@ int get_partition_id_range_sub_linear_hash(partition_info *part_info,
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_linear_hash");
+ LINT_INIT(loc_part_id);
+ LINT_INIT(sub_part_id);
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
@@ -3046,9 +3037,14 @@ int get_partition_id_range_sub_linear_hash(partition_info *part_info,
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
- part_info->subpart_expr,
- &local_func_value);
+ if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
+ part_info->subpart_expr,
+ &sub_part_id,
+ &local_func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
@@ -3063,6 +3059,7 @@ int get_partition_id_range_sub_key(partition_info *part_info,
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_key");
+ LINT_INIT(loc_part_id);
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
@@ -3086,6 +3083,7 @@ int get_partition_id_range_sub_linear_key(partition_info *part_info,
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_range_sub_linear_key");
+ LINT_INIT(loc_part_id);
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
func_value))))
@@ -3110,6 +3108,7 @@ int get_partition_id_list_sub_hash(partition_info *part_info,
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_list_sub_hash");
+ LINT_INIT(sub_part_id);
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
func_value))))
@@ -3117,8 +3116,12 @@ int get_partition_id_list_sub_hash(partition_info *part_info,
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr,
- &local_func_value);
+ if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
+ &sub_part_id, &local_func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
@@ -3133,6 +3136,7 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
longlong local_func_value;
int error;
DBUG_ENTER("get_partition_id_list_sub_linear_hash");
+ LINT_INIT(sub_part_id);
if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
func_value))))
@@ -3140,9 +3144,14 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
- part_info->subpart_expr,
- &local_func_value);
+ if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
+ part_info->subpart_expr,
+ &sub_part_id,
+ &local_func_value))))
+ {
+ DBUG_RETURN(error);
+ }
+
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
DBUG_RETURN(0);
}
@@ -3219,36 +3228,43 @@ int get_partition_id_list_sub_linear_key(partition_info *part_info,
get_partition_id_linear_key_sub
*/
-uint32 get_partition_id_hash_sub(partition_info *part_info)
+int get_partition_id_hash_sub(partition_info *part_info,
+ uint32 *part_id)
{
longlong func_value;
return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
- &func_value);
+ part_id, &func_value);
}
-uint32 get_partition_id_linear_hash_sub(partition_info *part_info)
+int get_partition_id_linear_hash_sub(partition_info *part_info,
+ uint32 *part_id)
{
longlong func_value;
return get_part_id_linear_hash(part_info, part_info->no_subparts,
- part_info->subpart_expr, &func_value);
+ part_info->subpart_expr, part_id,
+ &func_value);
}
-uint32 get_partition_id_key_sub(partition_info *part_info)
+int get_partition_id_key_sub(partition_info *part_info,
+ uint32 *part_id)
{
longlong func_value;
- return get_part_id_key(part_info->subpart_field_array,
- part_info->no_subparts, &func_value);
+ *part_id= get_part_id_key(part_info->subpart_field_array,
+ part_info->no_subparts, &func_value);
+ return FALSE;
}
-uint32 get_partition_id_linear_key_sub(partition_info *part_info)
+int get_partition_id_linear_key_sub(partition_info *part_info,
+ uint32 *part_id)
{
longlong func_value;
- return get_part_id_linear_key(part_info,
- part_info->subpart_field_array,
- part_info->no_subparts, &func_value);
+ *part_id= get_part_id_linear_key(part_info,
+ part_info->subpart_field_array,
+ part_info->no_subparts, &func_value);
+ return FALSE;
}
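
The hunks above all apply one pattern: each get_partition_id_*_sub helper that used to return the subpartition id directly now returns an int error code and writes the id through an out parameter, so a failing partition function can finally propagate an error. A minimal stand-alone sketch of that pattern, with invented names rather than the real partition_info API:

#include <cstdint>
#include <cstdio>

/* Illustrative error code; the server uses HA_ERR_NO_PARTITION_FOUND. */
static const int SKETCH_ERR_NO_PARTITION= 160;

/* Old style: the return value is the id, so errors cannot be reported. */
static uint32_t get_id_old(int64_t func_value, uint32_t no_parts)
{
  return (uint32_t) ((uint64_t) func_value % no_parts);
}

/* New style: the return value is an error code; the id goes through an
   out parameter, mirroring get_part_id_hash() after the patch. */
static int get_id_new(int64_t func_value, uint32_t no_parts,
                      uint32_t *part_id)
{
  if (no_parts == 0)                  /* an error the old API had to hide */
    return SKETCH_ERR_NO_PARTITION;
  *part_id= (uint32_t) ((uint64_t) func_value % no_parts);
  return 0;
}

int main()
{
  uint32_t id;
  int error;
  /* The same "if ((error= ...))" shape as the hunks above. */
  if ((error= get_id_new(7, 4, &id)))
    return error;
  printf("id (new API): %u, id (old API): %u\n",
         (unsigned) id, (unsigned) get_id_old(7, 4));
  return 0;
}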
@@ -3337,37 +3353,40 @@ static bool check_part_func_bound(Field **ptr)
buf A buffer that can be used to evaluate the partition function
key_info The index object
key_spec A key_range containing key and key length
+    out:part_id             The returned subpartition id
RETURN VALUES
- part_id Subpartition id to use
+    0                       Success; part_id contains the subpartition id
+    != 0                    Error code from the subpartition function
DESCRIPTION
Use key buffer to set-up record in buf, move field pointers and
get the partition identity and restore field pointers afterwards.
*/
-static uint32 get_sub_part_id_from_key(const TABLE *table,uchar *buf,
- KEY *key_info,
- const key_range *key_spec)
+static int get_sub_part_id_from_key(const TABLE *table,uchar *buf,
+ KEY *key_info,
+ const key_range *key_spec,
+ uint32 *part_id)
{
uchar *rec0= table->record[0];
partition_info *part_info= table->part_info;
- uint32 part_id;
+ int res;
DBUG_ENTER("get_sub_part_id_from_key");
key_restore(buf, (uchar*)key_spec->key, key_info, key_spec->length);
if (likely(rec0 == buf))
{
- part_id= part_info->get_subpartition_id(part_info);
+ res= part_info->get_subpartition_id(part_info, part_id);
}
else
{
Field **part_field_array= part_info->subpart_field_array;
set_field_ptr(part_field_array, buf, rec0);
- part_id= part_info->get_subpartition_id(part_info);
+ res= part_info->get_subpartition_id(part_info, part_id);
set_field_ptr(part_field_array, rec0, buf);
}
- DBUG_RETURN(part_id);
+ DBUG_RETURN(res);
}
/*
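
get_sub_part_id_from_key() adds one more idiom on top of that convention: when the row image lives in a buffer other than record[0], the field pointers are moved onto that buffer, the subpartition id is computed, and the pointers are always restored before returning. A simplified model, with toy types standing in for the server's Field machinery:

#include <cstdint>
#include <cstdio>

struct EvalState { const uint8_t *rec; };    /* stand-in for field pointers */

static int compute_id(const EvalState *s, uint32_t *id)
{
  *id= (uint32_t) (s->rec[0] % 4);           /* toy "subpartition function" */
  return 0;                                  /* 0 == success, as above */
}

static int id_from_alt_buffer(EvalState *s, const uint8_t *rec0,
                              const uint8_t *buf, uint32_t *id)
{
  if (buf == rec0)                           /* fast path: row already there */
    return compute_id(s, id);
  const uint8_t *saved= s->rec;              /* move the "field pointers"... */
  s->rec= buf;
  int res= compute_id(s, id);                /* ...evaluate against buf... */
  s->rec= saved;                             /* ...and always restore them */
  return res;
}

int main()
{
  uint8_t rec0[1]= {9}, keybuf[1]= {6};
  EvalState s= {rec0};
  uint32_t id;
  if (!id_from_alt_buffer(&s, rec0, keybuf, &id))
    printf("subpartition id: %u\n", (unsigned) id);   /* prints 2 */
  return 0;
}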
@@ -3586,7 +3605,13 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
else if (part_info->is_sub_partitioned())
{
if (part_info->all_fields_in_SPF.is_set(index))
- sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ {
+ if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
+ {
+ part_spec->start_part= no_parts;
+ DBUG_VOID_RETURN;
+ }
+ }
else if (part_info->all_fields_in_PPF.is_set(index))
{
if (get_part_id_from_key(table,buf,key_info,
@@ -3632,7 +3657,14 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
else if (part_info->is_sub_partitioned())
{
if (check_part_func_bound(part_info->subpart_field_array))
- sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ {
+ if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
+ {
+ part_spec->start_part= no_parts;
+ clear_indicator_in_key_fields(key_info);
+ DBUG_VOID_RETURN;
+ }
+ }
else if (check_part_func_bound(part_info->part_field_array))
{
if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
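
Both error paths in get_partition_set() encode "no matching partitions" by setting part_spec->start_part to the number of partitions, which makes any start..end scan loop empty. A small self-contained illustration of that convention (struct and values are assumptions, not the server's definitions):

#include <cstdint>
#include <cstdio>

/* Stand-in for the server's part_id_range. */
struct part_id_range_sketch { uint32_t start_part, end_part; };

static void mark_no_matching_partitions(part_id_range_sketch *spec,
                                        uint32_t no_parts)
{
  spec->start_part= no_parts;       /* first id past the valid range... */
  spec->end_part= no_parts - 1;     /* ...so start_part > end_part */
}

int main()
{
  part_id_range_sketch spec;
  mark_no_matching_partitions(&spec, 4);
  for (uint32_t p= spec.start_part; p <= spec.end_part; p++)
    printf("scanning partition %u\n", (unsigned) p);  /* never runs */
  printf("empty range: start=%u end=%u\n",
         (unsigned) spec.start_part, (unsigned) spec.end_part);
  return 0;
}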
@@ -6836,9 +6868,11 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
field->set_null();
if (is_subpart)
{
- part_id= part_info->get_subpartition_id(part_info);
- init_single_partition_iterator(part_id, part_iter);
- return 1; /* Ok, iterator initialized */
+ if (!part_info->get_subpartition_id(part_info, &part_id))
+ {
+ init_single_partition_iterator(part_id, part_iter);
+ return 1; /* Ok, iterator initialized */
+ }
}
else
{
@@ -7007,13 +7041,18 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
{
Field *field= part_iter->part_info->subpart_field_array[0];
+ uint32 res;
if (part_iter->field_vals.cur == part_iter->field_vals.end)
{
part_iter->field_vals.cur= part_iter->field_vals.start;
return NOT_A_PARTITION_ID;
}
field->store(part_iter->field_vals.cur++, FALSE);
- return part_iter->part_info->get_subpartition_id(part_iter->part_info);
+ if (part_iter->part_info->get_subpartition_id(part_iter->part_info,
+ &res))
+ return NOT_A_PARTITION_ID;
+ return res;
+
}
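
get_next_subpartition_via_walking() folds the new error case into the existing NOT_A_PARTITION_ID sentinel, so callers keep a single termination test. A rough model with invented constants:

#include <cstdint>
#include <cstdio>

static const uint32_t SKETCH_NOT_A_PARTITION_ID= 0xFFFFFFFF;

struct walker { const int *cur, *end; };

static int subpart_id(int v, uint32_t *id)
{
  if (v < 0)                    /* model of a failing partition function */
    return 1;
  *id= (uint32_t) v % 2;
  return 0;
}

static uint32_t next_subpartition(walker *w)
{
  uint32_t id;
  if (w->cur == w->end)
    return SKETCH_NOT_A_PARTITION_ID;   /* values exhausted */
  if (subpart_id(*w->cur++, &id))
    return SKETCH_NOT_A_PARTITION_ID;   /* error folded into the sentinel */
  return id;
}

int main()
{
  int vals[]= {3, 8, -1};
  walker w= {vals, vals + 3};
  for (uint32_t id; (id= next_subpartition(&w)) != SKETCH_NOT_A_PARTITION_ID; )
    printf("subpartition %u\n", (unsigned) id);       /* prints 1 then 0 */
  return 0;
}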
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 6510d2428db..102809f9f64 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -14804,6 +14804,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
Item *pos;
List_iterator_fast<Item> li(all_fields);
Copy_field *copy= NULL;
+ IF_DBUG(Copy_field *copy_start);
res_selected_fields.empty();
res_all_fields.empty();
List_iterator_fast<Item> itr(res_all_fields);
@@ -14816,12 +14817,19 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
goto err2;
param->copy_funcs.empty();
+ IF_DBUG(copy_start= copy);
for (i= 0; (pos= li++); i++)
{
Field *field;
uchar *tmp;
Item *real_pos= pos->real_item();
- if (real_pos->type() == Item::FIELD_ITEM)
+ /*
+        Aggregate functions can be replaced by fields (e.g. when they are
+        materialized in a temp table). We need to filter those
+        substituted fields out.
+ */
+ if (real_pos->type() == Item::FIELD_ITEM &&
+ !(real_pos != pos &&
+ ((Item_ref *)pos)->ref_type() == Item_ref::AGGREGATE_REF))
{
Item_field *item;
if (!(item= new Item_field(thd, ((Item_field*) real_pos))))
@@ -14868,6 +14876,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
goto err;
if (copy)
{
+ DBUG_ASSERT (param->field_count > (uint) (copy - copy_start));
copy->set(tmp, item->result_field);
item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1);
#ifdef HAVE_purify
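
Two things change in setup_copy_fields(): aggregate items that a temp table substituted with fields are filtered out, and a debug-only assertion verifies that the copy cursor never runs past the field_count slots allocated in advance. The bounds check alone, in miniature (Copy_field replaced by a toy struct):

#include <cassert>
#include <cstdio>

struct CopySlot { int from, to; };          /* toy stand-in for Copy_field */

int main()
{
  const unsigned field_count= 2;            /* slots counted in advance */
  CopySlot slots[field_count];
  CopySlot *copy= slots;
  CopySlot *copy_start= copy;               /* mirrors IF_DBUG(copy_start= copy) */

  int fields_to_copy[]= {10, 20};
  for (int f : fields_to_copy)
  {
    /* The added check: the cursor must still be inside the array. */
    assert(field_count > (unsigned) (copy - copy_start));
    copy->from= f;
    copy->to= f;
    copy++;
  }
  printf("copied %ld fields\n", (long) (copy - copy_start));
  return 0;
}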
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index a57d966f173..fc2c2548cc4 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -4257,6 +4257,27 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
!my_strcasecmp(system_charset_info, tables->definer.host.str,
sctx->priv_host))
tables->allowed_show= TRUE;
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ else
+ {
+ if ((thd->col_access & (SHOW_VIEW_ACL|SELECT_ACL)) ==
+ (SHOW_VIEW_ACL|SELECT_ACL))
+ tables->allowed_show= TRUE;
+ else
+ {
+ TABLE_LIST table_list;
+ uint view_access;
+ memset(&table_list, 0, sizeof(table_list));
+ table_list.db= tables->view_db.str;
+ table_list.table_name= tables->view_name.str;
+ table_list.grant.privilege= thd->col_access;
+ view_access= get_table_grant(thd, &table_list);
+ if ((view_access & (SHOW_VIEW_ACL|SELECT_ACL)) ==
+ (SHOW_VIEW_ACL|SELECT_ACL))
+ tables->allowed_show= TRUE;
+ }
+ }
+#endif
}
restore_record(table, s->default_values);
tmp_db_name= &tables->view_db;
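
The added branch grants allowed_show only when the caller holds both SHOW VIEW and SELECT, hence the mask-and-compare "(access & (A|B)) == (A|B)" rather than a plain non-zero test. A tiny stand-alone version with illustrative bit values (not the server's ACL constants):

#include <cstdio>

/* Illustrative bit values, not the real privilege masks. */
static const unsigned long SKETCH_SELECT_ACL=    1UL << 0;
static const unsigned long SKETCH_SHOW_VIEW_ACL= 1UL << 10;

static bool can_show_view(unsigned long access)
{
  const unsigned long need= SKETCH_SHOW_VIEW_ACL | SKETCH_SELECT_ACL;
  return (access & need) == need;           /* both privileges required */
}

int main()
{
  printf("SELECT only:        %d\n", can_show_view(SKETCH_SELECT_ACL));
  printf("SELECT + SHOW VIEW: %d\n",
         can_show_view(SKETCH_SELECT_ACL | SKETCH_SHOW_VIEW_ACL));
  return 0;
}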
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index e8473fb49b6..c866fdfc173 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1835,8 +1835,9 @@ bool quick_rm_table(handlerton *base,const char *db,
if (my_delete(path,MYF(0)))
error= 1; /* purecov: inspected */
path[path_length - reg_ext_length]= '\0'; // Remove reg_ext
- DBUG_RETURN(ha_delete_table(current_thd, base, path, db, table_name, 0) ||
- error);
+ if (!(flags & FRM_ONLY))
+ error|= ha_delete_table(current_thd, base, path, db, table_name, 0);
+ DBUG_RETURN(error);
}
/*
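
quick_rm_table() now honors an FRM_ONLY flag: the .frm file is always deleted, but the storage engine is asked to drop its data only when the flag is absent, which lets ALTER TABLE discard a temporary definition whose engine-side table was never created. A hedged sketch with invented flag values and stub helpers:

#include <cstdio>

/* Invented flag values and stubs; only the control flow matters. */
static const unsigned SKETCH_FN_IS_TMP= 1U << 0;
static const unsigned SKETCH_FRM_ONLY=  1U << 1;

static int rm_frm(const char *path)
{ printf("delete %s.frm\n", path); return 0; }

static int ha_drop_table(const char *path)
{ printf("engine drops %s\n", path); return 0; }

static int quick_rm_table_sketch(const char *path, unsigned flags)
{
  int error= rm_frm(path);              /* the .frm is always removed */
  if (!(flags & SKETCH_FRM_ONLY))       /* engine data only without FRM_ONLY */
    error|= ha_drop_table(path);
  return error;
}

int main()
{
  quick_rm_table_sketch("tmp/sql-temp", SKETCH_FN_IS_TMP | SKETCH_FRM_ONLY);
  quick_rm_table_sketch("db/t1", 0);
  return 0;
}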
@@ -5164,6 +5165,7 @@ err:
index_drop_count OUT The number of elements in the array.
index_add_buffer OUT An array of offsets into key_info_buffer.
index_add_count OUT The number of elements in the array.
+    candidate_key_count OUT The number of candidate keys in the original table.
DESCRIPTION
'table' (first argument) contains information of the original
@@ -5194,7 +5196,8 @@ compare_tables(TABLE *table,
enum_alter_table_change_level *need_copy_table,
KEY **key_info_buffer,
uint **index_drop_buffer, uint *index_drop_count,
- uint **index_add_buffer, uint *index_add_count)
+ uint **index_add_buffer, uint *index_add_count,
+ uint *candidate_key_count)
{
Field **f_ptr, *field;
uint changes= 0, tmp;
@@ -5209,6 +5212,9 @@ compare_tables(TABLE *table,
create_info->varchar will be reset in mysql_prepare_create_table.
*/
bool varchar= create_info->varchar;
+ bool not_nullable= true;
+ DBUG_ENTER("compare_tables");
+
/*
Create a copy of alter_info.
To compare the new and old table definitions, we need to "prepare"
@@ -5226,24 +5232,21 @@ compare_tables(TABLE *table,
*/
Alter_info tmp_alter_info(*alter_info, thd->mem_root);
uint db_options= 0; /* not used */
-
- DBUG_ENTER("compare_tables");
-
/* Create the prepared information. */
if (mysql_prepare_create_table(thd, create_info,
- &tmp_alter_info,
- (table->s->tmp_table != NO_TMP_TABLE),
- &db_options,
- table->file, key_info_buffer,
- &key_count, 0))
+ &tmp_alter_info,
+ (table->s->tmp_table != NO_TMP_TABLE),
+ &db_options,
+ table->file, key_info_buffer,
+ &key_count, 0))
DBUG_RETURN(1);
/* Allocate result buffers. */
if (! (*index_drop_buffer=
- (uint*) thd->alloc(sizeof(uint) * table->s->keys)) ||
+ (uint*) thd->alloc(sizeof(uint) * table->s->keys)) ||
! (*index_add_buffer=
- (uint*) thd->alloc(sizeof(uint) * tmp_alter_info.key_list.elements)))
+ (uint*) thd->alloc(sizeof(uint) * tmp_alter_info.key_list.elements)))
DBUG_RETURN(1);
-
+
/*
Some very basic checks. If number of fields changes, or the
handler, we need to run full ALTER TABLE. In the future
@@ -5356,12 +5359,29 @@ compare_tables(TABLE *table,
*/
*index_drop_count= 0;
*index_add_count= 0;
+ *candidate_key_count= 0;
for (table_key= table->key_info; table_key < table_key_end; table_key++)
{
KEY_PART_INFO *table_part;
KEY_PART_INFO *table_part_end= table_key->key_part + table_key->key_parts;
KEY_PART_INFO *new_part;
+ /*
+      Check if the key is a candidate key, i.e. a unique index with no
+      nullable index fields. Such a key is either already the primary
+      key or could be promoted to primary key if the original primary
+      key is dropped. Count all candidate keys.
+ */
+ not_nullable= true;
+ for (table_part= table_key->key_part;
+ table_part < table_part_end;
+ table_part++)
+ {
+ not_nullable= not_nullable && (! table_part->field->maybe_null());
+ }
+ if ((table_key->flags & HA_NOSAME) && not_nullable)
+ (*candidate_key_count)++;
+
/* Search a new key with the same name. */
for (new_key= *key_info_buffer; new_key < new_key_end; new_key++)
{
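
The census above treats a key as a candidate key when it is unique (HA_NOSAME) and every key part is NOT NULL; only such a key can serve as, or be promoted to, the primary key. The same test on simplified structures (not the server's KEY/KEY_PART_INFO types):

#include <cstdio>
#include <vector>

/* Simplified stand-ins for KEY and KEY_PART_INFO. */
struct KeyPartSketch { bool nullable; };
struct KeySketch { bool unique; std::vector<KeyPartSketch> parts; };

static unsigned count_candidate_keys(const std::vector<KeySketch> &keys)
{
  unsigned candidates= 0;
  for (const KeySketch &key : keys)
  {
    bool not_nullable= true;            /* same accumulation as above */
    for (const KeyPartSketch &part : key.parts)
      not_nullable= not_nullable && !part.nullable;
    if (key.unique && not_nullable)     /* HA_NOSAME + all NOT NULL */
      candidates++;
  }
  return candidates;
}

int main()
{
  std::vector<KeySketch> keys= {
    {true,  {{false}, {false}}},   /* unique, all NOT NULL: candidate  */
    {true,  {{false}, {true}}},    /* unique but one nullable part: no */
    {false, {{false}}}             /* non-unique: no                   */
  };
  printf("candidate keys: %u\n", count_candidate_keys(keys));  /* 1 */
  return 0;
}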
@@ -5987,13 +6007,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
uint *index_drop_buffer;
uint index_add_count;
uint *index_add_buffer;
+ uint candidate_key_count;
bool committed= 0;
+ bool no_pk;
DBUG_ENTER("mysql_alter_table");
LINT_INIT(index_add_count);
LINT_INIT(index_drop_count);
LINT_INIT(index_add_buffer);
LINT_INIT(index_drop_buffer);
+ LINT_INIT(candidate_key_count);
/*
Check if we attempt to alter mysql.slow_log or
@@ -6404,7 +6427,8 @@ view_err:
&need_copy_table_res,
&key_info_buffer,
&index_drop_buffer, &index_drop_count,
- &index_add_buffer, &index_add_count))
+ &index_add_buffer, &index_add_count,
+ &candidate_key_count))
goto err;
if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
@@ -6438,20 +6462,40 @@ view_err:
DBUG_PRINT("info", ("index dropped: '%s'", key->name));
if (key->flags & HA_NOSAME)
{
- /* Unique key. Check for "PRIMARY". */
- if (! my_strcasecmp(system_charset_info,
- key->name, primary_key_name))
+ /*
+          Unique key. Check for "PRIMARY", i.e. whether we are
+          dropping the key that currently serves as primary key.
+ */
+ if ((uint) (key - table->key_info) == table->s->primary_key)
{
+ DBUG_PRINT("info", ("Dropping primary key"));
/* Primary key. */
needed_online_flags|= HA_ONLINE_DROP_PK_INDEX;
needed_fast_flags|= HA_ONLINE_DROP_PK_INDEX_NO_WRITES;
pk_changed++;
+ candidate_key_count--;
}
else
{
+ KEY_PART_INFO *part_end= key->key_part + key->key_parts;
+ bool is_candidate_key= true;
+
/* Non-primary unique key. */
needed_online_flags|= HA_ONLINE_DROP_UNIQUE_INDEX;
needed_fast_flags|= HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES;
+
+ /*
+ Check if all fields in key are declared
+ NOT NULL and adjust candidate_key_count
+ */
+ for (KEY_PART_INFO *key_part= key->key_part;
+ key_part < part_end;
+ key_part++)
+ is_candidate_key=
+ (is_candidate_key &&
+ (! table->field[key_part->fieldnr-1]->maybe_null()));
+ if (is_candidate_key)
+ candidate_key_count--;
}
}
else
@@ -6461,7 +6505,8 @@ view_err:
needed_fast_flags|= HA_ONLINE_DROP_INDEX_NO_WRITES;
}
}
-
+ no_pk= ((table->s->primary_key == MAX_KEY) ||
+ (needed_online_flags & HA_ONLINE_DROP_PK_INDEX));
/* Check added indexes. */
for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
idx_p < idx_end_p;
@@ -6471,14 +6516,38 @@ view_err:
DBUG_PRINT("info", ("index added: '%s'", key->name));
if (key->flags & HA_NOSAME)
{
- /* Unique key. Check for "PRIMARY". */
- if (! my_strcasecmp(system_charset_info,
- key->name, primary_key_name))
+ /* Unique key */
+
+ KEY_PART_INFO *part_end= key->key_part + key->key_parts;
+ bool is_candidate_key= true;
+
+ /*
+ Check if all fields in key are declared
+ NOT NULL
+ */
+ for (KEY_PART_INFO *key_part= key->key_part;
+ key_part < part_end;
+ key_part++)
+ is_candidate_key=
+ (is_candidate_key &&
+ (! table->field[key_part->fieldnr]->maybe_null()));
+
+ /*
+ Check for "PRIMARY"
+ or if adding first unique key
+ defined on non-nullable fields
+ */
+
+ if ((!my_strcasecmp(system_charset_info,
+ key->name, primary_key_name)) ||
+ (no_pk && candidate_key_count == 0 && is_candidate_key))
{
+ DBUG_PRINT("info", ("Adding primary key"));
/* Primary key. */
needed_online_flags|= HA_ONLINE_ADD_PK_INDEX;
needed_fast_flags|= HA_ONLINE_ADD_PK_INDEX_NO_WRITES;
pk_changed++;
+ no_pk= false;
}
else
{
@@ -6495,6 +6564,20 @@ view_err:
}
}
+ if ((candidate_key_count > 0) &&
+ (needed_online_flags & HA_ONLINE_DROP_PK_INDEX))
+ {
+ /*
+      The primary key is dropped while some other unique NOT NULL
+      key remains; that key will be promoted to primary key.
+ */
+ needed_online_flags|= HA_ONLINE_ADD_PK_INDEX;
+ needed_fast_flags|= HA_ONLINE_ADD_PK_INDEX_NO_WRITES;
+ pk_changed= 2;
+ }
+
+ DBUG_PRINT("info", ("needed_online_flags: 0x%lx, needed_fast_flags: 0x%lx",
+ needed_online_flags, needed_fast_flags));
/*
Online or fast add/drop index is possible only if
the primary key is not added and dropped in the same statement.
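
The last hunk covers implicit promotion: dropping the primary key while another candidate key survives makes the engine promote that key, so the statement must be flagged as both dropping and adding a primary key even though no ADD PRIMARY KEY was written. A compressed decision model with illustrative flag values (not the real HA_ONLINE_* constants):

#include <cstdio>

/* Illustrative flag bits only. */
static const unsigned long SKETCH_DROP_PK= 1UL << 0;
static const unsigned long SKETCH_ADD_PK=  1UL << 1;

int main()
{
  unsigned long online_flags= SKETCH_DROP_PK;  /* DROP PRIMARY KEY seen */
  unsigned candidate_key_count= 1;  /* one unique NOT NULL key survives */
  unsigned pk_changed= 1;

  if (candidate_key_count > 0 && (online_flags & SKETCH_DROP_PK))
  {
    online_flags|= SKETCH_ADD_PK;   /* implicit promotion to primary key */
    pk_changed= 2;                  /* primary key both dropped and added */
  }
  printf("flags: 0x%lx, pk_changed: %u\n", online_flags, pk_changed);
  return 0;
}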
@@ -6993,7 +7076,10 @@ err1:
close_temporary_table(thd, new_table, 1, 1);
}
else
- VOID(quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP));
+ VOID(quick_rm_table(new_db_type, new_db, tmp_name,
+ create_info->frm_only
+ ? FN_IS_TMP | FRM_ONLY
+ : FN_IS_TMP));
err:
/*
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 5f7e3c5b620..2f3282f58f6 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -399,7 +399,6 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
}
else
{
- DBUG_ASSERT(!thd->stmt_arena->is_conventional());
/*
We're in execution of a prepared statement or stored procedure:
reset field items to point at fields from the created temporary table.
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b9ad88ee663..197e5290aba 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1039,7 +1039,7 @@ reopen_tables:
correct order of statements. Otherwise, we use a TL_READ lock to
improve performance.
*/
- tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ;
+ tl->lock_type= read_lock_type_for_table(thd, table);
tl->updating= 0;
/* Update TABLE::lock_type accordingly. */
if (!tl->placeholder() && !using_lock_tables)
@@ -1076,11 +1076,14 @@ reopen_tables:
}
/* now lock and fill tables */
- if (lock_tables(thd, table_list, table_count, &need_reopen))
+ if (!thd->stmt_arena->is_stmt_prepare() &&
+ lock_tables(thd, table_list, table_count, &need_reopen))
{
if (!need_reopen)
DBUG_RETURN(TRUE);
+ DBUG_PRINT("info", ("lock_tables failed, reopening"));
+
/*
We have to reopen tables since some of them were altered or dropped
during lock_tables() or something was done with their triggers.
@@ -1096,6 +1099,34 @@ reopen_tables:
for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global)
tbl->cleanup_items();
+ /*
+      To avoid hogging memory as a result of the
+      unit->reinit_exec_mechanism() call below:
+ */
+ lex->unit.cleanup();
+
+ for (SELECT_LEX *sl= lex->all_selects_list;
+ sl;
+ sl= sl->next_select_in_list())
+ {
+ SELECT_LEX_UNIT *unit= sl->master_unit();
+ unit->reinit_exec_mechanism(); // reset unit->prepared flags
+ /*
+ Reset 'clean' flag back to force normal execution of
+ unit->cleanup() in Prepared_statement::cleanup_stmt()
+ (call to lex->unit.cleanup() above sets this flag to TRUE).
+ */
+ unit->unclean();
+ }
+
+ /*
+      We also need to clean up Natural_join_column::table_field items.
+      To avoid traversing the join tree, we clean up the whole
+      thd->free_list (in PS execution mode that list may not contain
+      items from the 'fields' list, so the cleanup above is necessary
+      too).
+ */
+ cleanup_items(thd->free_list);
+
close_tables_for_reopen(thd, &table_list);
goto reopen_tables;
}
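
The retry protocol above, after a failed lock_tables(): reset every unit to "not prepared", force its cleanup flag back on so the next cleanup pass actually runs, free transient items, then reopen the tables and try again. A rough stand-alone model (invented names; the real SELECT_LEX_UNIT machinery is far richer):

#include <cstdio>

struct UnitSketch { bool prepared, cleaned; };

/* Fails on the first attempt (true == error, as in the server). */
static bool lock_tables_sketch(int attempt) { return attempt == 0; }

int main()
{
  UnitSketch unit= {true, false};
  int attempt= 0;

reopen_tables:
  if (lock_tables_sketch(attempt))
  {
    attempt++;
    unit.prepared= false;  /* reinit_exec_mechanism(): force re-preparation */
    unit.cleaned= false;   /* unclean(): make the next cleanup pass run */
    /* ...free transient items, close and reopen the tables... */
    printf("retrying after reopen\n");
    goto reopen_tables;
  }
  printf("locked on attempt %d (prepared=%d)\n", attempt, (int) unit.prepared);
  return 0;
}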
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 12b2adf4f13..87b073af078 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1947,7 +1947,7 @@ mysql_rename_view(THD *thd,
goto err;
/* rename view and it's backups */
- if (rename_in_schema_file(view->db, view->table_name, new_name,
+ if (rename_in_schema_file(thd, view->db, view->table_name, new_name,
view_def.revision - 1, num_view_backups))
goto err;
@@ -1967,7 +1967,7 @@ mysql_rename_view(THD *thd,
num_view_backups))
{
/* restore renamed view in case of error */
- rename_in_schema_file(view->db, new_name, view->table_name,
+ rename_in_schema_file(thd, view->db, new_name, view->table_name,
view_def.revision - 1, num_view_backups);
goto err;
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 04f73cb963b..8d9b3a2d4b5 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1817,6 +1817,8 @@ event_tail:
if (!(lex->event_parse_data= Event_parse_data::new_instance(thd)))
MYSQL_YYABORT;
lex->event_parse_data->identifier= $3;
+ lex->event_parse_data->on_completion=
+ Event_parse_data::ON_COMPLETION_DROP;
lex->sql_command= SQLCOM_CREATE_EVENT;
/* We need that for disallowing subqueries */
@@ -4299,7 +4301,7 @@ create_select:
SELECT_SYM
{
LEX *lex=Lex;
- lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ;
+ lex->lock_option= TL_READ_DEFAULT;
if (lex->sql_command == SQLCOM_INSERT)
lex->sql_command= SQLCOM_INSERT_SELECT;
else if (lex->sql_command == SQLCOM_REPLACE)
@@ -8062,11 +8064,13 @@ variable:
variable_aux:
ident_or_text SET_VAR expr
{
- $$= new (YYTHD->mem_root) Item_func_set_user_var($1, $3);
+ Item_func_set_user_var *item;
+ $$= item= new (YYTHD->mem_root) Item_func_set_user_var($1, $3);
if ($$ == NULL)
MYSQL_YYABORT;
LEX *lex= Lex;
lex->uncacheable(UNCACHEABLE_RAND);
+ lex->set_var_list.push_back(item);
}
| ident_or_text
{
@@ -9394,7 +9398,7 @@ insert:
lex->duplicates= DUP_ERROR;
mysql_init_select(lex);
/* for subselects */
- lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
+ lex->lock_option= TL_READ_DEFAULT;
}
insert_lock_option
opt_ignore insert2
@@ -11297,8 +11301,7 @@ user:
if (check_string_char_length(&$$->user, ER(ER_USERNAME),
USERNAME_CHAR_LENGTH,
system_charset_info, 0) ||
- check_string_byte_length(&$$->host, ER(ER_HOSTNAME),
- HOSTNAME_LENGTH))
+ check_host_name(&$$->host))
MYSQL_YYABORT;
}
| CURRENT_USER optional_braces
diff --git a/sql/table.cc b/sql/table.cc
index 40264a7cbb3..89714e4e47e 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -3390,7 +3390,7 @@ TABLE_LIST *TABLE_LIST::find_underlying_table(TABLE *table_to_find)
}
/*
- cleunup items belonged to view fields translation table
+  cleanup items belonging to the view fields translation table
SYNOPSIS
TABLE_LIST::cleanup_items()
@@ -3836,10 +3836,10 @@ Natural_join_column::Natural_join_column(Field_translator *field_param,
}
-Natural_join_column::Natural_join_column(Field *field_param,
+Natural_join_column::Natural_join_column(Item_field *field_param,
TABLE_LIST *tab)
{
- DBUG_ASSERT(tab->table == field_param->table);
+ DBUG_ASSERT(tab->table == field_param->field->table);
table_field= field_param;
view_field= NULL;
table_ref= tab;
@@ -3867,7 +3867,7 @@ Item *Natural_join_column::create_item(THD *thd)
return create_view_field(thd, table_ref, &view_field->item,
view_field->name);
}
- return new Item_field(thd, &thd->lex->current_select->context, table_field);
+ return table_field;
}
@@ -3878,7 +3878,7 @@ Field *Natural_join_column::field()
DBUG_ASSERT(table_field == NULL);
return NULL;
}
- return table_field;
+ return table_field->field;
}
@@ -4010,7 +4010,7 @@ void Field_iterator_natural_join::next()
cur_column_ref= column_ref_it++;
DBUG_ASSERT(!cur_column_ref || ! cur_column_ref->table_field ||
cur_column_ref->table_ref->table ==
- cur_column_ref->table_field->table);
+ cur_column_ref->table_field->field->table);
}
@@ -4174,7 +4174,7 @@ GRANT_INFO *Field_iterator_table_ref::grant()
*/
Natural_join_column *
-Field_iterator_table_ref::get_or_create_column_ref(TABLE_LIST *parent_table_ref)
+Field_iterator_table_ref::get_or_create_column_ref(THD *thd, TABLE_LIST *parent_table_ref)
{
Natural_join_column *nj_col;
bool is_created= TRUE;
@@ -4187,7 +4187,11 @@ Field_iterator_table_ref::get_or_create_column_ref(TABLE_LIST *parent_table_ref)
{
/* The field belongs to a stored table. */
Field *tmp_field= table_field_it.field();
- nj_col= new Natural_join_column(tmp_field, table_ref);
+ Item_field *tmp_item=
+ new Item_field(thd, &thd->lex->current_select->context, tmp_field);
+ if (!tmp_item)
+ return NULL;
+ nj_col= new Natural_join_column(tmp_item, table_ref);
field_count= table_ref->table->s->fields;
}
else if (field_it == &view_field_it)
@@ -4211,7 +4215,7 @@ Field_iterator_table_ref::get_or_create_column_ref(TABLE_LIST *parent_table_ref)
DBUG_ASSERT(nj_col);
}
DBUG_ASSERT(!nj_col->table_field ||
- nj_col->table_ref->table == nj_col->table_field->table);
+ nj_col->table_ref->table == nj_col->table_field->field->table);
/*
If the natural join column was just created add it to the list of
@@ -4276,7 +4280,7 @@ Field_iterator_table_ref::get_natural_column_ref()
nj_col= natural_join_it.column_ref();
DBUG_ASSERT(nj_col &&
(!nj_col->table_field ||
- nj_col->table_ref->table == nj_col->table_field->table));
+ nj_col->table_ref->table == nj_col->table_field->field->table));
return nj_col;
}
diff --git a/sql/table.h b/sql/table.h
index d21a9eefae8..ccd6b60664e 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -18,6 +18,7 @@
class Item; /* Needed by ORDER */
class Item_subselect;
+class Item_field;
class GRANT_TABLE;
class st_select_lex_unit;
class st_select_lex;
@@ -410,6 +411,7 @@ typedef struct st_table_share
int cached_row_logging_check;
#ifdef WITH_PARTITION_STORAGE_ENGINE
+ /** @todo: Move into *ha_data for partitioning */
bool auto_partitioned;
const char *partition_info;
uint partition_info_len;
@@ -419,6 +421,9 @@ typedef struct st_table_share
handlerton *default_part_db_type;
#endif
+  /** Place to store storage engine specific data. */
+ void *ha_data;
+
/*
Set share's table cache key and update its db and table name appropriately.
@@ -1012,7 +1017,7 @@ class Natural_join_column: public Sql_alloc
{
public:
Field_translator *view_field; /* Column reference of merge view. */
- Field *table_field; /* Column reference of table or temp view. */
+ Item_field *table_field; /* Column reference of table or temp view. */
TABLE_LIST *table_ref; /* Original base table/view reference. */
/*
True if a common join column of two NATURAL/USING join operands. Notice
@@ -1024,7 +1029,7 @@ public:
bool is_common;
public:
Natural_join_column(Field_translator *field_param, TABLE_LIST *tab);
- Natural_join_column(Field *field_param, TABLE_LIST *tab);
+ Natural_join_column(Item_field *field_param, TABLE_LIST *tab);
const char *name();
Item *create_item(THD *thd);
Field *field();
@@ -1603,7 +1608,7 @@ public:
GRANT_INFO *grant();
Item *create_item(THD *thd) { return field_it->create_item(thd); }
Field *field() { return field_it->field(); }
- Natural_join_column *get_or_create_column_ref(TABLE_LIST *parent_table_ref);
+ Natural_join_column *get_or_create_column_ref(THD *thd, TABLE_LIST *parent_table_ref);
Natural_join_column *get_natural_column_ref();
};