author     Alexander Nozdrin <alexander.nozdrin@oracle.com>   2010-10-13 01:17:22 +0400
committer  Alexander Nozdrin <alexander.nozdrin@oracle.com>   2010-10-13 01:17:22 +0400
commit     ba084605e8fed127cb680648a54a9fa60f7258c1 (patch)
tree       a342a24a8dd1000dcf37fc055e28fb441af44406 /sql
parent     1b92ff606db9bc8f043c4ad0c958d9657b2e0bd8 (diff)
parent     cd81c833a8a27df3cf037e084917a1a58e76f70b (diff)
Auto-merge from mysql-5.5-bugteam.
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_partition.cc         | 190
-rw-r--r--  sql/ha_partition.h          |  10
-rw-r--r--  sql/handler.cc              |  15
-rw-r--r--  sql/handler.h               |  58
-rw-r--r--  sql/item.cc                 |  11
-rw-r--r--  sql/item_cmpfunc.cc         |   8
-rw-r--r--  sql/item_strfunc.cc         |   5
-rw-r--r--  sql/item_timefunc.cc        |  70
-rw-r--r--  sql/item_timefunc.h         |   2
-rw-r--r--  sql/log_event.cc            |  40
-rw-r--r--  sql/mysqld.cc               |  23
-rw-r--r--  sql/repl_failsafe.cc        |   4
-rw-r--r--  sql/repl_failsafe.h         |   4
-rw-r--r--  sql/scheduler.h             |   7
-rw-r--r--  sql/share/errmsg-utf8.txt   |  15
-rw-r--r--  sql/slave.cc                |   5
-rw-r--r--  sql/sp_head.cc              |  24
-rw-r--r--  sql/sql_acl.cc              |   2
-rw-r--r--  sql/sql_cache.cc            | 114
-rw-r--r--  sql/sql_delete.cc           |   4
-rw-r--r--  sql/sql_insert.cc           |   4
-rw-r--r--  sql/sql_lex.h               |   1
-rw-r--r--  sql/sql_parse.cc            |   3
-rw-r--r--  sql/sql_partition_admin.cc  |  93
-rw-r--r--  sql/sql_partition_admin.h   |   6
-rw-r--r--  sql/sql_plugin.cc           |  39
-rw-r--r--  sql/sql_plugin.h            |   7
-rw-r--r--  sql/sql_show.cc             |  18
-rw-r--r--  sql/sql_string.h            |   8
-rw-r--r--  sql/sql_truncate.cc         | 479
-rw-r--r--  sql/sql_truncate.h          |  23
-rw-r--r--  sql/sql_update.cc           |  93
-rw-r--r--  sql/sql_update.h            |   3
-rw-r--r--  sql/sql_yacc.yy             |  33
-rw-r--r--  sql/sys_vars.cc             |  16
-rw-r--r--  sql/sys_vars.h              |   8
-rw-r--r--  sql/table.h                 |   4
37 files changed, 865 insertions, 584 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 8bf35f79ba9..3d8d5ae9eb8 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -3337,113 +3337,123 @@ int ha_partition::delete_row(const uchar *buf)
Called from sql_delete.cc by mysql_delete().
Called from sql_select.cc by JOIN::reinit().
Called from sql_union.cc by st_select_lex_unit::exec().
-
- Also used for handle ALTER TABLE t TRUNCATE PARTITION ...
- NOTE: auto increment value will be truncated in that partition as well!
*/
int ha_partition::delete_all_rows()
{
int error;
- bool truncate= FALSE;
handler **file;
- THD *thd= ha_thd();
DBUG_ENTER("ha_partition::delete_all_rows");
- if (thd->lex->sql_command == SQLCOM_TRUNCATE)
+ file= m_file;
+ do
{
- Alter_info *alter_info= &thd->lex->alter_info;
- /* TRUNCATE also means resetting auto_increment */
- lock_auto_increment();
- table_share->ha_part_data->next_auto_inc_val= 0;
- table_share->ha_part_data->auto_inc_initialized= FALSE;
- unlock_auto_increment();
- if (alter_info->flags & ALTER_ADMIN_PARTITION)
- {
- /* ALTER TABLE t TRUNCATE PARTITION ... */
- List_iterator<partition_element> part_it(m_part_info->partitions);
- int saved_error= 0;
- uint num_parts= m_part_info->num_parts;
- uint num_subparts= m_part_info->num_subparts;
- uint i= 0;
- uint num_parts_set= alter_info->partition_names.elements;
- uint num_parts_found= set_part_state(alter_info, m_part_info,
- PART_ADMIN);
- if (num_parts_set != num_parts_found &&
- (!(alter_info->flags & ALTER_ALL_PARTITION)))
- DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+ if ((error= (*file)->ha_delete_all_rows()))
+ DBUG_RETURN(error);
+ } while (*(++file));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Manually truncate the table.
+
+ @retval 0 Success.
+ @retval > 0 Error code.
+*/
+
+int ha_partition::truncate()
+{
+ int error;
+ handler **file;
+ DBUG_ENTER("ha_partition::truncate");
+
+ /*
+ TRUNCATE also means resetting auto_increment. Hence, reset
+ it so that it will be initialized again at the next use.
+ */
+ lock_auto_increment();
+ table_share->ha_part_data->next_auto_inc_val= 0;
+ table_share->ha_part_data->auto_inc_initialized= FALSE;
+ unlock_auto_increment();
- /*
- Cannot return HA_ERR_WRONG_COMMAND here without correct pruning
- since that whould delete the whole table row by row in sql_delete.cc
- */
- bitmap_clear_all(&m_part_info->used_partitions);
- do
- {
- partition_element *part_elem= part_it++;
- if (part_elem->part_state == PART_ADMIN)
- {
- if (m_is_sub_partitioned)
- {
- List_iterator<partition_element>
- subpart_it(part_elem->subpartitions);
- partition_element *sub_elem;
- uint j= 0, part;
- do
- {
- sub_elem= subpart_it++;
- part= i * num_subparts + j;
- bitmap_set_bit(&m_part_info->used_partitions, part);
- if (!saved_error)
- {
- DBUG_PRINT("info", ("truncate subpartition %u (%s)",
- part, sub_elem->partition_name));
- if ((error= m_file[part]->ha_delete_all_rows()))
- saved_error= error;
- /* If not reset_auto_increment is supported, just accept it */
- if (!saved_error &&
- (error= m_file[part]->ha_reset_auto_increment(0)) &&
- error != HA_ERR_WRONG_COMMAND)
- saved_error= error;
- }
- } while (++j < num_subparts);
- }
- else
- {
- DBUG_PRINT("info", ("truncate partition %u (%s)", i,
- part_elem->partition_name));
- bitmap_set_bit(&m_part_info->used_partitions, i);
- if (!saved_error)
- {
- if ((error= m_file[i]->ha_delete_all_rows()) && !saved_error)
- saved_error= error;
- /* If not reset_auto_increment is supported, just accept it */
- if (!saved_error &&
- (error= m_file[i]->ha_reset_auto_increment(0)) &&
- error != HA_ERR_WRONG_COMMAND)
- saved_error= error;
- }
- }
- part_elem->part_state= PART_NORMAL;
- }
- } while (++i < num_parts);
- DBUG_RETURN(saved_error);
- }
- truncate= TRUE;
- }
file= m_file;
do
{
- if ((error= (*file)->ha_delete_all_rows()))
+ if ((error= (*file)->ha_truncate()))
DBUG_RETURN(error);
- /* Ignore the error */
- if (truncate)
- (void) (*file)->ha_reset_auto_increment(0);
} while (*(++file));
DBUG_RETURN(0);
}
+/**
+ Truncate a set of specific partitions.
+
+ @remark Auto increment value will be truncated in that partition as well!
+
+ ALTER TABLE t TRUNCATE PARTITION ...
+*/
+
+int ha_partition::truncate_partition(Alter_info *alter_info)
+{
+ int error= 0;
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ uint num_parts= m_part_info->num_parts;
+ uint num_subparts= m_part_info->num_subparts;
+ uint i= 0;
+ uint num_parts_set= alter_info->partition_names.elements;
+ uint num_parts_found= set_part_state(alter_info, m_part_info,
+ PART_ADMIN);
+ DBUG_ENTER("ha_partition::truncate_partition");
+
+ /*
+ TRUNCATE also means resetting auto_increment. Hence, reset
+ it so that it will be initialized again at the next use.
+ */
+ lock_auto_increment();
+ table_share->ha_part_data->next_auto_inc_val= 0;
+ table_share->ha_part_data->auto_inc_initialized= FALSE;
+ unlock_auto_increment();
+
+ if (num_parts_set != num_parts_found &&
+ (!(alter_info->flags & ALTER_ALL_PARTITION)))
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_ADMIN)
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element>
+ subpart_it(part_elem->subpartitions);
+ partition_element *sub_elem;
+ uint j= 0, part;
+ do
+ {
+ sub_elem= subpart_it++;
+ part= i * num_subparts + j;
+ DBUG_PRINT("info", ("truncate subpartition %u (%s)",
+ part, sub_elem->partition_name));
+ if ((error= m_file[part]->ha_truncate()))
+ break;
+ } while (++j < num_subparts);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("truncate partition %u (%s)", i,
+ part_elem->partition_name));
+ error= m_file[i]->ha_truncate();
+ }
+ part_elem->part_state= PART_NORMAL;
+ }
+ } while (!error && (++i < num_parts));
+ DBUG_RETURN(error);
+}
+
+
/*
Start a large batch of insert rows
@@ -6327,8 +6337,8 @@ void ha_partition::print_error(int error, myf errflag)
/* Should probably look for my own errors first */
DBUG_PRINT("enter", ("error: %d", error));
- if (error == HA_ERR_NO_PARTITION_FOUND &&
- thd->lex->sql_command != SQLCOM_TRUNCATE)
+ if ((error == HA_ERR_NO_PARTITION_FOUND) &&
+ ! (thd->lex->alter_info.flags & ALTER_TRUNCATE_PARTITION))
m_part_info->print_no_partition_found(table);
else
{
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 353e0d17159..f1abc0cefe2 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -346,6 +346,7 @@ public:
virtual int update_row(const uchar * old_data, uchar * new_data);
virtual int delete_row(const uchar * buf);
virtual int delete_all_rows(void);
+ virtual int truncate();
virtual void start_bulk_insert(ha_rows rows);
virtual int end_bulk_insert();
private:
@@ -354,6 +355,15 @@ private:
long estimate_read_buffer_size(long original_size);
public:
+ /*
+ Method for truncating a specific partition.
+ (i.e. ALTER TABLE t1 TRUNCATE PARTITION p).
+
+ @remark This method is a partitioning-specific hook
+ and thus not a member of the general SE API.
+ */
+ int truncate_partition(Alter_info *);
+
virtual bool is_fatal_error(int error, uint flags)
{
if (!handler::is_fatal_error(error, flags) ||
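Illustrative sketch (not part of this patch) of how a caller reaches the partitioning-specific truncate_partition() hook; it mirrors the upcast done in sql_partition_admin.cc further down, with `table` and `thd` assumed to be the usual TABLE and THD pointers:

    /* Only the partition handler exposes truncate_partition(). */
    if (table->s->db_type() == partition_hton)
    {
      ha_partition *part= (ha_partition *) table->file;
      int err= part->truncate_partition(&thd->lex->alter_info);
      if (err)
        table->file->print_error(err, MYF(0));
    }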
diff --git a/sql/handler.cc b/sql/handler.cc
index 567dbe6ea49..1542dd99ec6 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -3209,6 +3209,21 @@ handler::ha_delete_all_rows()
/**
+ Truncate table: public interface.
+
+ @sa handler::truncate()
+*/
+
+int
+handler::ha_truncate()
+{
+ mark_trx_read_write();
+
+ return truncate();
+}
+
+
+/**
Reset auto increment: public interface.
@sa handler::reset_auto_increment()
diff --git a/sql/handler.h b/sql/handler.h
index b1d64a1114b..325df003215 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1331,6 +1331,7 @@ public:
int ha_bulk_update_row(const uchar *old_data, uchar *new_data,
uint *dup_key_found);
int ha_delete_all_rows();
+ int ha_truncate();
int ha_reset_auto_increment(ulonglong value);
int ha_optimize(THD* thd, HA_CHECK_OPT* check_opt);
int ha_analyze(THD* thd, HA_CHECK_OPT* check_opt);
@@ -1644,8 +1645,33 @@ public:
{ return(NULL);} /* gets tablespace name from handler */
/** used in ALTER TABLE; 1 if changing storage engine is allowed */
virtual bool can_switch_engines() { return 1; }
- /** used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */
- virtual int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
+ /**
+ Get the list of foreign keys in this table.
+
+ @remark Returns the set of foreign keys where this table is the
+ dependent or child table.
+
+ @param thd The thread handle.
+ @param f_key_list[out] The list of foreign keys.
+
+ @return The handler error code or zero for success.
+ */
+ virtual int
+ get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
+ { return 0; }
+ /**
+ Get the list of foreign keys referencing this table.
+
+ @remark Returns the set of foreign keys where this table is the
+ referenced or parent table.
+
+ @param thd The thread handle.
+ @param f_key_list[out] The list of foreign keys.
+
+ @return The handler error code or zero for success.
+ */
+ virtual int
+ get_parent_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list)
{ return 0; }
virtual uint referenced_by_foreign_key() { return 0;}
virtual void init_table_handle_for_HANDLER()
@@ -2010,16 +2036,34 @@ private:
This is called to delete all rows in a table
If the handler don't support this, then this function will
return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
- by one. It should reset auto_increment if
- thd->lex->sql_command == SQLCOM_TRUNCATE.
+ by one.
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
/**
+ Quickly remove all rows from a table.
+
+ @remark This method is responsible for implementing MySQL's TRUNCATE
+ TABLE statement, which is a DDL operation. As such, an engine
+ can bypass certain integrity checks and in some cases avoid
+ fine-grained locking (e.g. row locks) which would normally be
+ required for a DELETE statement.
+
+ @remark Typically, truncate is not used if it can result in integrity
+ violation. For example, truncate is not used when a foreign
+ key references the table, but it might be used if foreign key
+ checks are disabled.
+
+ @remark Engine is responsible for resetting the auto-increment counter.
+
+ @remark The table is locked in exclusive mode.
+ */
+ virtual int truncate()
+ { return HA_ERR_WRONG_COMMAND; }
+ /**
Reset the auto-increment counter to the given value, i.e. the next row
- inserted will get the given value. This is called e.g. after TRUNCATE
- is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is
- returned by storage engines that don't support this operation.
+ inserted will get the given value. HA_ERR_WRONG_COMMAND is returned by
+ storage engines that don't support this operation.
*/
virtual int reset_auto_increment(ulonglong value)
{ return HA_ERR_WRONG_COMMAND; }
diff --git a/sql/item.cc b/sql/item.cc
index e782e90b874..b166f3e645f 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1783,8 +1783,7 @@ bool agg_item_set_converter(DTCollation &coll, const char *fname,
In case we're in statement prepare, create conversion item
in its memory: it will be reused on each execute.
*/
- arena= thd->is_stmt_prepare() ? thd->activate_stmt_arena_if_needed(&backup)
- : NULL;
+ arena= thd->activate_stmt_arena_if_needed(&backup);
for (i= 0, arg= args; i < nargs; i++, arg+= item_sep)
{
@@ -7356,9 +7355,11 @@ Item_cache* Item_cache::get_cache(const Item *item, const Item_result type)
case DECIMAL_RESULT:
return new Item_cache_decimal();
case STRING_RESULT:
- if (item->field_type() == MYSQL_TYPE_DATE ||
- item->field_type() == MYSQL_TYPE_DATETIME ||
- item->field_type() == MYSQL_TYPE_TIME)
+ /* Not all functions that return DATE/TIME are actually DATE/TIME funcs. */
+ if ((item->field_type() == MYSQL_TYPE_DATE ||
+ item->field_type() == MYSQL_TYPE_DATETIME ||
+ item->field_type() == MYSQL_TYPE_TIME) &&
+ (const_cast<Item*>(item))->result_as_longlong())
return new Item_cache_datetime(item->field_type());
return new Item_cache_str(item);
case ROW_RESULT:
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index b7aad733e67..b04ec105468 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -3029,6 +3029,14 @@ void Item_func_case::fix_length_and_dec()
{
if (agg_arg_charsets_for_string_result(collation, agg, nagg))
return;
+ /*
+ Copy all THEN and ELSE items back to args[] array.
+ Some of the items might have been changed to Item_func_conv_charset.
+ */
+ for (nagg= 0 ; nagg < ncases / 2 ; nagg++)
+ args[nagg * 2 + 1]= agg[nagg];
+ if (else_expr_num != -1)
+ args[else_expr_num]= agg[nagg++];
}
else
collation.set_numeric();
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 6d3514bf356..89c1e785c71 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -2299,7 +2299,8 @@ String *Item_func_format::val_str_ascii(String *str)
if (lc->grouping[0] > 0 &&
str_length >= dec_length + 1 + lc->grouping[0])
{
- char buf[DECIMAL_MAX_STR_LENGTH * 2]; /* 2 - in the worst case when grouping=1 */
+ /* We need space for ',' between each group of digits as well. */
+ char buf[2 * FLOATING_POINT_BUFFER];
int count;
const char *grouping= lc->grouping;
char sign_length= *str->ptr() == '-' ? 1 : 0;
@@ -2323,7 +2324,7 @@ String *Item_func_format::val_str_ascii(String *str)
count will be initialized to -1 and
we'll never get into this "if" anymore.
*/
- if (!count)
+ if (count == 0)
{
*--dst= lc->thousand_sep;
if (grouping[1])
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 24cf4da0a95..49336b04e16 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -2857,10 +2857,11 @@ void Item_func_add_time::fix_length_and_dec()
Result: Time value or datetime value
*/
-String *Item_func_add_time::val_str(String *str)
+MYSQL_TIME *Item_func_add_time::val_datetime(MYSQL_TIME *time,
+ date_time_format_types *format)
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME l_time1, l_time2, l_time3;
+ MYSQL_TIME l_time1, l_time2;
bool is_time= 0;
long days, microseconds;
longlong seconds;
@@ -2886,41 +2887,38 @@ String *Item_func_add_time::val_str(String *str)
if (l_time1.neg != l_time2.neg)
l_sign= -l_sign;
- bzero((char *)&l_time3, sizeof(l_time3));
+ bzero((char *)time, sizeof(MYSQL_TIME));
- l_time3.neg= calc_time_diff(&l_time1, &l_time2, -l_sign,
- &seconds, &microseconds);
+ time->neg= calc_time_diff(&l_time1, &l_time2, -l_sign,
+ &seconds, &microseconds);
/*
If first argument was negative and diff between arguments
is non-zero we need to swap sign to get proper result.
*/
if (l_time1.neg && (seconds || microseconds))
- l_time3.neg= 1-l_time3.neg; // Swap sign of result
+ time->neg= 1 - time->neg; // Swap sign of result
- if (!is_time && l_time3.neg)
+ if (!is_time && time->neg)
goto null_date;
days= (long)(seconds/86400L);
- calc_time_from_sec(&l_time3, (long)(seconds%86400L), microseconds);
+ calc_time_from_sec(time, (long)(seconds%86400L), microseconds);
if (!is_time)
{
- get_date_from_daynr(days,&l_time3.year,&l_time3.month,&l_time3.day);
- if (l_time3.day &&
- !make_datetime(l_time1.second_part || l_time2.second_part ?
- DATE_TIME_MICROSECOND : DATE_TIME,
- &l_time3, str))
- return str;
+ get_date_from_daynr(days, &time->year, &time->month, &time->day);
+ *format= l_time1.second_part || l_time2.second_part ?
+ DATE_TIME_MICROSECOND : DATE_TIME;
+ if (time->day)
+ return time;
goto null_date;
}
-
- l_time3.hour+= days*24;
- if (!make_datetime_with_warn(l_time1.second_part || l_time2.second_part ?
- TIME_MICROSECOND : TIME_ONLY,
- &l_time3, str))
- return str;
+ *format= l_time1.second_part || l_time2.second_part ?
+ TIME_MICROSECOND : TIME_ONLY;
+ time->hour+= days*24;
+ return time;
null_date:
null_value=1;
@@ -2928,6 +2926,38 @@ null_date:
}
+String *Item_func_add_time::val_str(String *str)
+{
+ MYSQL_TIME ltime;
+ date_time_format_types format;
+
+ val_datetime(&ltime, &format);
+
+ if (null_value)
+ return 0;
+
+ if (!make_datetime_with_warn(format, &ltime, str))
+ return str;
+
+ null_value= 1;
+ return 0;
+}
+
+
+longlong Item_func_add_time::val_int()
+{
+ MYSQL_TIME ltime;
+ date_time_format_types format;
+
+ val_datetime(&ltime, &format);
+
+ if (null_value)
+ return 0;
+
+ return TIME_to_ulonglong_datetime(&ltime);
+}
+
+
void Item_func_add_time::print(String *str, enum_query_type query_type)
{
if (is_date)
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 27dfd548f73..004eb83cbeb 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -948,6 +948,8 @@ public:
return save_date_in_field(field);
return Item_str_func::save_in_field(field, no_conversions);
}
+ longlong val_int();
+ MYSQL_TIME *val_datetime(MYSQL_TIME *time, date_time_format_types *format);
};
class Item_func_timediff :public Item_str_timefunc
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 16290c58685..bfcac10e138 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -158,16 +158,21 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
" %s, Error_code: %d;", err->get_message_text(),
err->get_sql_errno());
}
-
- rli->report(level, thd->is_error()? thd->stmt_da->sql_errno() : 0,
- "Could not execute %s event on table %s.%s;"
- "%s handler error %s; "
- "the event's master log %s, end_log_pos %lu",
- type, table->s->db.str,
- table->s->table_name.str,
- buff,
- handler_error == NULL? "<unknown>" : handler_error,
- log_name, pos);
+
+ if (ha_error != 0)
+ rli->report(level, thd->is_error() ? thd->stmt_da->sql_errno() : 0,
+ "Could not execute %s event on table %s.%s;"
+ "%s handler error %s; "
+ "the event's master log %s, end_log_pos %lu",
+ type, table->s->db.str, table->s->table_name.str,
+ buff, handler_error == NULL ? "<unknown>" : handler_error,
+ log_name, pos);
+ else
+ rli->report(level, thd->is_error() ? thd->stmt_da->sql_errno() : 0,
+ "Could not execute %s event on table %s.%s;"
+ "%s the event's master log %s, end_log_pos %lu",
+ type, table->s->db.str, table->s->table_name.str,
+ buff, log_name, pos);
}
#endif
@@ -7811,19 +7816,16 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
/Sven
*/
thd->reset_current_stmt_binlog_format_row();
- const_cast<Relay_log_info*>(rli)->cleanup_context(thd, error);
thd->is_slave_error= 1;
DBUG_RETURN(error);
}
- if (get_flags(STMT_END_F))
- if ((error= rows_event_stmt_cleanup(rli, thd)))
- rli->report(ERROR_LEVEL, error,
- "Error in %s event: commit of row events failed, "
- "table `%s`.`%s`",
- get_type_str(), m_table->s->db.str,
- m_table->s->table_name.str);
-
+ if (get_flags(STMT_END_F) && (error= rows_event_stmt_cleanup(rli, thd)))
+ slave_rows_error_report(ERROR_LEVEL,
+ thd->is_error() ? 0 : error,
+ rli, thd, table,
+ get_type_str(),
+ RPL_LOG_NAME, (ulong) log_pos);
DBUG_RETURN(error);
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 1ab335eb106..99754e8b7f6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -265,6 +265,8 @@ extern "C" sig_handler handle_segfault(int sig);
/* Constants */
+#include <welcome_copyright_notice.h> // ORACLE_WELCOME_COPYRIGHT_NOTICE
+
const char *show_comp_option_name[]= {"YES", "NO", "DISABLED"};
static const char *tc_heuristic_recover_names[]=
@@ -3200,6 +3202,11 @@ static int init_common_variables()
return 1;
set_server_version();
+#ifndef EMBEDDED_LIBRARY
+ if (opt_help && !opt_verbose)
+ unireg_abort(0);
+#endif /*!EMBEDDED_LIBRARY*/
+
DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname,
server_version, SYSTEM_TYPE,MACHINE_TYPE));
@@ -3237,12 +3244,11 @@ static int init_common_variables()
desired page sizes.
*/
int nelem;
- int max_desired_page_size;
- int max_page_size;
+ size_t max_desired_page_size;
if (opt_super_large_pages)
- max_page_size= SUPER_LARGE_PAGESIZE;
+ max_desired_page_size= SUPER_LARGE_PAGESIZE;
else
- max_page_size= LARGE_PAGESIZE;
+ max_desired_page_size= LARGE_PAGESIZE;
nelem = getpagesizes(NULL, 0);
if (nelem > 0)
{
@@ -6593,13 +6599,8 @@ static void usage(void)
if (!default_collation_name)
default_collation_name= (char*) default_charset_info->name;
print_version();
- puts("\
-Copyright (C) 2000-2008 MySQL AB, by Monty and others.\n\
-Copyright (C) 2008,2009 Sun Microsystems, Inc.\n\
-This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\
-and you are welcome to modify and redistribute it under the GPL license\n\n\
-Starts the MySQL database server.\n");
-
+ puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000, 2010"));
+ puts("Starts the MySQL database server.\n");
printf("Usage: %s [OPTIONS]\n", my_progname);
if (!opt_verbose)
puts("\nFor more help options (several pages), use mysqld --verbose --help.");
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 47eb2f7031d..540b62b9d3b 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -41,7 +41,7 @@
#define SLAVE_ERRMSG_SIZE (FN_REFLEN+64)
-RPL_STATUS rpl_status=RPL_NULL;
+ulong rpl_status=RPL_NULL;
mysql_mutex_t LOCK_rpl_status;
mysql_cond_t COND_rpl_status;
HASH slave_list;
@@ -68,7 +68,7 @@ static Slave_log_event* find_slave_event(IO_CACHE* log,
functions like register_slave()) are working.
*/
-void change_rpl_status(RPL_STATUS from_status, RPL_STATUS to_status)
+void change_rpl_status(ulong from_status, ulong to_status)
{
mysql_mutex_lock(&LOCK_rpl_status);
if (rpl_status == from_status || rpl_status == RPL_ANY)
diff --git a/sql/repl_failsafe.h b/sql/repl_failsafe.h
index c6d00de47cb..a0c41686696 100644
--- a/sql/repl_failsafe.h
+++ b/sql/repl_failsafe.h
@@ -26,7 +26,7 @@ typedef enum {RPL_AUTH_MASTER=0,RPL_IDLE_SLAVE,RPL_ACTIVE_SLAVE,
RPL_LOST_SOLDIER,RPL_TROOP_SOLDIER,
RPL_RECOVERY_CAPTAIN,RPL_NULL /* inactive */,
RPL_ANY /* wild card used by change_rpl_status */ } RPL_STATUS;
-extern RPL_STATUS rpl_status;
+extern ulong rpl_status;
extern mysql_mutex_t LOCK_rpl_status;
extern mysql_cond_t COND_rpl_status;
@@ -34,7 +34,7 @@ extern TYPELIB rpl_role_typelib;
extern const char* rpl_role_type[], *rpl_status_type[];
pthread_handler_t handle_failsafe_rpl(void *arg);
-void change_rpl_status(RPL_STATUS from_status, RPL_STATUS to_status);
+void change_rpl_status(ulong from_status, ulong to_status);
int find_recovery_captain(THD* thd, MYSQL* mysql);
int update_slave_list(MYSQL* mysql, Master_info* mi);
diff --git a/sql/scheduler.h b/sql/scheduler.h
index 40f0e28bc2c..b5a175434b6 100644
--- a/sql/scheduler.h
+++ b/sql/scheduler.h
@@ -57,6 +57,13 @@ struct scheduler_functions
*/
enum scheduler_types
{
+ /*
+ The default of --thread-handling is the first one in the
+ thread_handling_names array, this array has to be consistent with
+ the order in this array, so to change default one has to change
+ the first entry in this enum and the first entry in the
+ thread_handling_names array.
+ */
SCHEDULER_ONE_THREAD_PER_CONNECTION=0,
SCHEDULER_NO_THREADS,
SCHEDULER_TYPES_COUNT
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 07f5589135d..be97afe055a 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -6152,7 +6152,7 @@ ER_WARN_ENGINE_TRANSACTION_ROLLBACK
ER_SLAVE_HEARTBEAT_FAILURE
eng "Unexpected master's heartbeat data: %s"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE
- eng "The requested value for the heartbeat period %s %s"
+ eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds)."
ER_NDB_REPLICATION_SCHEMA_ERROR
eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
@@ -6379,3 +6379,16 @@ ER_SET_PASSWORD_AUTH_PLUGIN
ER_GRANT_PLUGIN_USER_EXISTS
eng "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists"
+
+ER_TRUNCATE_ILLEGAL_FK 42000
+ eng "Cannot truncate a table referenced in a foreign key constraint (%.192s)"
+
+ER_PLUGIN_IS_PERMANENT
+ eng "Plugin '%s' is force_plus_permanent and can not be unloaded"
+
+ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN
+ eng "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled."
+
+ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX
+ eng "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout."
+
diff --git a/sql/slave.cc b/sql/slave.cc
index dff6b6946d4..3e77a5e7516 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2534,9 +2534,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
else
{
exec_res= 0;
- trans_rollback(thd);
- close_thread_tables(thd);
- thd->mdl_context.release_transactional_locks();
+ rli->cleanup_context(thd, 1);
/* chance for concurrent connection to get more locks */
safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
(CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
@@ -3385,6 +3383,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
request is detected only by the present function, not by events), so we
must "proactively" clear playgrounds:
*/
+ thd->clear_error();
rli->cleanup_context(thd, 1);
/*
Some extra safety, which should not been needed (normally, event deletion
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index a95bbe094c0..52f658cbe0e 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -1213,8 +1213,28 @@ sp_head::execute(THD *thd)
Object_creation_ctx *saved_creation_ctx;
Warning_info *saved_warning_info, warning_info(thd->warning_info->warn_id());
- /* Use some extra margin for possible SP recursion and functions */
- if (check_stack_overrun(thd, 8 * STACK_MIN_SIZE, (uchar*)&old_packet))
+ /*
+ Just reporting a stack overrun error
+ (@sa check_stack_overrun()) requires stack memory for error
+ message buffer. Thus, we have to put the below check
+ relatively close to the beginning of the execution stack,
+ where available stack margin is still big. As long as the check
+ has to be fairly high up the call stack, the amount of memory
+ we "book" for has to stay fairly high as well, and hence
+ not very accurate. The number below has been calculated
+ by trial and error, and reflects the amount of memory necessary
+ to execute a single stored procedure instruction, be it either
+ an SQL statement, or, heaviest of all, a CALL, which involves
+ parsing and loading of another stored procedure into the cache
+ (@sa db_load_routine() and Bug#10100).
+ At the time of measuring, a recursive SP invocation required
+ 3232 bytes of stack on 32 bit Linux, 6016 bytes on 64 bit Mac
+ and 11152 on 64 bit Solaris sparc.
+ The same with db_load_routine() required circa 7k bytes and
+ 14k bytes respectively. Hence, here we book the stack with some
+ reasonable margin.
+ */
+ if (check_stack_overrun(thd, 4 * STACK_MIN_SIZE, (uchar*)&old_packet))
DBUG_RETURN(TRUE);
/* init per-instruction memroot */
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 571a7890116..5583e9a29f1 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -3936,7 +3936,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
ulong rights, bool revoke_grant, bool is_proxy)
{
List_iterator <LEX_USER> str_list (list);
- LEX_USER *Str, *tmp_Str, *proxied_user;
+ LEX_USER *Str, *tmp_Str, *proxied_user= NULL;
char tmp_db[NAME_LEN+1];
bool create_new_users=0;
TABLE_LIST tables[2];
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index b57c851edf4..0dadc0f0cd4 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -334,6 +334,7 @@ TODO list:
#include "tztime.h" // struct Time_zone
#include "sql_acl.h" // SELECT_ACL
#include "sql_base.h" // TMP_TABLE_KEY_EXTRA
+#include "debug_sync.h" // DEBUG_SYNC
#ifdef HAVE_QUERY_CACHE
#include <m_ctype.h>
#include <my_dir.h>
@@ -341,6 +342,7 @@ TODO list:
#include "../storage/myisammrg/ha_myisammrg.h"
#include "../storage/myisammrg/myrg_def.h"
#include "probes_mysql.h"
+#include "transaction.h"
#ifdef EMBEDDED_LIBRARY
#include "emb_qcache.h"
@@ -370,32 +372,6 @@ TODO list:
__LINE__,(ulong)(B)));B->query()->unlock_reading();}
#define DUMP(C) DBUG_EXECUTE("qcache", {\
(C)->cache_dump(); (C)->queries_dump();(C)->tables_dump();})
-
-
-/**
- Causes the thread to wait in a spin lock for a query kill signal.
- This function is used by the test frame work to identify race conditions.
-
- The signal is caught and ignored and the thread is not killed.
-*/
-
-static void debug_wait_for_kill(const char *info)
-{
- DBUG_ENTER("debug_wait_for_kill");
- const char *prev_info;
- THD *thd;
- thd= current_thd;
- prev_info= thd->proc_info;
- thd->proc_info= info;
- sql_print_information("%s", info);
- while(!thd->killed)
- my_sleep(1000);
- thd->killed= THD::NOT_KILLED;
- sql_print_information("Exit debug_wait_for_kill");
- thd->proc_info= prev_info;
- DBUG_VOID_RETURN;
-}
-
#else
#define RW_WLOCK(M) mysql_rwlock_wrlock(M)
#define RW_RLOCK(M) mysql_rwlock_rdlock(M)
@@ -407,6 +383,52 @@ static void debug_wait_for_kill(const char *info)
#define DUMP(C)
#endif
+
+/**
+ Macro that executes the requested action at a synchronization point
+ only if the thread has an associated THD session.
+*/
+#if defined(ENABLED_DEBUG_SYNC)
+#define QC_DEBUG_SYNC(name) \
+ do { \
+ THD *thd= current_thd; \
+ if (thd) \
+ DEBUG_SYNC(thd, name); \
+ } while (0)
+#else
+#define QC_DEBUG_SYNC(name)
+#endif
+
+
+/**
+ Thread state to be used when the query cache lock needs to be acquired.
+ Sets the thread state name in the constructor, resets on destructor.
+*/
+
+struct Query_cache_wait_state
+{
+ THD *m_thd;
+ const char *m_proc_info;
+
+ Query_cache_wait_state(THD *thd, const char *func,
+ const char *file, unsigned int line)
+ : m_thd(thd),
+ m_proc_info(NULL)
+ {
+ if (m_thd)
+ m_proc_info= set_thd_proc_info(m_thd,
+ "Waiting for query cache lock",
+ func, file, line);
+ }
+
+ ~Query_cache_wait_state()
+ {
+ if (m_thd)
+ set_thd_proc_info(m_thd, m_proc_info, NULL, NULL, 0);
+ }
+};
+
+
/**
Serialize access to the query cache.
If the lock cannot be granted the thread hangs in a conditional wait which
@@ -428,6 +450,8 @@ static void debug_wait_for_kill(const char *info)
bool Query_cache::try_lock(bool use_timeout)
{
bool interrupt= FALSE;
+ THD *thd= current_thd;
+ Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);
DBUG_ENTER("Query_cache::try_lock");
mysql_mutex_lock(&structure_guard_mutex);
@@ -437,7 +461,6 @@ bool Query_cache::try_lock(bool use_timeout)
{
m_cache_lock_status= Query_cache::LOCKED;
#ifndef DBUG_OFF
- THD *thd= current_thd;
if (thd)
m_cache_lock_thread_id= thd->thread_id;
#endif
@@ -496,6 +519,8 @@ bool Query_cache::try_lock(bool use_timeout)
void Query_cache::lock_and_suspend(void)
{
+ THD *thd= current_thd;
+ Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);
DBUG_ENTER("Query_cache::lock_and_suspend");
mysql_mutex_lock(&structure_guard_mutex);
@@ -503,7 +528,6 @@ void Query_cache::lock_and_suspend(void)
mysql_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
m_cache_lock_status= Query_cache::LOCKED_NO_WAIT;
#ifndef DBUG_OFF
- THD *thd= current_thd;
if (thd)
m_cache_lock_thread_id= thd->thread_id;
#endif
@@ -524,6 +548,8 @@ void Query_cache::lock_and_suspend(void)
void Query_cache::lock(void)
{
+ THD *thd= current_thd;
+ Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);
DBUG_ENTER("Query_cache::lock");
mysql_mutex_lock(&structure_guard_mutex);
@@ -531,7 +557,6 @@ void Query_cache::lock(void)
mysql_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
m_cache_lock_status= Query_cache::LOCKED;
#ifndef DBUG_OFF
- THD *thd= current_thd;
if (thd)
m_cache_lock_thread_id= thd->thread_id;
#endif
@@ -871,9 +896,7 @@ Query_cache::insert(Query_cache_tls *query_cache_tls,
if (is_disabled() || query_cache_tls->first_query_block == NULL)
DBUG_VOID_RETURN;
- DBUG_EXECUTE_IF("wait_in_query_cache_insert",
- debug_wait_for_kill("wait_in_query_cache_insert"); );
-
+ QC_DEBUG_SYNC("wait_in_query_cache_insert");
if (try_lock())
DBUG_VOID_RETURN;
@@ -1683,6 +1706,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
}
else
thd->lex->safe_to_cache_query= 0; // Don't try to cache this
+ /* End the statement transaction potentially started by engine. */
+ trans_rollback_stmt(thd);
goto err_unlock; // Parse query
}
else
@@ -1724,6 +1749,13 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
thd->limit_found_rows = query->found_rows();
thd->status_var.last_query_cost= 0.0;
+ /*
+ End the statement transaction potentially started by an
+ engine callback. We ignore the return value for now,
+ since as long as EOF packet is part of the query cache
+ response, we can't handle it anyway.
+ */
+ (void) trans_commit_stmt(thd);
if (!thd->stmt_da->is_set())
thd->stmt_da->disable_status();
@@ -1769,8 +1801,7 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
invalidate_table(thd, tables_used);
}
- DBUG_EXECUTE_IF("wait_after_query_cache_invalidate",
- debug_wait_for_kill("wait_after_query_cache_invalidate"););
+ DEBUG_SYNC(thd, "wait_after_query_cache_invalidate");
DBUG_VOID_RETURN;
}
@@ -1961,8 +1992,7 @@ void Query_cache::flush()
if (is_disabled())
DBUG_VOID_RETURN;
- DBUG_EXECUTE_IF("wait_in_query_cache_flush1",
- debug_wait_for_kill("wait_in_query_cache_flush1"););
+ QC_DEBUG_SYNC("wait_in_query_cache_flush1");
lock_and_suspend();
if (query_cache_size > 0)
@@ -2302,9 +2332,7 @@ void Query_cache::free_cache()
void Query_cache::flush_cache()
{
-
- DBUG_EXECUTE_IF("wait_in_query_cache_flush2",
- debug_wait_for_kill("wait_in_query_cache_flush2"););
+ QC_DEBUG_SYNC("wait_in_query_cache_flush2");
my_hash_reset(&queries);
while (queries_blocks != 0)
@@ -2750,8 +2778,7 @@ void Query_cache::invalidate_table(THD *thd, TABLE *table)
void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length)
{
- DBUG_EXECUTE_IF("wait_in_query_cache_invalidate1",
- debug_wait_for_kill("wait_in_query_cache_invalidate1"); );
+ DEBUG_SYNC(thd, "wait_in_query_cache_invalidate1");
/*
Lock the query cache and queue all invalidation attempts to avoid
@@ -2759,9 +2786,7 @@ void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length)
*/
lock();
- DBUG_EXECUTE_IF("wait_in_query_cache_invalidate2",
- debug_wait_for_kill("wait_in_query_cache_invalidate2"); );
-
+ DEBUG_SYNC(thd, "wait_in_query_cache_invalidate2");
if (query_cache_size > 0)
invalidate_table_internal(thd, key, key_length);
@@ -2811,7 +2836,6 @@ Query_cache::invalidate_query_block_list(THD *thd,
Query_cache_block *query_block= list_root->next->block();
BLOCK_LOCK_WR(query_block);
free_query(query_block);
- DBUG_EXECUTE_IF("debug_cache_locks", sleep(10););
}
}
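For illustration only (not from the patch): the Query_cache_wait_state helper added above is an RAII guard, so a hypothetical cache routine would use it like this and get the "Waiting for query cache lock" state restored on every exit path:

    void Query_cache::some_locked_operation()   /* hypothetical member */
    {
      THD *thd= current_thd;
      /* Constructor switches the thread state, destructor restores it. */
      Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);

      mysql_mutex_lock(&structure_guard_mutex);
      /* ... wait for and take the cache lock ... */
      mysql_mutex_unlock(&structure_guard_mutex);
    }   /* proc_info restored here, even on early return */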
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 2f69bac917e..685ce1c7b42 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -525,9 +525,7 @@ int mysql_multi_delete_prepare(THD *thd)
if (!(target_tbl->table= target_tbl->correspondent_table->table))
{
DBUG_ASSERT(target_tbl->correspondent_table->view &&
- target_tbl->correspondent_table->merge_underlying_list &&
- target_tbl->correspondent_table->merge_underlying_list->
- next_local);
+ target_tbl->correspondent_table->multitable_view);
my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
target_tbl->correspondent_table->view_db.str,
target_tbl->correspondent_table->view_name.str);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 3ac6cf28c90..1f8da3fab5c 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1620,9 +1620,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->file->adjust_next_insert_id_after_explicit_value(
table->next_number_field->val_int());
info->touched++;
- if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ &&
- !bitmap_is_subset(table->write_set, table->read_set)) ||
- compare_record(table))
+ if (!records_are_comparable(table) || compare_records(table))
{
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])) &&
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 3a559433e2d..0f93275434d 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -958,6 +958,7 @@ inline bool st_select_lex_unit::is_union ()
#define ALTER_ALL_PARTITION (1L << 21)
#define ALTER_REMOVE_PARTITIONING (1L << 22)
#define ALTER_FOREIGN_KEY (1L << 23)
+#define ALTER_TRUNCATE_PARTITION (1L << 24)
enum enum_alter_table_change_level
{
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index e44d2563483..2f8a72ee25c 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -34,7 +34,7 @@
#include "sql_locale.h" // my_locale_en_US
#include "log.h" // flush_error_log
#include "sql_view.h" // mysql_create_view, mysql_drop_view
-#include "sql_delete.h" // mysql_truncate, mysql_delete
+#include "sql_delete.h" // mysql_delete
#include "sql_insert.h" // mysql_insert
#include "sql_update.h" // mysql_update, mysql_multi_update
#include "sql_partition.h" // struct partition_info
@@ -49,7 +49,6 @@
// mysql_recreate_table,
// mysql_backup_table,
// mysql_restore_table
-#include "sql_truncate.h" // mysql_truncate_table
#include "sql_reload.h" // reload_acl_and_cache
#include "sql_admin.h" // mysql_assign_to_keycache
#include "sql_connect.h" // check_user,
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc
index fee33303a04..98750314a4a 100644
--- a/sql/sql_partition_admin.cc
+++ b/sql/sql_partition_admin.cc
@@ -16,10 +16,10 @@
#include "sql_parse.h" // check_one_table_access
#include "sql_table.h" // mysql_alter_table, etc.
#include "sql_lex.h" // Sql_statement
-#include "sql_truncate.h" // mysql_truncate_table,
- // Truncate_statement
#include "sql_admin.h" // Analyze/Check/.._table_statement
#include "sql_partition_admin.h" // Alter_table_*_partition
+#include "ha_partition.h" // ha_partition
+#include "sql_base.h" // open_and_lock_tables
#ifndef WITH_PARTITION_STORAGE_ENGINE
@@ -46,7 +46,7 @@ bool Alter_table_analyze_partition_statement::execute(THD *thd)
m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
res= Analyze_table_statement::execute(thd);
-
+
DBUG_RETURN(res);
}
@@ -104,36 +104,85 @@ bool Alter_table_repair_partition_statement::execute(THD *thd)
bool Alter_table_truncate_partition_statement::execute(THD *thd)
{
+ int error;
+ ha_partition *partition;
+ ulong timeout= thd->variables.lock_wait_timeout;
TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
- bool res;
- enum_sql_command original_sql_command;
DBUG_ENTER("Alter_table_truncate_partition_statement::execute");
/*
- Execute TRUNCATE PARTITION just like TRUNCATE TABLE.
- Some storage engines (InnoDB, partition) checks thd_sql_command,
- so we set it to SQLCOM_TRUNCATE during the execution.
- */
- original_sql_command= m_lex->sql_command;
- m_lex->sql_command= SQLCOM_TRUNCATE;
-
- /*
Flag that it is an ALTER command which administrates partitions, used
by ha_partition.
*/
- m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
-
+ m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION |
+ ALTER_TRUNCATE_PARTITION;
+
+ /* Fix the lock types (not the same as ordinary ALTER TABLE). */
+ first_table->lock_type= TL_WRITE;
+ first_table->mdl_request.set_type(MDL_EXCLUSIVE);
+
/*
- Fix the lock types (not the same as ordinary ALTER TABLE).
+ Check table permissions and open it with an exclusive lock.
+ Ensure it is a partitioned table and finally, upcast the
+ handler and invoke the partition truncate method. Lastly,
+ write the statement to the binary log if necessary.
*/
- first_table->lock_type= TL_WRITE;
- first_table->mdl_request.set_type(MDL_SHARED_NO_READ_WRITE);
- /* execute as a TRUNCATE TABLE */
- res= Truncate_statement::execute(thd);
+ if (check_one_table_access(thd, DROP_ACL, first_table))
+ DBUG_RETURN(TRUE);
- m_lex->sql_command= original_sql_command;
- DBUG_RETURN(res);
+ if (open_and_lock_tables(thd, first_table, FALSE, 0))
+ DBUG_RETURN(TRUE);
+
+ /*
+ TODO: Add support for TRUNCATE PARTITION for NDB and other
+ engines supporting native partitioning.
+ */
+ if (first_table->table->s->db_type() != partition_hton)
+ {
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ Under locked table modes this might still not be an exclusive
+ lock. Hence, upgrade the lock since the handler truncate method
+ mandates an exclusive metadata lock.
+ */
+ MDL_ticket *ticket= first_table->table->mdl_ticket;
+ if (thd->mdl_context.upgrade_shared_lock_to_exclusive(ticket, timeout))
+ DBUG_RETURN(TRUE);
+
+ tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN, first_table->db,
+ first_table->table_name, FALSE);
+
+ partition= (ha_partition *) first_table->table->file;
+
+ /* Invoke the handler method responsible for truncating the partition. */
+ if ((error= partition->truncate_partition(&thd->lex->alter_info)))
+ first_table->table->file->print_error(error, MYF(0));
+
+ /*
+ All effects of a truncate operation are committed even if the
+ operation fails. Thus, the query must be written to the binary
+ log. The only exception is an unimplemented truncate method. Also,
+ it is logged in statement format, regardless of the binlog format.
+ */
+ if (error != HA_ERR_WRONG_COMMAND)
+ error|= write_bin_log(thd, !error, thd->query(), thd->query_length());
+
+ /*
+ A locked table ticket was upgraded to an exclusive lock. After the
+ query has been written to the binary log, downgrade the lock
+ to a shared one.
+ */
+ if (thd->locked_tables_mode)
+ ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+
+ if (! error)
+ my_ok(thd);
+
+ DBUG_RETURN(error);
}
#endif /* WITH_PARTITION_STORAGE_ENGINE */
diff --git a/sql/sql_partition_admin.h b/sql/sql_partition_admin.h
index 36bafec4202..564b8676be8 100644
--- a/sql/sql_partition_admin.h
+++ b/sql/sql_partition_admin.h
@@ -210,7 +210,7 @@ public:
/**
Class that represents the ALTER TABLE t1 TRUNCATE PARTITION p statement.
*/
-class Alter_table_truncate_partition_statement : public Truncate_statement
+class Alter_table_truncate_partition_statement : public Sql_statement
{
public:
/**
@@ -218,10 +218,10 @@ public:
@param lex the LEX structure for this statement.
*/
Alter_table_truncate_partition_statement(LEX *lex)
- : Truncate_statement(lex)
+ : Sql_statement(lex)
{}
- ~Alter_table_truncate_partition_statement()
+ virtual ~Alter_table_truncate_partition_statement()
{}
/**
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index f88282487d9..451277712db 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -42,9 +42,8 @@ extern struct st_mysql_plugin *mysql_mandatory_plugins[];
@note The order of the enumeration is critical.
@see construct_options
*/
-static const char *global_plugin_typelib_names[]=
- { "OFF", "ON", "FORCE", NULL };
-enum enum_plugin_load_policy {PLUGIN_OFF, PLUGIN_ON, PLUGIN_FORCE};
+const char *global_plugin_typelib_names[]=
+ { "OFF", "ON", "FORCE", "FORCE_PLUS_PERMANENT", NULL };
static TYPELIB global_plugin_typelib=
{ array_elements(global_plugin_typelib_names)-1,
"", global_plugin_typelib_names, NULL };
@@ -800,6 +799,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
tmp.name.length= name_len;
tmp.ref_count= 0;
tmp.state= PLUGIN_IS_UNINITIALIZED;
+ tmp.load_option= PLUGIN_ON;
if (test_plugin_options(tmp_root, &tmp, argc, argv))
tmp.state= PLUGIN_IS_DISABLED;
@@ -1241,7 +1241,7 @@ int plugin_init(int *argc, char **argv, int flags)
tmp.name.str= (char *)plugin->name;
tmp.name.length= strlen(plugin->name);
tmp.state= 0;
- tmp.is_mandatory= mandatory;
+ tmp.load_option= mandatory ? PLUGIN_FORCE : PLUGIN_ON;
/*
If the performance schema is compiled in,
@@ -1260,7 +1260,7 @@ int plugin_init(int *argc, char **argv, int flags)
to work, by using '--skip-performance-schema' (the plugin)
*/
if (!my_strcasecmp(&my_charset_latin1, plugin->name, "PERFORMANCE_SCHEMA"))
- tmp.is_mandatory= true;
+ tmp.load_option= PLUGIN_FORCE;
free_root(&tmp_root, MYF(MY_MARK_BLOCKS_FREE));
if (test_plugin_options(&tmp_root, &tmp, argc, argv))
@@ -1338,7 +1338,8 @@ int plugin_init(int *argc, char **argv, int flags)
while ((plugin_ptr= *(--reap)))
{
mysql_mutex_unlock(&LOCK_plugin);
- if (plugin_ptr->is_mandatory)
+ if (plugin_ptr->load_option == PLUGIN_FORCE ||
+ plugin_ptr->load_option == PLUGIN_FORCE_PLUS_PERMANENT)
reaped_mandatory_plugin= TRUE;
plugin_deinitialize(plugin_ptr, true);
mysql_mutex_lock(&LOCK_plugin);
@@ -1848,6 +1849,11 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str);
goto err;
}
+ if (plugin->load_option == PLUGIN_FORCE_PLUS_PERMANENT)
+ {
+ my_error(ER_PLUGIN_IS_PERMANENT, MYF(0), name->str);
+ goto err;
+ }
plugin->state= PLUGIN_IS_DELETED;
if (plugin->ref_count)
@@ -3058,7 +3064,8 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
plugin_dash.length + 1);
strxmov(plugin_name_with_prefix_ptr, plugin_dash.str, plugin_name_ptr, NullS);
- if (!tmp->is_mandatory)
+ if (tmp->load_option != PLUGIN_FORCE &&
+ tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT)
{
/* support --skip-plugin-foo syntax */
options[0].name= plugin_name_ptr;
@@ -3318,7 +3325,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
{
struct sys_var_chain chain= { NULL, NULL };
bool disable_plugin;
- enum_plugin_load_policy plugin_load_policy= tmp->is_mandatory ? PLUGIN_FORCE : PLUGIN_ON;
+ enum_plugin_load_option plugin_load_option= tmp->load_option;
MEM_ROOT *mem_root= alloc_root_inited(&tmp->mem_root) ?
&tmp->mem_root : &plugin_mem_root;
@@ -3339,7 +3346,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
*/
if (!(my_strcasecmp(&my_charset_latin1, tmp->name.str, "federated") &&
my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster")))
- plugin_load_policy= PLUGIN_OFF;
+ plugin_load_option= PLUGIN_OFF;
for (opt= tmp->plugin->system_vars; opt && *opt; opt++)
count+= 2; /* --{plugin}-{optname} and --plugin-{plugin}-{optname} */
@@ -3363,8 +3370,9 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
We adjust the default value to account for the hardcoded exceptions
we have set for the federated and ndbcluster storage engines.
*/
- if (!tmp->is_mandatory)
- opts[0].def_value= opts[1].def_value= plugin_load_policy;
+ if (tmp->load_option != PLUGIN_FORCE &&
+ tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT)
+ opts[0].def_value= opts[1].def_value= plugin_load_option;
error= handle_options(argc, &argv, opts, NULL);
(*argc)++; /* add back one for the program name */
@@ -3379,12 +3387,13 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
Set plugin loading policy from option value. First element in the option
list is always the <plugin name> option value.
*/
- if (!tmp->is_mandatory)
- plugin_load_policy= (enum_plugin_load_policy)*(ulong*)opts[0].value;
+ if (tmp->load_option != PLUGIN_FORCE &&
+ tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT)
+ plugin_load_option= (enum_plugin_load_option) *(ulong*) opts[0].value;
}
- disable_plugin= (plugin_load_policy == PLUGIN_OFF);
- tmp->is_mandatory= (plugin_load_policy == PLUGIN_FORCE);
+ disable_plugin= (plugin_load_option == PLUGIN_OFF);
+ tmp->load_option= plugin_load_option;
/*
If the plugin is disabled it should not be initialized.
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index 079dc4e6dca..5fa9afb3066 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -32,6 +32,9 @@
class sys_var;
enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED};
+enum enum_plugin_load_option { PLUGIN_OFF, PLUGIN_ON, PLUGIN_FORCE,
+ PLUGIN_FORCE_PLUS_PERMANENT };
+extern const char *global_plugin_typelib_names[];
#include <my_sys.h>
@@ -95,7 +98,7 @@ struct st_plugin_int
void *data; /* plugin type specific, e.g. handlerton */
MEM_ROOT mem_root; /* memory for dynamic plugin structures */
sys_var *system_vars; /* server variables for this plugin */
- bool is_mandatory; /* If true then plugin must not fail to load */
+ enum enum_plugin_load_option load_option; /* OFF, ON, FORCE, F+PERMANENT */
};
@@ -110,6 +113,7 @@ typedef struct st_plugin_int *plugin_ref;
#define plugin_data(pi,cast) ((cast)((pi)->data))
#define plugin_name(pi) (&((pi)->name))
#define plugin_state(pi) ((pi)->state)
+#define plugin_load_option(pi) ((pi)->load_option)
#define plugin_equals(p1,p2) ((p1) == (p2))
#else
typedef struct st_plugin_int **plugin_ref;
@@ -118,6 +122,7 @@ typedef struct st_plugin_int **plugin_ref;
#define plugin_data(pi,cast) ((cast)((pi)[0]->data))
#define plugin_name(pi) (&((pi)[0]->name))
#define plugin_state(pi) ((pi)[0]->state)
+#define plugin_load_option(pi) ((pi)[0]->load_option)
#define plugin_equals(p1,p2) ((p1) && (p2) && (p1)[0] == (p2)[0])
#endif
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 6b24e3db7bc..16deb50b17c 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -211,6 +211,11 @@ static my_bool show_plugins(THD *thd, plugin_ref plugin,
}
table->field[9]->set_notnull();
+ table->field[10]->store(
+ global_plugin_typelib_names[plugin_load_option(plugin)],
+ strlen(global_plugin_typelib_names[plugin_load_option(plugin)]),
+ cs);
+
return schema_table_store_record(thd, table);
}
@@ -5063,8 +5068,8 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
while ((f_key_info=it++))
{
if (store_constraints(thd, table, db_name, table_name,
- f_key_info->forein_id->str,
- strlen(f_key_info->forein_id->str),
+ f_key_info->foreign_id->str,
+ strlen(f_key_info->foreign_id->str),
"FOREIGN KEY", 11))
DBUG_RETURN(1);
}
@@ -5263,8 +5268,8 @@ static int get_schema_key_column_usage_record(THD *thd,
f_idx++;
restore_record(table, s->default_values);
store_key_column_usage(table, db_name, table_name,
- f_key_info->forein_id->str,
- f_key_info->forein_id->length,
+ f_key_info->foreign_id->str,
+ f_key_info->foreign_id->length,
f_info->str, f_info->length,
(longlong) f_idx);
table->field[8]->store((longlong) f_idx, TRUE);
@@ -6053,8 +6058,8 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[9]->store(table_name->str, table_name->length, cs);
- table->field[2]->store(f_key_info->forein_id->str,
- f_key_info->forein_id->length, cs);
+ table->field[2]->store(f_key_info->foreign_id->str,
+ f_key_info->foreign_id->length, cs);
table->field[3]->store(STRING_WITH_LEN("def"), cs);
table->field[4]->store(f_key_info->referenced_db->str,
f_key_info->referenced_db->length, cs);
@@ -7214,6 +7219,7 @@ ST_FIELD_INFO plugin_fields_info[]=
{"PLUGIN_AUTHOR", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License", SKIP_OPEN_TABLE},
+ {"LOAD_OPTION", 64, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
};
diff --git a/sql/sql_string.h b/sql/sql_string.h
index d21b5353b76..845b7c280b1 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -265,8 +265,12 @@ public:
CHARSET_INFO *csto, uint *errors);
bool append(const String &s);
bool append(const char *s);
- bool append(const char *s,uint32 arg_length);
- bool append(const char *s,uint32 arg_length, CHARSET_INFO *cs);
+ bool append(LEX_STRING *ls)
+ {
+ return append(ls->str, ls->length);
+ }
+ bool append(const char *s, uint32 arg_length);
+ bool append(const char *s, uint32 arg_length, CHARSET_INFO *cs);
bool append_ulonglong(ulonglong val);
bool append(IO_CACHE* file, uint32 arg_length);
bool append_with_prefill(const char *s, uint32 arg_length,
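A small usage sketch (hypothetical values) for the new append(LEX_STRING *) overload, which simply forwards to append(str, length) and is what the foreign-key helpers in sql_truncate.cc below rely on:

    LEX_STRING name= { (char *) "child_id", 8 };   /* hypothetical identifier */
    String str;
    str.append("`");
    str.append(&name);     /* new overload: appends name.str, name.length */
    str.append("`");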
diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc
index c0bc726a188..0cff2875ac8 100644
--- a/sql/sql_truncate.cc
+++ b/sql/sql_truncate.cc
@@ -13,11 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#include "sql_priv.h"
-#include "transaction.h"
-#include "debug_sync.h"
-#include "records.h" // READ_RECORD
-#include "table.h" // TABLE
+#include "debug_sync.h" // DEBUG_SYNC
+#include "table.h" // TABLE, FOREIGN_KEY_INFO
#include "sql_class.h" // THD
#include "sql_base.h" // open_and_lock_tables
#include "sql_table.h" // write_bin_log
@@ -29,145 +26,212 @@
#include "sql_truncate.h"
-/*
- Delete all rows of a locked table.
+/**
+ Append a list of field names to a string.
- @param thd Thread context.
- @param table_list Table list element for the table.
- @param rows_deleted Whether rows might have been deleted.
+ @param str The string.
+ @param fields The list of field names.
- @retval FALSE Success.
- @retval TRUE Error.
+ @return TRUE on failure, FALSE otherwise.
*/
-static bool
-delete_all_rows(THD *thd, TABLE *table)
+static bool fk_info_append_fields(String *str, List<LEX_STRING> *fields)
{
- int error;
- READ_RECORD info;
- bool is_bulk_delete;
- bool some_rows_deleted= FALSE;
- bool save_binlog_row_based= thd->is_current_stmt_binlog_format_row();
- DBUG_ENTER("delete_all_rows");
-
- /* Replication of truncate table must be statement based. */
- thd->clear_current_stmt_binlog_format_row();
+ bool res= FALSE;
+ LEX_STRING *field;
+ List_iterator_fast<LEX_STRING> it(*fields);
- /*
- Update handler statistics (e.g. table->file->stats.records).
- Might be used by the storage engine to aggregate information
- necessary to allow deletion. Currently, this seems to be
- meaningful only to the archive storage engine, which uses
- the info method to set the number of records. Although
- archive does not support deletion, it becomes necessary in
- order to return a error if the table is not empty.
- */
- error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
- if (error && error != HA_ERR_WRONG_COMMAND)
+ while ((field= it++))
{
- table->file->print_error(error, MYF(0));
- goto end;
+ res|= str->append("`");
+ res|= str->append(field);
+ res|= str->append("`, ");
}
- /*
- Attempt to delete all rows in the table.
- If it is unsupported, switch to row by row deletion.
- */
- if (! (error= table->file->ha_delete_all_rows()))
- goto end;
+ str->chop();
+ str->chop();
- if (error != HA_ERR_WRONG_COMMAND)
- {
- /*
- If a transactional engine fails in the middle of deletion,
- we expect it to be able to roll it back. Some reasons
- for the engine to fail would be media failure or corrupted
- data dictionary (i.e. in case of a partitioned table). We
- have sufficiently strong metadata locks to rule out any
- potential deadlocks.
-
- If a non-transactional engine fails here (that would
- not be MyISAM, since MyISAM does TRUNCATE by recreate),
- and binlog is on, replication breaks, since nothing gets
- written to the binary log. (XXX: is this a bug?)
- */
- table->file->print_error(error, MYF(0));
- goto end;
- }
+ return res;
+}
+
+
+/**
+ Generate a foreign key description suitable for an error message.
+
+ @param thd Thread context.
+ @param fk_info The foreign key information.
+
+ @return A human-readable string describing the foreign key.
+*/
+
+static const char *fk_info_str(THD *thd, FOREIGN_KEY_INFO *fk_info)
+{
+ bool res= FALSE;
+ char buffer[STRING_BUFFER_USUAL_SIZE*2];
+ String str(buffer, sizeof(buffer), system_charset_info);
+
+ str.length(0);
/*
- A workaround for Bug#53696 "Performance schema engine violates the
- PSEA API by calling my_error()".
+ `db`.`tbl`, CONSTRAINT `id` FOREIGN KEY (`fk`) REFERENCES `db`.`tbl` (`fk`)
*/
- if (thd->is_error())
- goto end;
- /* Handler didn't support fast delete. Delete rows one by one. */
+ res|= str.append('`');
+ res|= str.append(fk_info->foreign_db);
+ res|= str.append("`.`");
+ res|= str.append(fk_info->foreign_table);
+ res|= str.append("`, CONSTRAINT `");
+ res|= str.append(fk_info->foreign_id);
+ res|= str.append("` FOREIGN KEY (");
+ res|= fk_info_append_fields(&str, &fk_info->foreign_fields);
+ res|= str.append(") REFERENCES `");
+ res|= str.append(fk_info->referenced_db);
+ res|= str.append("`.`");
+ res|= str.append(fk_info->referenced_table);
+ res|= str.append("` (");
+ res|= fk_info_append_fields(&str, &fk_info->referenced_fields);
+ res|= str.append(')');
+
+ return res ? NULL : thd->strmake(str.ptr(), str.length());
+}
+
+
+/**
+ Check and emit a fatal error if the table which is going to be
+ affected by TRUNCATE TABLE is a parent table in some non-self-
+ referencing foreign key.
+
+  @remark The intention is to allow truncate only for tables that
+          no other table references through a foreign key.
+
+ @param thd Thread context.
+ @param table Table handle.
+
+ @retval FALSE This table is not parent in a non-self-referencing foreign
+ key. Statement can proceed.
+ @retval TRUE This table is parent in a non-self-referencing foreign key,
+ error was emitted.
+*/
+
+static bool
+fk_truncate_illegal_if_parent(THD *thd, TABLE *table)
+{
+ FOREIGN_KEY_INFO *fk_info;
+ List<FOREIGN_KEY_INFO> fk_list;
+ List_iterator_fast<FOREIGN_KEY_INFO> it;
- init_read_record(&info, thd, table, NULL, TRUE, TRUE, FALSE);
+ /*
+ Bail out early if the table is not referenced by a foreign key.
+ In this case, the table could only be, if at all, a child table.
+ */
+ if (! table->file->referenced_by_foreign_key())
+ return FALSE;
/*
- Start bulk delete. If the engine does not support it, go on,
- it's not an error.
+ This table _is_ referenced by a foreign key. At this point, only
+ self-referencing keys are acceptable. For this reason, get the list
+    of foreign keys referencing this table in order to check the names
+ of the child (dependent) tables.
*/
- is_bulk_delete= ! table->file->start_bulk_delete();
+ table->file->get_parent_foreign_key_list(thd, &fk_list);
- table->mark_columns_needed_for_delete();
+ /* Out of memory when building list. */
+ if (thd->is_error())
+ return TRUE;
- while (!(error= info.read_record(&info)) && !thd->killed)
+ it.init(fk_list);
+
+ /* Loop over the set of foreign keys for which this table is a parent. */
+ while ((fk_info= it++))
{
- if ((error= table->file->ha_delete_row(table->record[0])))
- {
- table->file->print_error(error, MYF(0));
+ DBUG_ASSERT(!my_strcasecmp(system_charset_info,
+ fk_info->referenced_db->str,
+ table->s->db.str));
+
+ DBUG_ASSERT(!my_strcasecmp(system_charset_info,
+ fk_info->referenced_table->str,
+ table->s->table_name.str));
+
+ if (my_strcasecmp(system_charset_info, fk_info->foreign_db->str,
+ table->s->db.str) ||
+ my_strcasecmp(system_charset_info, fk_info->foreign_table->str,
+ table->s->table_name.str))
break;
- }
-
- some_rows_deleted= TRUE;
}
- /* HA_ERR_END_OF_FILE */
- if (error == -1)
- error= 0;
-
- /* Close down the bulk delete. */
- if (is_bulk_delete)
+ /* Table is parent in a non-self-referencing foreign key. */
+ if (fk_info)
{
- int bulk_delete_error= table->file->end_bulk_delete();
- if (bulk_delete_error && !error)
- {
- table->file->print_error(bulk_delete_error, MYF(0));
- error= bulk_delete_error;
- }
+ my_error(ER_TRUNCATE_ILLEGAL_FK, MYF(0), fk_info_str(thd, fk_info));
+ return TRUE;
}
- end_read_record(&info);
+ return FALSE;
+}
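
/*
  A standalone sketch of the same parent-table check, assuming a
  hypothetical plain struct for foreign key metadata rather than
  FOREIGN_KEY_INFO: truncate is blocked as soon as a referencing key
  originates from any table other than the one being truncated.
*/
#include <string>
#include <vector>

struct FkRef { std::string child_db, child_table; };   // hypothetical type

static bool truncate_blocked_by_fk(const std::string &db,
                                   const std::string &table,
                                   const std::vector<FkRef> &referencing_keys)
{
  for (const FkRef &fk : referencing_keys)
  {
    // A key coming from another table makes the truncate illegal.
    if (fk.child_db != db || fk.child_table != table)
      return true;
  }
  return false;    // no referencing keys, or only self-referencing ones
}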
+
+
+/**
+ Open and truncate a locked table.
+
+ @param thd Thread context.
+ @param table_ref Table list element for the table to be truncated.
+ @param is_tmp_table True if element refers to a temp table.
+
+ @retval 0 Success.
+ @retval > 0 Error code.
+*/
+
+int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref,
+ bool is_tmp_table)
+{
+ int error= 0;
+ uint flags;
+ DBUG_ENTER("Truncate_statement::handler_truncate");
/*
- Regardless of the error status, the query must be written to the
- binary log if rows of the table is non-transactional.
+ Can't recreate, the engine must mechanically delete all rows
+ in the table. Use open_and_lock_tables() to open a write cursor.
*/
- if (some_rows_deleted && !table->file->has_transactions())
+
+ /* If it is a temporary table, no need to take locks. */
+ if (is_tmp_table)
+ flags= MYSQL_OPEN_TEMPORARY_ONLY;
+ else
{
- thd->transaction.stmt.modified_non_trans_table= TRUE;
- thd->transaction.all.modified_non_trans_table= TRUE;
+ /* We don't need to load triggers. */
+ DBUG_ASSERT(table_ref->trg_event_map == 0);
+ /*
+ Our metadata lock guarantees that no transaction is reading
+ or writing into the table. Yet, to open a write cursor we need
+      a thr_lock lock. Allow opening base tables only.
+ */
+ table_ref->required_type= FRMTYPE_TABLE;
+ /*
+ Ignore pending FLUSH TABLES since we don't want to release
+ the MDL lock taken above and otherwise there is no way to
+ wait for FLUSH TABLES in deadlock-free fashion.
+ */
+ flags= MYSQL_OPEN_IGNORE_FLUSH | MYSQL_OPEN_SKIP_TEMPORARY;
+ /*
+ Even though we have an MDL lock on the table here, we don't
+ pass MYSQL_OPEN_HAS_MDL_LOCK to open_and_lock_tables
+ since to truncate a MERGE table, we must open and lock
+ merge children, and on those we don't have an MDL lock.
+ Thus clear the ticket to satisfy MDL asserts.
+ */
+ table_ref->mdl_request.ticket= NULL;
}
- if (error || thd->killed)
- goto end;
+ /* Open the table as it will handle some required preparations. */
+ if (open_and_lock_tables(thd, table_ref, FALSE, flags))
+ DBUG_RETURN(1);
- /* Truncate resets the auto-increment counter. */
- error= table->file->ha_reset_auto_increment(0);
- if (error)
- {
- if (error != HA_ERR_WRONG_COMMAND)
- table->file->print_error(error, MYF(0));
- else
- error= 0;
- }
+ /* Whether to truncate regardless of foreign keys. */
+ if (! (thd->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS))
+ error= fk_truncate_illegal_if_parent(thd, table_ref->table);
-end:
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+ if (!error && (error= table_ref->table->file->ha_truncate()))
+ table_ref->table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
}
@@ -225,30 +289,29 @@ static bool recreate_temporary_table(THD *thd, TABLE *table)
/*
- Handle opening and locking if a base table for truncate.
+ Handle locking a base table for truncate.
@param[in] thd Thread context.
@param[in] table_ref Table list element for the table to
be truncated.
@param[out] hton_can_recreate Set to TRUE if table can be dropped
and recreated.
- @param[out] ticket_downgrade Set if a lock must be downgraded after
- truncate is done.
@retval FALSE Success.
@retval TRUE Error.
*/
-static bool open_and_lock_table_for_truncate(THD *thd, TABLE_LIST *table_ref,
- bool *hton_can_recreate,
- MDL_ticket **ticket_downgrade)
+bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref,
+ bool *hton_can_recreate)
{
TABLE *table= NULL;
- handlerton *table_type;
- DBUG_ENTER("open_and_lock_table_for_truncate");
+ DBUG_ENTER("Truncate_statement::lock_table");
+ /* Lock types are set in the parser. */
DBUG_ASSERT(table_ref->lock_type == TL_WRITE);
- DBUG_ASSERT(table_ref->mdl_request.type == MDL_SHARED_NO_READ_WRITE);
+  /* The handler truncate protocol dictates an exclusive lock. */
+ DBUG_ASSERT(table_ref->mdl_request.type == MDL_EXCLUSIVE);
+
/*
Before doing anything else, acquire a metadata lock on the table,
or ensure we have one. We don't use open_and_lock_tables()
@@ -268,103 +331,45 @@ static bool open_and_lock_table_for_truncate(THD *thd, TABLE_LIST *table_ref,
table_ref->table_name, FALSE)))
DBUG_RETURN(TRUE);
- table_type= table->s->db_type();
- *hton_can_recreate= ha_check_storage_engine_flag(table_type,
+ *hton_can_recreate= ha_check_storage_engine_flag(table->s->db_type(),
HTON_CAN_RECREATE);
table_ref->mdl_request.ticket= table->mdl_ticket;
}
else
{
- /*
- Even though we could use the previous execution branch here just as
- well, we must not try to open the table:
- */
+ /* Acquire an exclusive lock. */
DBUG_ASSERT(table_ref->next_global == NULL);
if (lock_table_names(thd, table_ref, NULL,
thd->variables.lock_wait_timeout,
MYSQL_OPEN_SKIP_TEMPORARY))
DBUG_RETURN(TRUE);
- if (dd_frm_storage_engine(thd, table_ref->db, table_ref->table_name,
- &table_type))
+ if (dd_check_storage_engine_flag(thd, table_ref->db, table_ref->table_name,
+ HTON_CAN_RECREATE, hton_can_recreate))
DBUG_RETURN(TRUE);
- *hton_can_recreate= ha_check_storage_engine_flag(table_type,
- HTON_CAN_RECREATE);
}
-#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
- TODO: Add support for TRUNCATE PARTITION for NDB and other engines
- supporting native partitioning.
+ A storage engine can recreate or truncate the table only if there
+ are no references to it from anywhere, i.e. no cached TABLE in the
+ table cache.
*/
- if (thd->lex->alter_info.flags & ALTER_ADMIN_PARTITION &&
- table_type != partition_hton)
- {
- my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
- DBUG_RETURN(TRUE);
- }
-#endif
- DEBUG_SYNC(thd, "lock_table_for_truncate");
-
- if (*hton_can_recreate)
+ if (thd->locked_tables_mode)
{
- /*
- Acquire an exclusive lock. The storage engine can recreate the
- table only if there are no references to it from anywhere, i.e.
- no cached TABLE in the table cache. To remove the table from the
- cache we need an exclusive lock.
- */
- if (thd->locked_tables_mode)
- {
- if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
- DBUG_RETURN(TRUE);
- *ticket_downgrade= table->mdl_ticket;
+ DEBUG_SYNC(thd, "upgrade_lock_for_truncate");
+ /* To remove the table from the cache we need an exclusive lock. */
+ if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
+ DBUG_RETURN(TRUE);
+ m_ticket_downgrade= table->mdl_ticket;
+ /* Close if table is going to be recreated. */
+ if (*hton_can_recreate)
close_all_tables_for_name(thd, table->s, FALSE);
- }
- else
- {
- ulong timeout= thd->variables.lock_wait_timeout;
- if (thd->mdl_context.
- upgrade_shared_lock_to_exclusive(table_ref->mdl_request.ticket,
- timeout))
- DBUG_RETURN(TRUE);
- tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table_ref->db,
- table_ref->table_name, FALSE);
- }
}
else
{
- /*
- Can't recreate, we must mechanically delete all rows in
- the table. Our metadata lock guarantees that no transaction
- is reading or writing into the table. Yet, to open a write
- cursor we need a thr_lock lock. Use open_and_lock_tables()
- to do the necessary job.
- */
-
- /* Allow to open base tables only. */
- table_ref->required_type= FRMTYPE_TABLE;
- /* We don't need to load triggers. */
- DBUG_ASSERT(table_ref->trg_event_map == 0);
- /*
- Even though we have an MDL lock on the table here, we don't
- pass MYSQL_OPEN_HAS_MDL_LOCK to open_and_lock_tables
- since to truncate a MERGE table, we must open and lock
- merge children, and on those we don't have an MDL lock.
- Thus clear the ticket to satisfy MDL asserts.
- */
- table_ref->mdl_request.ticket= NULL;
-
- /*
- Open the table as it will handle some required preparations.
- Ignore pending FLUSH TABLES since we don't want to release
- the MDL lock taken above and otherwise there is no way to
- wait for FLUSH TABLES in deadlock-free fashion.
- */
- if (open_and_lock_tables(thd, table_ref, FALSE,
- MYSQL_OPEN_IGNORE_FLUSH |
- MYSQL_OPEN_SKIP_TEMPORARY))
- DBUG_RETURN(TRUE);
+ /* Table is already locked exclusively. Remove cached instances. */
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table_ref->db,
+ table_ref->table_name, FALSE);
}
DBUG_RETURN(FALSE);
@@ -385,14 +390,17 @@ static bool open_and_lock_table_for_truncate(THD *thd, TABLE_LIST *table_ref,
@retval TRUE Error.
*/
-bool mysql_truncate_table(THD *thd, TABLE_LIST *table_ref)
+bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref)
{
+ int error;
TABLE *table;
- bool error= TRUE, binlog_stmt;
- MDL_ticket *mdl_ticket= NULL;
- DBUG_ENTER("mysql_truncate_table");
+ bool binlog_stmt;
+ DBUG_ENTER("Truncate_statement::truncate_table");
- /* Remove tables from the HANDLER's hash. */
+ /* Initialize, or reinitialize in case of reexecution (SP). */
+ m_ticket_downgrade= NULL;
+
+ /* Remove table from the HANDLER's hash. */
mysql_ha_rm_tables(thd, table_ref);
/* If it is a temporary table, no need to take locks. */
@@ -413,14 +421,11 @@ bool mysql_truncate_table(THD *thd, TABLE_LIST *table_ref)
{
/*
The engine does not support truncate-by-recreate. Open the
- table and delete all rows. In such a manner this can in fact
- open several tables if it's a temporary MyISAMMRG table.
+    table and invoke the handler truncate. This can in fact open
+    several tables if it's a temporary MyISAMMRG table.
*/
- if (open_and_lock_tables(thd, table_ref, FALSE,
- MYSQL_OPEN_TEMPORARY_ONLY))
- DBUG_RETURN(TRUE);
-
- error= delete_all_rows(thd, table_ref->table);
+ error= handler_truncate(thd, table_ref, TRUE);
}
/*
@@ -434,8 +439,7 @@ bool mysql_truncate_table(THD *thd, TABLE_LIST *table_ref)
{
bool hton_can_recreate;
- if (open_and_lock_table_for_truncate(thd, table_ref,
- &hton_can_recreate, &mdl_ticket))
+ if (lock_table(thd, table_ref, &hton_can_recreate))
DBUG_RETURN(TRUE);
if (hton_can_recreate)
@@ -454,13 +458,18 @@ bool mysql_truncate_table(THD *thd, TABLE_LIST *table_ref)
}
else
{
- error= delete_all_rows(thd, table_ref->table);
+ /*
+ The engine does not support truncate-by-recreate.
+ Attempt to use the handler truncate method.
+ */
+ error= handler_truncate(thd, table_ref, FALSE);
/*
- Regardless of the error status, the query must be written to the
- binary log if rows of a non-transactional table were deleted.
+ All effects of a TRUNCATE TABLE operation are committed even if
+ truncation fails. Thus, the query must be written to the binary
+      log. The only exception is an unimplemented truncate method.
*/
- binlog_stmt= !error || thd->transaction.stmt.modified_non_trans_table;
+ binlog_stmt= !error || error != HA_ERR_WRONG_COMMAND;
}
query_cache_invalidate3(thd, table_ref, FALSE);
@@ -471,49 +480,37 @@ bool mysql_truncate_table(THD *thd, TABLE_LIST *table_ref)
error|= write_bin_log(thd, !error, thd->query(), thd->query_length());
/*
- All effects of a TRUNCATE TABLE operation are rolled back if a row
- by row deletion fails. Otherwise, it is automatically committed at
- the end.
- */
- if (error)
- {
- trans_rollback_stmt(thd);
- trans_rollback(thd);
- }
-
- /*
    A locked table ticket was upgraded to an exclusive lock. After
the query has been written to the binary log, downgrade the lock
to a shared one.
*/
- if (mdl_ticket)
- mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ if (m_ticket_downgrade)
+ m_ticket_downgrade->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
- DBUG_PRINT("exit", ("error: %d", error));
- DBUG_RETURN(test(error));
+ DBUG_RETURN(error);
}
+/**
+ Execute a TRUNCATE statement at runtime.
+
+ @param thd The current thread.
+
+ @return FALSE on success.
+*/
+
bool Truncate_statement::execute(THD *thd)
{
- TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
bool res= TRUE;
+ TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
DBUG_ENTER("Truncate_statement::execute");
if (check_one_table_access(thd, DROP_ACL, first_table))
- goto error;
- /*
- Don't allow this within a transaction because we want to use
- re-generate table
- */
- if (thd->in_active_multi_stmt_transaction())
- {
- my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
- ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
- goto error;
- }
- if (! (res= mysql_truncate_table(thd, first_table)))
+ DBUG_RETURN(res);
+
+ if (! (res= truncate_table(thd, first_table)))
my_ok(thd);
-error:
+
DBUG_RETURN(res);
}
+
diff --git a/sql/sql_truncate.h b/sql/sql_truncate.h
index b8b1d3da53d..95a2f35df4f 100644
--- a/sql/sql_truncate.h
+++ b/sql/sql_truncate.h
@@ -18,13 +18,15 @@
class THD;
struct TABLE_LIST;
-bool mysql_truncate_table(THD *thd, TABLE_LIST *table_ref);
-
/**
Truncate_statement represents the TRUNCATE statement.
*/
class Truncate_statement : public Sql_statement
{
+private:
+ /* Set if a lock must be downgraded after truncate is done. */
+ MDL_ticket *m_ticket_downgrade;
+
public:
/**
    Constructor, used to represent a TRUNCATE statement.
@@ -34,7 +36,7 @@ public:
: Sql_statement(lex)
{}
- ~Truncate_statement()
+ virtual ~Truncate_statement()
{}
/**
@@ -43,7 +45,20 @@ public:
@return false on success.
*/
bool execute(THD *thd);
-};
+protected:
+ /** Handle locking a base table for truncate. */
+ bool lock_table(THD *, TABLE_LIST *, bool *);
+
+ /** Truncate table via the handler method. */
+ int handler_truncate(THD *, TABLE_LIST *, bool);
+
+ /**
+    Optimized delete of all rows by doing a full recreation of the table.
+ Depending on the storage engine, it can be accomplished through a
+ drop and recreate or via the handler truncate method.
+ */
+ bool truncate_table(THD *, TABLE_LIST *);
+};
#endif
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 68440f6623a..96b1ac67b49 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -42,11 +42,68 @@
// mysql_handle_derived,
// mysql_derived_filling
-/* Return 0 if row hasn't changed */
-bool compare_record(TABLE *table)
+/**
+ True if the table's input and output record buffers are comparable using
+ compare_records(TABLE*).
+ */
+bool records_are_comparable(const TABLE *table)
+{
+ return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
+ bitmap_is_subset(table->write_set, table->read_set);
+}
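
/*
  A minimal sketch of the subset test above with std::bitset instead of the
  server's MY_BITMAP (names illustrative only): when the engine is allowed
  to skip columns, the compare-based optimization is safe only if every
  written column was also read.
*/
#include <bitset>
#include <cstddef>

template <std::size_t N>
static bool write_set_is_subset_of_read_set(const std::bitset<N> &write_set,
                                            const std::bitset<N> &read_set)
{
  return (write_set & ~read_set).none();
}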
+
+
+/**
+  Compares the input and output record buffers of the table to see if a row
+ has changed. The algorithm iterates over updated columns and if they are
+ nullable compares NULL bits in the buffer before comparing actual
+ data. Special care must be taken to compare only the relevant NULL bits and
+ mask out all others as they may be undefined. The storage engine will not
+ and should not touch them.
+
+ @param table The table to evaluate.
+
+ @return true if row has changed.
+ @return false otherwise.
+*/
+bool compare_records(const TABLE *table)
{
+ DBUG_ASSERT(records_are_comparable(table));
+
+ if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
+ {
+ /*
+ Storage engine may not have read all columns of the record. Fields
+ (including NULL bits) not in the write_set may not have been read and
+ can therefore not be compared.
+ */
+ for (Field **ptr= table->field ; *ptr != NULL; ptr++)
+ {
+ Field *field= *ptr;
+ if (bitmap_is_set(table->write_set, field->field_index))
+ {
+ if (field->real_maybe_null())
+ {
+ uchar null_byte_index= field->null_ptr - table->record[0];
+
+ if (((table->record[0][null_byte_index]) & field->null_bit) !=
+ ((table->record[1][null_byte_index]) & field->null_bit))
+ return TRUE;
+ }
+ if (field->cmp_binary_offset(table->s->rec_buff_length))
+ return TRUE;
+ }
+ }
+ return FALSE;
+ }
+
+ /*
+ The storage engine has read all columns, so it's safe to compare all bits
+ including those not in the write_set. This is cheaper than the field-by-field
+ comparison done above.
+ */
if (table->s->blob_fields + table->s->varchar_fields == 0)
+ // Fixed-size record: do bitwise comparison of the records
return cmp_record(table,record[1]);
/* Compare null bits */
if (memcmp(table->null_flags,
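
/*
  A standalone sketch of the NULL-bit masking described above, on plain
  byte buffers; the record layout and parameter names are illustrative
  only. Compare a single column's NULL bit between two record images while
  ignoring the other, possibly undefined, bits in the same byte, and only
  then compare the column's data bytes.
*/
#include <cstddef>
#include <cstring>

static bool column_changed(const unsigned char *rec0,
                           const unsigned char *rec1,
                           std::size_t null_byte_offset,
                           unsigned char null_bit,
                           std::size_t data_offset, std::size_t data_length)
{
  // Mask out every bit except the one belonging to this column.
  if ((rec0[null_byte_offset] & null_bit) !=
      (rec1[null_byte_offset] & null_bit))
    return true;                                    // NULL-ness differs
  return std::memcmp(rec0 + data_offset,
                     rec1 + data_offset, data_length) != 0;
}
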
@@ -204,7 +261,6 @@ int mysql_update(THD *thd,
bool using_limit= limit != HA_POS_ERROR;
bool safe_update= test(thd->variables.option_bits & OPTION_SAFE_UPDATES);
bool used_key_is_modified= FALSE, transactional_table, will_batch;
- bool can_compare_record;
int res;
int error, loc_error;
uint used_index, dup_key_found;
@@ -579,15 +635,6 @@ int mysql_update(THD *thd,
if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
table->prepare_for_position();
- /*
- We can use compare_record() to optimize away updates if
- the table handler is returning all columns OR if
- if all updated columns are read
- */
- can_compare_record= (!(table->file->ha_table_flags() &
- HA_PARTIAL_COLUMN_READ) ||
- bitmap_is_subset(table->write_set, table->read_set));
-
while (!(error=info.read_record(&info)) && !thd->killed)
{
thd->examined_row_count++;
@@ -605,7 +652,7 @@ int mysql_update(THD *thd,
found++;
- if (!can_compare_record || compare_record(table))
+ if (!records_are_comparable(table) || compare_records(table))
{
if ((res= table_list->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
@@ -1645,18 +1692,8 @@ bool multi_update::send_data(List<Item> &not_used_values)
if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
continue;
- /*
- We can use compare_record() to optimize away updates if
- the table handler is returning all columns OR if
- if all updated columns are read
- */
if (table == table_to_update)
{
- bool can_compare_record;
- can_compare_record= (!(table->file->ha_table_flags() &
- HA_PARTIAL_COLUMN_READ) ||
- bitmap_is_subset(table->write_set,
- table->read_set));
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
@@ -1671,7 +1708,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
*/
table->auto_increment_field_not_null= FALSE;
found++;
- if (!can_compare_record || compare_record(table))
+ if (!records_are_comparable(table) || compare_records(table))
{
int error;
if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1860,7 +1897,6 @@ int multi_update::do_updates()
DBUG_RETURN(0);
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
- bool can_compare_record;
uint offset= cur_table->shared;
table = cur_table->table;
@@ -1897,11 +1933,6 @@ int multi_update::do_updates()
if ((local_error = tmp_table->file->ha_rnd_init(1)))
goto err;
- can_compare_record= (!(table->file->ha_table_flags() &
- HA_PARTIAL_COLUMN_READ) ||
- bitmap_is_subset(table->write_set,
- table->read_set));
-
for (;;)
{
if (thd->killed && trans_safe)
@@ -1942,7 +1973,7 @@ int multi_update::do_updates()
TRG_ACTION_BEFORE, TRUE))
goto err2;
- if (!can_compare_record || compare_record(table))
+ if (!records_are_comparable(table) || compare_records(table))
{
int error;
if ((error= cur_table->view_check_option(thd, ignore)) !=
diff --git a/sql/sql_update.h b/sql/sql_update.h
index 6bf022a171c..50ff50f025d 100644
--- a/sql/sql_update.h
+++ b/sql/sql_update.h
@@ -38,6 +38,7 @@ bool mysql_multi_update(THD *thd, TABLE_LIST *table_list,
enum enum_duplicates handle_duplicates, bool ignore,
SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex,
multi_update **result);
-bool compare_record(TABLE *table);
+bool records_are_comparable(const TABLE *table);
+bool compare_records(const TABLE *table);
#endif /* SQL_UPDATE_INCLUDED */
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 7c24f18aa6a..d73dff9aaeb 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1933,35 +1933,28 @@ master_def:
| MASTER_HEARTBEAT_PERIOD_SYM EQ NUM_literal
{
Lex->mi.heartbeat_period= (float) $3->val_real();
- if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD ||
- Lex->mi.heartbeat_period < 0.0)
- {
- const char format[]= "%d seconds";
- char buf[4*sizeof(SLAVE_MAX_HEARTBEAT_PERIOD) + sizeof(format)];
- sprintf(buf, format, SLAVE_MAX_HEARTBEAT_PERIOD);
- my_error(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
- MYF(0), " is negative or exceeds the maximum ", buf);
- MYSQL_YYABORT;
+ if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD ||
+ Lex->mi.heartbeat_period < 0.0)
+ {
+ const char format[]= "%d";
+ char buf[4*sizeof(SLAVE_MAX_HEARTBEAT_PERIOD) + sizeof(format)];
+ sprintf(buf, format, SLAVE_MAX_HEARTBEAT_PERIOD);
+ my_error(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE, MYF(0), buf);
+ MYSQL_YYABORT;
}
if (Lex->mi.heartbeat_period > slave_net_timeout)
{
push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
- ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE),
- " exceeds the value of `slave_net_timeout' sec.",
- " A sensible value for the period should be"
- " less than the timeout.");
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX,
+ ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX));
}
if (Lex->mi.heartbeat_period < 0.001)
{
if (Lex->mi.heartbeat_period != 0.0)
{
push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
- ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE),
- " is less than 1 msec.",
- " The period is reset to zero which means"
- " no heartbeats will be sending");
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN,
+ ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN));
Lex->mi.heartbeat_period= 0.0;
}
Lex->mi.heartbeat_opt= LEX_MASTER_INFO::LEX_MI_DISABLE;
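
/*
  A standalone sketch of the heartbeat period validation above, with plain
  return codes instead of parser errors and warnings; the limits are passed
  in as parameters and the names are illustrative only. The sketch returns
  the first applicable outcome, whereas the grammar action can emit a
  warning and still continue.
*/
enum heartbeat_check
{
  HB_OK,                  // value accepted as-is
  HB_OUT_OF_RANGE,        // negative or above the configured maximum: error
  HB_WARN_ABOVE_TIMEOUT,  // larger than slave_net_timeout: warn
  HB_RESET_TO_ZERO        // non-zero but below 1 ms: warn and disable
};

static heartbeat_check check_heartbeat_period(double period,
                                              double max_period,
                                              double net_timeout)
{
  if (period < 0.0 || period > max_period)
    return HB_OUT_OF_RANGE;
  if (period > net_timeout)
    return HB_WARN_ABOVE_TIMEOUT;
  if (period != 0.0 && period < 0.001)
    return HB_RESET_TO_ZERO;
  return HB_OK;
}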
@@ -10770,7 +10763,7 @@ truncate:
lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order();
YYPS->m_lock_type= TL_WRITE;
- YYPS->m_mdl_type= MDL_SHARED_NO_READ_WRITE;
+ YYPS->m_mdl_type= MDL_EXCLUSIVE;
}
table_name
{
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 69a01259fc0..5c9df82ddac 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -2014,15 +2014,6 @@ static Sys_var_ulong Sys_thread_cache_size(
GLOBAL_VAR(thread_cache_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 16384), DEFAULT(0), BLOCK_SIZE(1));
-#if HAVE_POOL_OF_THREADS == 1
-static Sys_var_ulong Sys_thread_pool_size(
- "thread_pool_size",
- "How many threads we should create to handle query requests in "
- "case of 'thread_handling=pool-of-threads'",
- GLOBAL_VAR(thread_pool_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(1, 16384), DEFAULT(20), BLOCK_SIZE(0));
-#endif
-
/**
Can't change the 'next' tx_isolation if we are already in a
transaction.
@@ -2943,11 +2934,8 @@ static bool fix_slave_net_timeout(sys_var *self, THD *thd, enum_var_type type)
(active_mi? active_mi->heartbeat_period : 0.0)));
if (active_mi && slave_net_timeout < active_mi->heartbeat_period)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
- "The current value for master_heartbeat_period"
- " exceeds the new value of `slave_net_timeout' sec."
- " A sensible value for the period should be"
- " less than the timeout.");
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX,
+ ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX));
mysql_mutex_unlock(&LOCK_active_mi);
return false;
}
diff --git a/sql/sys_vars.h b/sql/sys_vars.h
index 89e2bdfdf60..e16bd3c5330 100644
--- a/sql/sys_vars.h
+++ b/sql/sys_vars.h
@@ -458,12 +458,10 @@ class Sys_var_proxy_user: public sys_var
public:
Sys_var_proxy_user(const char *name_arg,
const char *comment, enum charset_enum is_os_charset_arg)
- : sys_var(&all_sys_vars, name_arg, comment,
+ : sys_var(&all_sys_vars, name_arg, comment,
sys_var::READONLY+sys_var::ONLY_SESSION, 0, -1,
- NO_ARG, SHOW_CHAR, (intptr)NULL,
- 0, VARIABLE_NOT_IN_BINLOG,
- 0, 0,
- 0, 0, PARSE_NORMAL)
+ NO_ARG, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG,
+ NULL, NULL, 0, NULL, PARSE_NORMAL)
{
is_os_charset= is_os_charset_arg == IN_FS_CHARSET;
option.var_type= GET_STR;
diff --git a/sql/table.h b/sql/table.h
index 6723293c1ec..c8e1ad8e658 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1170,7 +1170,9 @@ enum enum_schema_table_state
typedef struct st_foreign_key_info
{
- LEX_STRING *forein_id;
+ LEX_STRING *foreign_id;
+ LEX_STRING *foreign_db;
+ LEX_STRING *foreign_table;
LEX_STRING *referenced_db;
LEX_STRING *referenced_table;
LEX_STRING *update_method;