Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_partition.cc    | 33
-rw-r--r--  sql/item.cc            | 18
-rw-r--r--  sql/item_cmpfunc.h     | 18
-rw-r--r--  sql/item_sum.cc        |  4
-rw-r--r--  sql/item_timefunc.cc   |  2
-rw-r--r--  sql/log_event.cc       |  2
-rw-r--r--  sql/log_event_old.cc   |  2
-rw-r--r--  sql/mysqld.cc          | 30
-rw-r--r--  sql/share/Makefile.am  |  8
-rw-r--r--  sql/slave.cc           |  1
-rw-r--r--  sql/sql_base.cc        |  1
-rw-r--r--  sql/sql_select.cc      | 79
-rw-r--r--  sql/sql_table.cc       | 80
-rw-r--r--  sql/sql_update.cc      |  3
-rw-r--r--  sql/sql_yacc.yy        |  2
15 files changed, 199 insertions, 84 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 7b9ecd7d902..1d4290c6ab0 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1746,13 +1746,23 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
- handler **file_array= m_file;
+ handler **file_array;
table= table_arg;
table_share= share;
- do
+ /*
+ m_file can be NULL when using an old cached table in DROP TABLE, when the
+ table just has REMOVED PARTITIONING, see Bug#42438
+ */
+ if (m_file)
{
- (*file_array)->change_table_ptr(table_arg, share);
- } while (*(++file_array));
+ file_array= m_file;
+ DBUG_ASSERT(*file_array);
+ do
+ {
+ (*file_array)->change_table_ptr(table_arg, share);
+ } while (*(++file_array));
+ }
+
if (m_added_file && m_added_file[0])
{
/* if in middle of a drop/rename etc */
@@ -6055,7 +6065,13 @@ void ha_partition::print_error(int error, myf errflag)
if (error == HA_ERR_NO_PARTITION_FOUND)
m_part_info->print_no_partition_found(table);
else
- m_file[m_last_part]->print_error(error, errflag);
+ {
+ /* In case m_file has not been initialized, like in bug#42438 */
+ if (m_file)
+ m_file[m_last_part]->print_error(error, errflag);
+ else
+ handler::print_error(error, errflag);
+ }
DBUG_VOID_RETURN;
}
@@ -6065,7 +6081,12 @@ bool ha_partition::get_error_message(int error, String *buf)
DBUG_ENTER("ha_partition::get_error_message");
/* Should probably look for my own errors first */
- DBUG_RETURN(m_file[m_last_part]->get_error_message(error, buf));
+
+ /* In case m_file has not been initialized, like in bug#42438 */
+ if (m_file)
+ DBUG_RETURN(m_file[m_last_part]->get_error_message(error, buf));
+ DBUG_RETURN(handler::get_error_message(error, buf));
+
}
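
Note on the ha_partition hunks above: m_file can legitimately still be unset (the Bug#42438 DROP TABLE case), so every dereference is guarded and the base handler implementation is the fallback. A minimal standalone sketch of that guard/fallback shape, using hypothetical types rather than the real ha_partition API:

#include <cstdio>

struct handler {
  virtual void print_error(int error) { std::printf("base handler: error %d\n", error); }
  virtual ~handler() {}
};

struct child_handler : handler {
  void print_error(int error) { std::printf("child handler: error %d\n", error); }
};

struct partition_set : handler {
  handler **m_file;          /* NULL-terminated array; may be NULL, as in Bug#42438 */
  unsigned m_last_part;

  partition_set() : m_file(0), m_last_part(0) {}

  void print_error(int error) {
    if (m_file)                        /* guarded dereference, as in the patch */
      m_file[m_last_part]->print_error(error);
    else
      handler::print_error(error);     /* fall back to the base implementation */
  }
};

int main() {
  partition_set p;
  p.print_error(42);                   /* m_file is NULL: the base handler answers */

  child_handler c;
  handler *files[]= {&c, 0};           /* NULL-terminated, like m_file */
  p.m_file= files;
  p.print_error(7);                    /* now dispatched to the child handler */
  return 0;
}
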
diff --git a/sql/item.cc b/sql/item.cc
index df266434f72..04496338b8f 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -2209,14 +2209,14 @@ String *Item_int::val_str(String *str)
{
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
- str->set(value, &my_charset_bin);
+ str->set_int(value, unsigned_flag, &my_charset_bin);
return str;
}
void Item_int::print(String *str, enum_query_type query_type)
{
// my_charset_bin is good enough for numbers
- str_value.set(value, &my_charset_bin);
+ str_value.set_int(value, unsigned_flag, &my_charset_bin);
str->append(str_value);
}
@@ -5690,9 +5690,14 @@ void Item_field::print(String *str, enum_query_type query_type)
char buff[MAX_FIELD_WIDTH];
String tmp(buff,sizeof(buff),str->charset());
field->val_str(&tmp);
- str->append('\'');
- str->append(tmp);
- str->append('\'');
+ if (field->is_null())
+ str->append("NULL");
+ else
+ {
+ str->append('\'');
+ str->append(tmp);
+ str->append('\'');
+ }
return;
}
Item_ident::print(str, query_type);
@@ -6488,7 +6493,8 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
{
if (!arg)
{
- if (field_arg->flags & NO_DEFAULT_VALUE_FLAG)
+ if (field_arg->flags & NO_DEFAULT_VALUE_FLAG &&
+ field_arg->real_type() != MYSQL_TYPE_ENUM)
{
if (field_arg->reset())
{
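
The Item_int change switches to set_int() so the stored unsigned_flag is honoured when the value is rendered. A quick standalone illustration (plain C formatting, not the server's String class) of why that flag matters for the same 64-bit bit pattern:

#include <cstdio>
#include <cstddef>

static void format_int(long long value, bool unsigned_flag,
                       char *buf, std::size_t len) {
  if (unsigned_flag)
    std::snprintf(buf, len, "%llu", (unsigned long long) value);
  else
    std::snprintf(buf, len, "%lld", value);
}

int main() {
  long long v= -1;                         /* same bits as 18446744073709551615 */
  char buf[32];
  format_int(v, false, buf, sizeof buf);
  std::printf("signed:   %s\n", buf);      /* -1 */
  format_int(v, true, buf, sizeof buf);
  std::printf("unsigned: %s\n", buf);      /* 18446744073709551615 */
  return 0;
}
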
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 38025ff0af5..425f54fb079 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1474,9 +1474,21 @@ public:
Item_cond(THD *thd, Item_cond *item);
Item_cond(List<Item> &nlist)
:Item_bool_func(), list(nlist), abort_on_null(0) {}
- bool add(Item *item) { return list.push_back(item); }
- bool add_at_head(Item *item) { return list.push_front(item); }
- void add_at_head(List<Item> *nlist) { list.prepand(nlist); }
+ bool add(Item *item)
+ {
+ DBUG_ASSERT(item);
+ return list.push_back(item);
+ }
+ bool add_at_head(Item *item)
+ {
+ DBUG_ASSERT(item);
+ return list.push_front(item);
+ }
+ void add_at_head(List<Item> *nlist)
+ {
+ DBUG_ASSERT(nlist->elements);
+ list.prepand(nlist);
+ }
bool fix_fields(THD *, Item **ref);
enum Type type() const { return COND_ITEM; }
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 4ab8e75ddf5..142e90639e8 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3402,6 +3402,8 @@ String* Item_func_group_concat::val_str(String* str)
void Item_func_group_concat::print(String *str, enum_query_type query_type)
{
+ /* orig_args is not filled with valid values until fix_fields() */
+ Item **pargs= fixed ? orig_args : args;
str->append(STRING_WITH_LEN("group_concat("));
if (distinct)
str->append(STRING_WITH_LEN("distinct "));
@@ -3409,7 +3411,7 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type)
{
if (i)
str->append(',');
- args[i]->print(str, query_type);
+ pargs[i]->print(str, query_type);
}
if (arg_count_order)
{
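
The group_concat fix prints from orig_args only once fix_fields() has filled it; before that point the raw args are the only valid source. A rough standalone model of that choice (hypothetical names, not the real Item_func_group_concat members):

#include <cstdio>
#include <string>
#include <vector>

struct FuncItem {
  std::vector<std::string> args;       /* may be rewritten during fix-up */
  std::vector<std::string> orig_args;  /* only filled in by fix() */
  bool fixed;

  FuncItem() : fixed(false) {}

  void fix() {
    orig_args= args;                   /* valid from here on */
    for (size_t i= 0; i < args.size(); i++)
      args[i]= "ref(" + args[i] + ")"; /* stand-in for the optimizer rewrite */
    fixed= true;
  }

  void print() const {
    const std::vector<std::string> &pargs= fixed ? orig_args : args;
    std::printf("group_concat(");
    for (size_t i= 0; i < pargs.size(); i++)
      std::printf("%s%s", i ? "," : "", pargs[i].c_str());
    std::printf(")\n");
  }
};

int main() {
  FuncItem f;
  f.args.push_back("a");
  f.args.push_back("b");
  f.print();   /* before fix(): prints the raw args */
  f.fix();
  f.print();   /* after fix(): prints the saved originals, not the rewrites */
  return 0;
}
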
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index ded4d28ca29..4248c2e6b4f 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -876,7 +876,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs,
value= value*LL(10) + (longlong) (*str - '0');
if (transform_msec && i == count - 1) // microseconds always last
{
- int msec_length= 6 - (int)(str - start);
+ int msec_length= 6 - (int) (str - start);
if (msec_length > 0)
value*= (long)log_10_int[msec_length];
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 2398f33dc23..8e8e7bd4338 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -8753,7 +8753,7 @@ static bool record_compare(TABLE *table)
DBUG_DUMP("record[1]", table->record[1], table->s->reclength);
bool result= FALSE;
- uchar saved_x[2], saved_filler[2];
+ uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0};
if (table->s->null_bytes > 0)
{
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 313916c9818..0f501fd1514 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -337,7 +337,7 @@ static bool record_compare(TABLE *table)
*/
bool result= FALSE;
- uchar saved_x[2], saved_filler[2];
+ uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0};
if (table->s->null_bytes > 0)
{
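
The two record_compare() hunks above zero-initialize the scratch bytes because the surrounding save/restore work depends on null_bytes; when it is 0 nothing ever writes them, and later reads would otherwise touch uninitialized memory. A tiny standalone sketch of that save/restore shape:

#include <cstdio>

int main() {
  unsigned char record[2][8]= {{0x5A, 0, 0, 0, 0, 0, 0, 0x3C},
                               {0x5A, 0, 0, 0, 0, 0, 0, 0x3C}};
  int null_bytes= 0;                             /* legitimately 0 for some tables */

  unsigned char saved_x[2]= {0, 0}, saved_filler[2]= {0, 0};
  if (null_bytes > 0)                            /* save only when there is something */
    for (int i= 0; i < 2; i++) {
      saved_x[i]= record[i][0];
      saved_filler[i]= record[i][null_bytes - 1];
    }

  /* ... field-by-field comparison would run here ... */

  if (null_bytes > 0)                            /* symmetric conditional restore */
    for (int i= 0; i < 2; i++) {
      record[i][0]= saved_x[i];
      record[i][null_bytes - 1]= saved_filler[i];
    }

  std::printf("saved_x=%d/%d saved_filler=%d/%d\n",
              saved_x[0], saved_x[1], saved_filler[0], saved_filler[1]);
  return 0;
}
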
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index a483b9e2381..f658a7c8c3c 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1287,6 +1287,7 @@ void clean_up(bool print_message)
lex_free(); /* Free some memory */
item_create_cleanup();
set_var_free();
+ free_charsets();
if (!opt_noacl)
{
#ifdef HAVE_DLOPEN
@@ -6076,8 +6077,8 @@ each time the SQL thread starts.",
TC_LOG_PAGE_SIZE, 0},
#endif
{"log-update", OPT_UPDATE_LOG,
- "The update log is deprecated since version 5.0, is replaced by the binary \
-log and this option justs turns on --log-bin instead.",
+ "The update log is deprecated since version 5.0, is replaced by the binary "
+ "log and this option just turns on --log-bin instead.",
(uchar**) &opt_update_logname, (uchar**) &opt_update_logname, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
{"log-warnings", 'W', "Log some not critical warnings to the log file.",
@@ -6337,7 +6338,10 @@ thread is in the relay logs.",
"Tells the slave thread to not replicate to the specified database. To specify more than one database to ignore, use the directive multiple times, once for each database. This option will not work if you use cross database updates. If you need cross database updates to work, make sure you have 3.23.28 or later, and use replicate-wild-ignore-table=db_name.%. ",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-ignore-table", OPT_REPLICATE_IGNORE_TABLE,
- "Tells the slave thread to not replicate to the specified table. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-datbase updates, in contrast to replicate-ignore-db.",
+ "Tells the slave thread to not replicate to the specified table. To specify "
+ "more than one table to ignore, use the directive multiple times, once for "
+ "each table. This will work for cross-database updates, in contrast to "
+ "replicate-ignore-db.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB,
"Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.",
@@ -6359,7 +6363,13 @@ Can't be set to 1 if --log-slave-updates is used.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
// In replication, we may need to tell the other servers how to connect
{"report-host", OPT_REPORT_HOST,
- "Hostname or IP of the slave to be reported to to the master during slave registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset if you do not want the slave to register itself with the master. Note that it is not sufficient for the master to simply read the IP of the slave off the socket once the slave connects. Due to NAT and other routing issues, that IP may not be valid for connecting to the slave from the master or other hosts.",
+ "Hostname or IP of the slave to be reported to the master during slave "
+ "registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset "
+ "if you do not want the slave to register itself with the master. Note that "
+ "it is not sufficient for the master to simply read the IP of the slave "
+ "from the socket once the slave connects. Due to NAT and other routing "
+ "issues, that IP may not be valid for connecting to the slave from the "
+ "master or other hosts.",
(uchar**) &report_host, (uchar**) &report_host, 0, GET_STR, REQUIRED_ARG, 0, 0,
0, 0, 0, 0},
{"report-password", OPT_REPORT_PASSWORD, "Undocumented.",
@@ -6664,7 +6674,10 @@ log and this option does nothing anymore.",
(uchar**) &max_system_variables.keep_files_on_create,
0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"key_buffer_size", OPT_KEY_BUFFER_SIZE,
- "The size of the buffer used for index blocks for MyISAM tables. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.",
+ "The size of the buffer used for index blocks for MyISAM tables. Increase "
+ "this to get better index handling (for all reads and multiple writes) to "
+ "as much as you can afford; 1GB on a 4GB machine that mainly runs MySQL is "
+ "quite common.",
(uchar**) &dflt_key_cache_var.param_buff_size,
(uchar**) 0,
0, (GET_ULL | GET_ASK_ADDR),
@@ -6835,7 +6848,9 @@ The minimum value for this variable is 4096.",
(uchar**) &myisam_mmap_size, (uchar**) &myisam_mmap_size, 0,
GET_ULL, REQUIRED_ARG, SIZE_T_MAX, MEMMAP_EXTRA_MARGIN, SIZE_T_MAX, 0, 1, 0},
{"myisam_repair_threads", OPT_MYISAM_REPAIR_THREADS,
- "Number of threads to use when repairing MyISAM tables. The value of 1 disables parallel repair.",
+ "Specifies whether several threads should be used when repairing MyISAM "
+ "tables. For values > 1, one thread is used per index. The value of 1 "
+ "disables parallel repair.",
(uchar**) &global_system_variables.myisam_repair_threads,
(uchar**) &max_system_variables.myisam_repair_threads, 0,
GET_ULONG, REQUIRED_ARG, 1, 1, ULONG_MAX, 0, 1, 0},
@@ -7162,7 +7177,8 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_MY_BOOL;
pthread_mutex_lock(&LOCK_active_mi);
var->value= buff;
- *((my_bool *)buff)= (my_bool) (active_mi && active_mi->slave_running &&
+ *((my_bool *)buff)= (my_bool) (active_mi &&
+ active_mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
active_mi->rli.slave_running);
pthread_mutex_unlock(&LOCK_active_mi);
return 0;
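
The help-text edits above replace backslash line continuations with adjacent string literal concatenation: the compiler joins the pieces, and the continuation line's indentation can no longer leak into the message. A small illustration:

#include <cstdio>

/* With a backslash splice, everything after the newline, including any
   indentation, becomes part of the literal. */
static const char *spliced=
  "The update log is deprecated, \
   see --log-bin.";   /* the three leading spaces end up inside the text */

/* Adjacent literals are concatenated by the compiler; the indentation of
   the source line is irrelevant. */
static const char *concatenated=
  "The update log is deprecated, "
  "see --log-bin.";

int main() {
  std::printf("[%s]\n", spliced);
  std::printf("[%s]\n", concatenated);
  return 0;
}
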
diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am
index 68b393e619f..357f9ac0876 100644
--- a/sql/share/Makefile.am
+++ b/sql/share/Makefile.am
@@ -22,7 +22,7 @@ dist-hook:
test -d $(distdir)/$$dir || mkdir $(distdir)/$$dir; \
$(INSTALL_DATA) $(srcdir)/$$dir/*.* $(distdir)/$$dir; \
done; \
- sleep 1 ; touch $(srcdir)/*/errmsg.sys
+ sleep 1 ; touch $(builddir)/*/errmsg.sys
$(INSTALL_DATA) $(srcdir)/charsets/README $(distdir)/charsets
$(INSTALL_DATA) $(srcdir)/charsets/Index.xml $(distdir)/charsets
@@ -39,11 +39,11 @@ install-data-local:
for lang in @AVAILABLE_LANGUAGES@; \
do \
$(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/$$lang; \
- $(INSTALL_DATA) $(srcdir)/$$lang/errmsg.sys \
+ $(INSTALL_DATA) $(builddir)/$$lang/errmsg.sys \
$(DESTDIR)$(pkgdatadir)/$$lang/errmsg.sys; \
done
$(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/charsets
- $(INSTALL_DATA) $(srcdir)/errmsg.txt \
+ $(INSTALL_DATA) $(builddir)/errmsg.txt \
$(DESTDIR)$(pkgdatadir)/errmsg.txt; \
$(INSTALL_DATA) $(srcdir)/charsets/README $(DESTDIR)$(pkgdatadir)/charsets/README
$(INSTALL_DATA) $(srcdir)/charsets/*.xml $(DESTDIR)$(pkgdatadir)/charsets
@@ -53,7 +53,7 @@ uninstall-local:
@RM@ -f -r $(DESTDIR)$(pkgdatadir)
distclean-local:
- @RM@ -f */errmsg.sys
+ @RM@ -f $(builddir)/*/errmsg.sys
# Do nothing
link_sources:
diff --git a/sql/slave.cc b/sql/slave.cc
index 271b3635cf1..a89ac2e682b 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2557,6 +2557,7 @@ pthread_handler_t handle_slave_io(void *arg)
connected:
+ DBUG_SYNC_POINT("debug_lock.before_get_running_status_yes", 10);
// TODO: the assignment below should be under mutex (5.0)
mi->slave_running= MYSQL_SLAVE_RUN_CONNECT;
thd->slave_net = &mysql->net;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 06e4b1d3e63..f75a73c15a8 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2169,6 +2169,7 @@ void wait_for_condition(THD *thd, pthread_mutex_t *mutex, pthread_cond_t *cond)
proc_info=thd->proc_info;
thd_proc_info(thd, "Waiting for table");
DBUG_ENTER("wait_for_condition");
+ DEBUG_SYNC(thd, "waiting_for_table");
if (!thd->killed)
(void) pthread_cond_wait(cond, mutex);
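
DEBUG_SYNC points like the one added here let a test park a session at a named spot and release it from another connection. A rough standalone model of that rendezvous, built on standard threads rather than the server's Debug Sync facility (debug_sync_wait/debug_sync_signal are hypothetical helpers, not the real API):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <set>
#include <string>
#include <thread>

static std::mutex mtx;
static std::condition_variable cv;
static std::set<std::string> signalled;

static void debug_sync_wait(const std::string &point) {    /* like WAIT_FOR */
  std::unique_lock<std::mutex> lk(mtx);
  cv.wait(lk, [&]{ return signalled.count(point) != 0; });
}

static void debug_sync_signal(const std::string &point) {  /* like SIGNAL */
  { std::lock_guard<std::mutex> lk(mtx); signalled.insert(point); }
  cv.notify_all();
}

int main() {
  std::thread worker([]{
    std::printf("worker: reached sync point, waiting\n");
    debug_sync_wait("waiting_for_table");   /* parks like the new sync point */
    std::printf("worker: released, continuing\n");
  });
  debug_sync_signal("waiting_for_table");   /* the test driver releases it */
  worker.join();
  return 0;
}
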
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index a3ce50fe4ee..bc68d3b03e0 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7028,9 +7028,11 @@ eq_ref_table(JOIN *join, ORDER *start_order, JOIN_TAB *tab)
}
if (order)
{
- found++;
- DBUG_ASSERT(!(order->used & map));
- order->used|=map;
+ if (!(order->used & map))
+ {
+ found++;
+ order->used|= map;
+ }
continue; // Used in ORDER BY
}
if (!only_eq_ref_tables(join,start_order, (*ref_item)->used_tables()))
@@ -8198,7 +8200,8 @@ static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
else
{
DBUG_ASSERT(cond->type() == Item::COND_ITEM);
- ((Item_cond *) cond)->add_at_head(&eq_list);
+ if (eq_list.elements)
+ ((Item_cond *) cond)->add_at_head(&eq_list);
}
cond->quick_fix_field();
@@ -9822,7 +9825,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
KEY_PART_INFO *key_part_info;
Item **copy_func;
MI_COLUMNDEF *recinfo;
- uint total_uneven_bit_length= 0;
+ /*
+ total_uneven_bit_length is uneven bit length for visible fields
+ hidden_uneven_bit_length is uneven bit length for hidden fields
+ */
+ uint total_uneven_bit_length= 0, hidden_uneven_bit_length= 0;
bool force_copy_fields= param->force_copy_fields;
/* Treat sum functions as normal ones when loose index scan is used. */
save_sum_fields|= param->precomputed_group_by;
@@ -10099,6 +10106,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
param->hidden_field_count= fieldnr;
null_count= 0;
+ /*
+ On last hidden field we store uneven bit length in
+ hidden_uneven_bit_length and proceed calculation of
+ uneven bits for visible fields into
+ total_uneven_bit_length variable.
+ */
+ hidden_uneven_bit_length= total_uneven_bit_length;
+ total_uneven_bit_length= 0;
}
}
DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
@@ -10144,7 +10159,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
else
null_count++;
}
- hidden_null_pack_length=(hidden_null_count+7)/8;
+ hidden_null_pack_length= (hidden_null_count + 7 +
+ hidden_uneven_bit_length) / 8;
null_pack_length= (hidden_null_pack_length +
(null_count + total_uneven_bit_length + 7) / 8);
reclength+=null_pack_length;
@@ -12896,12 +12912,35 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
uint find_shortest_key(TABLE *table, const key_map *usable_keys)
{
- uint min_length= (uint) ~0;
uint best= MAX_KEY;
+ uint usable_clustered_pk= (table->file->primary_key_is_clustered() &&
+ table->s->primary_key != MAX_KEY &&
+ usable_keys->is_set(table->s->primary_key)) ?
+ table->s->primary_key : MAX_KEY;
if (!usable_keys->is_clear_all())
{
+ uint min_length= (uint) ~0;
for (uint nr=0; nr < table->s->keys ; nr++)
{
+ /*
+ As far as
+ 1) clustered primary key entry data set is a set of all record
+ fields (key fields and not key fields) and
+ 2) secondary index entry data is a union of its key fields and
+ primary key fields (at least InnoDB and its derivatives don't
+ duplicate primary key fields there, even if the primary and
+ the secondary keys have a common subset of key fields),
+ then secondary index entry data is always a subset of primary key
+ entry, and the PK is always longer.
+ Unfortunately, key_info[nr].key_length doesn't show the length
+ of key/pointer pair but a sum of key field lengths only, thus
+ we can't estimate index IO volume comparing only this key_length
+ value of secondary keys and clustered PK.
+ So, try secondary keys first, and choose PK only if there are no
+ usable secondary covering keys:
+ */
+ if (nr == usable_clustered_pk)
+ continue;
if (usable_keys->is_set(nr))
{
if (table->key_info[nr].key_length < min_length)
@@ -12912,7 +12951,7 @@ uint find_shortest_key(TABLE *table, const key_map *usable_keys)
}
}
}
- return best;
+ return best != MAX_KEY ? best : usable_clustered_pk;
}
/**
@@ -13291,12 +13330,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (select_limit >= table_records)
{
- /*
- filesort() and join cache are usually faster than reading in
- index order and not using join cache
- */
- if (tab->type == JT_ALL && tab->join->tables > tab->join->const_tables + 1)
- DBUG_RETURN(0);
keys= *table->file->keys_to_use_for_scanning();
keys.merge(table->covering_keys);
@@ -13446,6 +13479,19 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
}
}
}
+
+ /*
+ filesort() and join cache are usually faster than reading in
+ index order and not using join cache, except in case that chosen
+ index is clustered primary key.
+ */
+ if ((select_limit >= table_records) &&
+ (tab->type == JT_ALL &&
+ tab->join->tables > tab->join->const_tables + 1) &&
+ ((unsigned) best_key != table->s->primary_key ||
+ !table->file->primary_key_is_clustered()))
+ DBUG_RETURN(0);
+
if (best_key >= 0)
{
bool quick_created= FALSE;
@@ -15642,7 +15688,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
Item_cond_and *cond=new Item_cond_and();
TABLE *table=join_tab->table;
- int error;
+ int error= 0;
if (!cond)
DBUG_RETURN(TRUE);
@@ -15660,7 +15706,8 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
cond->fix_fields(thd, (Item**)&cond);
if (join_tab->select)
{
- error=(int) cond->add(join_tab->select->cond);
+ if (join_tab->select->cond)
+ error=(int) cond->add(join_tab->select->cond);
join_tab->select_cond=join_tab->select->cond=cond;
}
else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0,
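
The find_shortest_key() change above encodes a preference order: scan the secondary covering keys first, because key_length understates how large clustered-PK entries really are, and fall back to the clustered primary key only when no usable secondary key exists. A standalone sketch of just that selection rule (hypothetical structures, not the real TABLE/key_map types):

#include <cstdio>
#include <limits>
#include <vector>

struct KeyInfo { unsigned key_length; bool usable; bool clustered_pk; };

static const unsigned MAX_KEY= std::numeric_limits<unsigned>::max();

static unsigned find_shortest_key(const std::vector<KeyInfo> &keys) {
  unsigned best= MAX_KEY, min_length= MAX_KEY, clustered= MAX_KEY;
  for (unsigned nr= 0; nr < keys.size(); nr++) {
    if (!keys[nr].usable)
      continue;
    if (keys[nr].clustered_pk) { clustered= nr; continue; }  /* try it last */
    if (keys[nr].key_length < min_length) {
      min_length= keys[nr].key_length;
      best= nr;
    }
  }
  return best != MAX_KEY ? best : clustered;   /* PK only as a fallback */
}

int main() {
  /* A short clustered PK and a longer secondary key: the secondary wins,
     because its on-disk entries are still smaller than full PK rows. */
  std::vector<KeyInfo> keys;
  KeyInfo pk= {4, true, true};          /* key_length understates its real size */
  KeyInfo covering= {12, true, false};  /* covering secondary index */
  keys.push_back(pk);
  keys.push_back(covering);
  std::printf("chosen key: %u\n", find_shortest_key(keys));  /* prints 1 */
  return 0;
}
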
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 301ab6ceda6..eb88b1e70a5 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -22,6 +22,7 @@
#include "sp_head.h"
#include "sql_trigger.h"
#include "sql_show.h"
+#include "debug_sync.h"
#ifdef __WIN__
#include <io.h>
@@ -1889,22 +1890,10 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
pthread_mutex_lock(&LOCK_open);
- /*
- If we have the table in the definition cache, we don't have to check the
- .frm file to find if the table is a normal table (not view) and what
- engine to use.
- */
-
+ /* Disable drop of enabled log tables, must be done before name locking */
for (table= tables; table; table= table->next_local)
{
- TABLE_SHARE *share;
- table->db_type= NULL;
- if ((share= get_cached_table_share(table->db, table->table_name)))
- table->db_type= share->db_type();
-
- /* Disable drop of enabled log tables */
- if (share && (share->table_category == TABLE_CATEGORY_PERFORMANCE) &&
- check_if_log_table(table->db_length, table->db,
+ if (check_if_log_table(table->db_length, table->db,
table->table_name_length, table->table_name, 1))
{
my_error(ER_BAD_LOG_STATEMENT, MYF(0), "DROP");
@@ -1923,7 +1912,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
{
char *db=table->db;
handlerton *table_type;
- enum legacy_db_type frm_db_type;
+ enum legacy_db_type frm_db_type= DB_TYPE_UNKNOWN;
DBUG_PRINT("table", ("table_l: '%s'.'%s' table: 0x%lx s: 0x%lx",
table->db, table->table_name, (long) table->table,
@@ -1988,7 +1977,6 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
built_query.append("`,");
}
- table_type= table->db_type;
if (!drop_temporary)
{
TABLE *locked_table;
@@ -2015,9 +2003,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
table->internal_tmp_table ?
FN_IS_TMP : 0);
}
+ DEBUG_SYNC(thd, "rm_table_part2_before_delete_table");
if (drop_temporary ||
- ((table_type == NULL &&
- access(path, F_OK) &&
+ ((access(path, F_OK) &&
ha_create_table_from_engine(thd, db, alias)) ||
(!drop_view &&
mysql_frm_type(thd, path, &frm_db_type) != FRMTYPE_TABLE)))
@@ -2033,15 +2021,25 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
else
{
char *end;
- if (table_type == NULL)
+ /*
+ Cannot use the db_type from the table, since that might have changed
+ while waiting for the exclusive name lock. We are under LOCK_open,
+ so reading from the frm-file is safe.
+ */
+ if (frm_db_type == DB_TYPE_UNKNOWN)
{
- mysql_frm_type(thd, path, &frm_db_type);
- table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
+ mysql_frm_type(thd, path, &frm_db_type);
+ DBUG_PRINT("info", ("frm_db_type %d from %s", frm_db_type, path));
}
+ table_type= ha_resolve_by_legacy_type(thd, frm_db_type);
// Remove extension for delete
*(end= path + path_length - reg_ext_length)= '\0';
+ DBUG_PRINT("info", ("deleting table of type %d",
+ (table_type ? table_type->db_type : 0)));
error= ha_delete_table(thd, table_type, path, db, table->table_name,
!dont_log_query);
+
+ /* No error if non existent table and 'IF EXIST' clause or view */
if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) &&
(if_exists || table_type == NULL))
{
@@ -2081,6 +2079,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
on the table name.
*/
pthread_mutex_unlock(&LOCK_open);
+ DEBUG_SYNC(thd, "rm_table_part2_before_binlog");
thd->thread_specific_used|= tmp_table_deleted;
error= 0;
if (wrong_tables.length())
@@ -7120,6 +7119,7 @@ view_err:
else
create_info->data_file_name=create_info->index_file_name=0;
+ DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock");
/*
Create a table with a temporary name.
With create_info->frm_only == 1 this creates a .frm file only.
@@ -7320,6 +7320,7 @@ view_err:
intern_close_table(new_table);
my_free(new_table,MYF(0));
}
+ DEBUG_SYNC(thd, "alter_table_before_rename_result_table");
VOID(pthread_mutex_lock(&LOCK_open));
if (error)
{
@@ -7462,6 +7463,7 @@ view_err:
thd_proc_info(thd, "end");
DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000););
+ DEBUG_SYNC(thd, "alter_table_before_main_binlog");
ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
thd->query(), thd->query_length(),
@@ -7932,22 +7934,28 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
for (uint i= 0; i < t->s->fields; i++ )
{
Field *f= t->field[i];
- enum_field_types field_type= f->type();
- /*
- BLOB and VARCHAR have pointers in their field, we must convert
- to string; GEOMETRY is implemented on top of BLOB.
- */
- if ((field_type == MYSQL_TYPE_BLOB) ||
- (field_type == MYSQL_TYPE_VARCHAR) ||
- (field_type == MYSQL_TYPE_GEOMETRY))
- {
- String tmp;
- f->val_str(&tmp);
- row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(), tmp.length());
+
+ /*
+ BLOB and VARCHAR have pointers in their field, we must convert
+ to string; GEOMETRY is implemented on top of BLOB.
+ BIT may store its data among NULL bits, convert as well.
+ */
+ switch (f->type()) {
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_GEOMETRY:
+ case MYSQL_TYPE_BIT:
+ {
+ String tmp;
+ f->val_str(&tmp);
+ row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(),
+ tmp.length());
+ break;
+ }
+ default:
+ row_crc= my_checksum(row_crc, f->ptr, f->pack_length());
+ break;
}
- else
- row_crc= my_checksum(row_crc, f->ptr,
- f->pack_length());
}
crc+= row_crc;
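
The checksum loop rewritten above dispatches on field type: BLOB, VARCHAR, GEOMETRY and BIT are materialized via val_str() before checksumming, since their row-buffer bytes are pointers or partly live in the NULL-bit area, while fixed-length types are checksummed in place. A standalone sketch of that dispatch (toy checksum and a hypothetical Field stand-in, not the server's my_checksum):

#include <cstddef>
#include <cstdio>
#include <string>

enum FieldType { FT_LONG, FT_VARCHAR, FT_BLOB, FT_GEOMETRY, FT_BIT };

struct Field {
  FieldType type;
  std::string value;             /* what f->val_str() would materialize */
  unsigned char fixed_slot[4];   /* what the fixed-length row slot would hold */
  unsigned pack_length;
};

static unsigned long crc_bytes(unsigned long crc, const unsigned char *p,
                               std::size_t len) {
  while (len--) crc= (crc << 5) + crc + *p++;   /* toy hash in place of my_checksum */
  return crc;
}

static unsigned long checksum_field(unsigned long crc, const Field &f) {
  switch (f.type) {
  case FT_BLOB:
  case FT_VARCHAR:
  case FT_GEOMETRY:
  case FT_BIT:                   /* data lives (partly) outside the fixed slot */
    return crc_bytes(crc, (const unsigned char *) f.value.data(), f.value.size());
  default:                       /* fixed-length types: checksum the slot itself */
    return crc_bytes(crc, f.fixed_slot, f.pack_length);
  }
}

int main() {
  Field num=  {FT_LONG, "", {1, 0, 0, 0}, 4};
  Field bits= {FT_BIT, "\x05", {0, 0, 0, 0}, 1};
  unsigned long crc= 0;
  crc= checksum_field(crc, num);
  crc= checksum_field(crc, bits);
  std::printf("row crc: %lu\n", crc);
  return 0;
}
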
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 84610630d62..63af275cef3 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1283,7 +1283,8 @@ bool mysql_multi_update(THD *thd,
if (using_handler)
{
- Internal_error_handler *top_handler= thd->pop_internal_handler();
+ Internal_error_handler *top_handler;
+ top_handler= thd->pop_internal_handler();
DBUG_ASSERT(&handler == top_handler);
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 37150bf835d..8dc08f8425f 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -4596,7 +4596,7 @@ create_table_option:
| TYPE_SYM opt_equal storage_engines
{
Lex->create_info.db_type= $3;
- WARN_DEPRECATED(yythd, "5.4.4", "TYPE=storage_engine",
+ WARN_DEPRECATED(yythd, "6.0", "TYPE=storage_engine",
"'ENGINE=storage_engine'");
Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE;
}