Diffstat (limited to 'sql')
-rw-r--r--  sql/examples/ha_archive.cc | 27
-rw-r--r--  sql/ha_innodb.cc           | 18
-rw-r--r--  sql/item.cc                |  3
-rw-r--r--  sql/item_cmpfunc.h         |  9
-rw-r--r--  sql/item_func.cc           | 17
-rw-r--r--  sql/item_func.h            |  4
-rw-r--r--  sql/item_strfunc.cc        | 18
-rw-r--r--  sql/item_strfunc.h         |  1
-rw-r--r--  sql/item_subselect.cc      | 16
-rw-r--r--  sql/mysqld.cc              | 23
-rw-r--r--  sql/set_var.cc             | 14
-rw-r--r--  sql/sql_base.cc            | 14
-rw-r--r--  sql/sql_class.cc           |  8
-rw-r--r--  sql/sql_class.h            |  6
-rw-r--r--  sql/sql_lex.cc             |  5
-rw-r--r--  sql/sql_lex.h              |  1
-rw-r--r--  sql/sql_parse.cc           |  3
-rw-r--r--  sql/sql_select.cc          |  2
-rw-r--r--  sql/sql_table.cc           |  9
-rw-r--r--  sql/sql_update.cc          |  5
20 files changed, 156 insertions, 47 deletions
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index 436c72702a0..b33b5102e69 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -43,18 +43,20 @@
handle bulk inserts as well (that is if someone was trying to read at
the same time since we would want to flush).
- A "meta" file is kept. All this file does is contain information on
- the number of rows.
+ A "meta" file is kept alongside the data file. This file serves two purpose.
+ The first purpose is to track the number of rows in the table. The second
+ purpose is to determine if the table was closed properly or not. When the
+ meta file is first opened it is marked as dirty. It is opened when the table
+ itself is opened for writing. When the table is closed the new count for rows
+ is written to the meta file and the file is marked as clean. If the meta file
+ is opened and it is marked as dirty, it is assumed that a crash occurred. At
+ this point an error occurs and the user is told to rebuild the file.
+ A rebuild scans the rows and rewrites the meta file. If corruption is found
+ in the data file then the meta file is not repaired.
- No attempts at durability are made. You can corrupt your data. A repair
- method was added to repair the meta file that stores row information,
- but if your data file gets corrupted I haven't solved that. I could
- create a repair that would solve this, but do you want to take a
- chance of loosing your data?
+ At some point a recovery method for such a drastic case needs to be devised.
- Locks are row level, and you will get a consistant read. Transactions
- will be added later (they are not that hard to add at this
- stage).
+ Locks are row level, and you will get a consistent read.
For performance as far as table scans go it is quite fast. I don't have
good numbers but locally it has out performed both Innodb and MyISAM. For
@@ -89,7 +91,6 @@
compression but may speed up ordered searches).
Checkpoint the meta file to allow for faster rebuilds.
Dirty open (right now the meta file is repaired if a crash occured).
- Transactions.
Option to allow for dirty reads, this would lower the sync calls, which would make
inserts a lot faster, but would mean highly arbitrary reads.
@@ -347,6 +348,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
share->crashed= TRUE;
else
(void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
@@ -393,7 +395,8 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
(void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
if (gzclose(share->archive_write) == Z_ERRNO)
rc= 1;
- my_close(share->meta_file,MYF(0));
+ if (my_close(share->meta_file, MYF(0)))
+ rc= 1;
my_free((gptr) share, MYF(0));
}
pthread_mutex_unlock(&archive_mutex);
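
The new header comment describes a simple crash-detection protocol for the meta file: mark it dirty when the table is opened for writing, write the row count and mark it clean on a proper close. A minimal standalone sketch of that idea (hypothetical layout and names, not the actual ARCHIVE meta format):

  #include <cstdint>
  #include <cstdio>

  /* Hypothetical meta-file layout: a row count followed by a dirty flag. */
  struct MetaFile {
    uint64_t rows;
    uint8_t  dirty;
  };

  /* Mark the meta file dirty as soon as the table is opened for writing.
     A file that is already dirty means the last writer crashed. */
  static bool meta_open_for_write(FILE *f, MetaFile *m) {
    if (fread(m, sizeof(*m), 1, f) != 1)
      return false;
    if (m->dirty)
      return false;              /* crashed table: caller must rebuild */
    m->dirty = 1;
    rewind(f);
    return fwrite(m, sizeof(*m), 1, f) == 1 && fflush(f) == 0;
  }

  /* On a clean close, persist the final row count and clear the dirty flag. */
  static bool meta_close(FILE *f, MetaFile *m, uint64_t final_rows) {
    m->rows = final_rows;
    m->dirty = 0;
    rewind(f);
    return fwrite(m, sizeof(*m), 1, f) == 1 && fflush(f) == 0;
  }

The rebuild path mentioned in the comment would then re-scan the data file, count the rows, and rewrite this record with the dirty flag cleared.
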
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 32b203f7ad6..797f51c0293 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -4871,12 +4871,12 @@ ha_innobase::update_table_comment(
dict_print_info_on_foreign_keys(FALSE, file,
prebuilt->trx, prebuilt->table);
flen = ftell(file);
- if(length + flen + 3 > 64000) {
+ if (flen < 0) {
+ flen = 0;
+ } else if (length + flen + 3 > 64000) {
flen = 64000 - 3 - length;
}
- ut_ad(flen > 0);
-
/* allocate buffer for the full string, and
read the contents of the temporary file */
@@ -4940,12 +4940,12 @@ ha_innobase::get_foreign_key_create_info(void)
prebuilt->trx->op_info = (char*)"";
flen = ftell(file);
- if(flen > 64000 - 1) {
+ if (flen < 0) {
+ flen = 0;
+ } else if(flen > 64000 - 1) {
flen = 64000 - 1;
}
- ut_ad(flen >= 0);
-
/* allocate buffer for the string, and
read the contents of the temporary file */
@@ -5546,12 +5546,12 @@ innodb_show_status(
srv_printf_innodb_monitor(srv_monitor_file);
flen = ftell(srv_monitor_file);
os_file_set_eof(srv_monitor_file);
- if(flen > 64000 - 1) {
+ if (flen < 0) {
+ flen = 0;
+ } else if (flen > 64000 - 1) {
flen = 64000 - 1;
}
- ut_ad(flen > 0);
-
/* allocate buffer for the string, and
read the contents of the temporary file */
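
All three hunks above replace a debug assertion on ftell() with an explicit check, since ftell() returns -1 on failure. A small sketch of the same clamping pattern (clamped_length and the cap value are illustrative, not InnoDB names):

  #include <cstdio>

  /* ftell() reports errors as -1L, so clamp before using the result as a
     buffer length: negative becomes 0 and oversized results are capped. */
  static long clamped_length(FILE *f, long cap) {
    long flen = ftell(f);
    if (flen < 0)
      return 0;                  /* error, e.g. stream not seekable */
    if (flen > cap)
      return cap;                /* never allocate more than the cap */
    return flen;
  }
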
diff --git a/sql/item.cc b/sql/item.cc
index 763ab84582d..76d802e972f 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1775,12 +1775,13 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
if (select_ref != not_found_item && !ambiguous_fields)
{
DBUG_ASSERT(*select_ref);
- if (! (*select_ref)->fixed)
+ if (!last->ref_pointer_array[counter])
{
my_error(ER_ILLEGAL_REFERENCE, MYF(0),
ref->name, "forward reference in item list");
return NULL;
}
+ DBUG_ASSERT((*select_ref)->fixed);
return (select->ref_pointer_array + counter);
}
if (group_by_ref)
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index a156322e13c..181f1312d46 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -215,7 +215,7 @@ class Item_bool_rowready_func2 :public Item_bool_func2
public:
Item_bool_rowready_func2(Item *a, Item *b) :Item_bool_func2(a, b)
{
- allowed_arg_cols= a->cols();
+ allowed_arg_cols= 0; // Fetch this value from first argument
}
Item *neg_transformer(THD *thd);
virtual Item *negated_item();
@@ -427,7 +427,10 @@ class Item_func_interval :public Item_int_func
double *intervals;
public:
Item_func_interval(Item_row *a)
- :Item_int_func(a),row(a),intervals(0) { allowed_arg_cols= a->cols(); }
+ :Item_int_func(a),row(a),intervals(0)
+ {
+ allowed_arg_cols= 0; // Fetch this value from first argument
+ }
longlong val_int();
void fix_length_and_dec();
const char *func_name() const { return "interval"; }
@@ -780,7 +783,7 @@ class Item_func_in :public Item_int_func
Item_func_in(List<Item> &list)
:Item_int_func(list), array(0), in_item(0), have_null(0)
{
- allowed_arg_cols= args[0]->cols();
+ allowed_arg_cols= 0; // Fetch this value from first argument
}
longlong val_int();
void fix_length_and_dec();
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 434a4741cf3..3d5a73dd2a0 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -308,10 +308,23 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
We can't yet set item to *arg as fix_fields may change *arg
We shouldn't call fix_fields() twice, so check 'fixed' field first
*/
- if ((!(*arg)->fixed && (*arg)->fix_fields(thd, tables, arg)) ||
- (*arg)->check_cols(allowed_arg_cols))
+ if ((!(*arg)->fixed && (*arg)->fix_fields(thd, tables, arg)))
return TRUE; /* purecov: inspected */
item= *arg;
+
+ if (allowed_arg_cols)
+ {
+ if (item->check_cols(allowed_arg_cols))
+ return 1;
+ }
+ else
+ {
+ /* we have to fetch allowed_arg_cols from first argument */
+ DBUG_ASSERT(arg == args); // it is first argument
+ allowed_arg_cols= item->cols();
+ DBUG_ASSERT(allowed_arg_cols); // Can't be 0 any more
+ }
+
if (item->maybe_null)
maybe_null=1;
diff --git a/sql/item_func.h b/sql/item_func.h
index fb8d77d5b83..a3618cca23e 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -32,6 +32,10 @@ class Item_func :public Item_result_field
{
protected:
Item **args, *tmp_arg[2];
+ /*
+ Allowed number of columns in the result (usually 1, which means a scalar value);
+ 0 means take this number from the first argument
+ */
uint allowed_arg_cols;
public:
uint arg_count;
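
Taken together, the item_cmpfunc.h, item_func.cc and item_func.h changes stop reading cols() from an argument that has not been through fix_fields() yet: a zero allowed_arg_cols now means "take the required width from the first argument once it has been fixed". A simplified, hypothetical model of that deferred check:

  #include <cassert>
  #include <cstddef>
  #include <vector>

  /* Stand-in for an expression node; cols() is only valid after fix(). */
  struct Expr {
    bool fixed = false;
    std::size_t ncols = 0;
    void fix() { fixed = true; if (ncols == 0) ncols = 1; }
    std::size_t cols() const { assert(fixed); return ncols; }
  };

  /* allowed_cols == 0: take the width from the first argument after it is
     fixed; otherwise every argument must match the width we already know. */
  static bool fix_args(std::vector<Expr*> &args, std::size_t allowed_cols) {
    for (std::size_t i = 0; i < args.size(); i++) {
      if (!args[i]->fixed)
        args[i]->fix();
      if (allowed_cols == 0) {
        assert(i == 0);                 /* only the first argument sets it */
        allowed_cols = args[i]->cols();
      } else if (args[i]->cols() != allowed_cols) {
        return true;                    /* error: wrong number of columns */
      }
    }
    return false;
  }
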
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index f9843692b7b..c39caabeacf 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -2189,6 +2189,7 @@ String *Item_func_conv::val_str(String *str)
return 0;
}
null_value=0;
+ unsigned_flag= !(from_base < 0);
if (from_base < 0)
dec= my_strntoll(res->charset(),res->ptr(),res->length(),-from_base,&endptr,&err);
else
@@ -2643,18 +2644,13 @@ String *Item_func_quote::val_str(String *str)
for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++)
new_length+= get_esc_bit(escmask, (uchar) *from);
- /*
- We have to use realloc() instead of alloc() as we want to keep the
- old result in arg
- */
- if (arg->realloc(new_length))
+ if (tmp_value.alloc(new_length))
goto null;
/*
- As 'arg' and 'str' may be the same string, we must replace characters
- from the end to the beginning
+ We replace characters from the end to the beginning
*/
- to= (char*) arg->ptr() + new_length - 1;
+ to= (char*) tmp_value.ptr() + new_length - 1;
*to--= '\'';
for (start= (char*) arg->ptr(),end= start + arg_length; end-- != start; to--)
{
@@ -2682,10 +2678,10 @@ String *Item_func_quote::val_str(String *str)
}
}
*to= '\'';
- arg->length(new_length);
- str->set_charset(collation.collation);
+ tmp_value.length(new_length);
+ tmp_value.set_charset(collation.collation);
null_value= 0;
- return arg;
+ return &tmp_value;
null:
null_value= 1;
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index e322e5616a1..97c42c3abf6 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -596,6 +596,7 @@ public:
class Item_func_quote :public Item_str_func
{
+ String tmp_value;
public:
Item_func_quote(Item *a) :Item_str_func(a) {}
const char *func_name() const { return "quote"; }
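
The QUOTE() change stops growing and overwriting the argument's own buffer and instead builds the quoted result in a String member (tmp_value) owned by the item. A standalone sketch of the ownership idea, with a deliberately simplified escaping rule:

  #include <string>

  /* The result lives in a buffer owned by the function object, so the
     argument's storage is never reallocated or overwritten. */
  struct QuoteFunc {
    std::string tmp_value;              /* reused on every evaluation */

    const std::string *quote(const std::string &arg) {
      tmp_value.clear();
      tmp_value.reserve(arg.size() + 2);
      tmp_value.push_back('\'');
      for (char c : arg) {
        if (c == '\'' || c == '\\')
          tmp_value.push_back('\\');    /* escape quote and backslash */
        tmp_value.push_back(c);
      }
      tmp_value.push_back('\'');
      return &tmp_value;
    }
  };
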
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index d6d58adaf7c..3ac75bfdd30 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -995,6 +995,10 @@ Item_in_subselect::row_value_transformer(JOIN *join)
List_iterator_fast<Item> li(select_lex->item_list);
for (uint i= 0; i < n; i++)
{
+ DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed);
+ if (select_lex->ref_pointer_array[i]->
+ check_cols(left_expr->el(i)->cols()))
+ goto err;
Item *func= new Item_ref_null_helper(this,
select_lex->ref_pointer_array+i,
(char *) "<no matter>",
@@ -1117,6 +1121,7 @@ void subselect_single_select_engine::cleanup()
DBUG_ENTER("subselect_single_select_engine::cleanup");
prepared= optimized= executed= 0;
join= 0;
+ result->cleanup();
DBUG_VOID_RETURN;
}
@@ -1125,6 +1130,7 @@ void subselect_union_engine::cleanup()
{
DBUG_ENTER("subselect_union_engine::cleanup");
unit->reinit_exec_mechanism();
+ result->cleanup();
DBUG_VOID_RETURN;
}
@@ -1132,6 +1138,10 @@ void subselect_union_engine::cleanup()
void subselect_uniquesubquery_engine::cleanup()
{
DBUG_ENTER("subselect_uniquesubquery_engine::cleanup");
+ /*
+    subselect_uniquesubquery_engine does not have 'result' assigned, so we
+    do not call cleanup() on it
+ */
DBUG_VOID_RETURN;
}
@@ -1415,13 +1425,15 @@ int subselect_indexsubquery_engine::exec()
uint subselect_single_select_engine::cols()
{
- return select_lex->item_list.elements;
+ DBUG_ASSERT(select_lex->join); // should be called after fix_fields()
+ return select_lex->join->fields_list.elements;
}
uint subselect_union_engine::cols()
{
- return unit->first_select()->item_list.elements;
+ DBUG_ASSERT(unit->is_prepared()); // should be called after fix_fields()
+ return unit->types.elements;
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index e4df359d35c..97c150cd42b 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4251,7 +4251,11 @@ enum options_mysqld
OPT_RANGE_ALLOC_BLOCK_SIZE,
OPT_QUERY_ALLOC_BLOCK_SIZE, OPT_QUERY_PREALLOC_SIZE,
OPT_TRANS_ALLOC_BLOCK_SIZE, OPT_TRANS_PREALLOC_SIZE,
- OPT_SYNC_FRM, OPT_SYNC_BINLOG, OPT_BDB_NOSYNC,
+ OPT_SYNC_FRM, OPT_SYNC_BINLOG,
+ OPT_SYNC_REPLICATION,
+ OPT_SYNC_REPLICATION_SLAVE_ID,
+ OPT_SYNC_REPLICATION_TIMEOUT,
+ OPT_BDB_NOSYNC,
OPT_ENABLE_SHARED_MEMORY,
OPT_SHARED_MEMORY_BASE_NAME,
OPT_OLD_PASSWORDS,
@@ -5447,6 +5451,23 @@ The minimum value for this variable is 4096.",
(gptr*) &sync_binlog_period,
(gptr*) &sync_binlog_period, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1,
0},
+#ifdef DOES_NOTHING_YET
+ {"sync-replication", OPT_SYNC_REPLICATION,
+ "Enable synchronous replication",
+ (gptr*) &global_system_variables.sync_replication,
+ (gptr*) &global_system_variables.sync_replication,
+ 0, GET_ULONG, REQUIRED_ARG, 0, 0, 1, 0, 1, 0},
+ {"sync-replication-slave-id", OPT_SYNC_REPLICATION_SLAVE_ID,
+ "Synchronous replication is wished for this slave",
+ (gptr*) &global_system_variables.sync_replication_slave_id,
+ (gptr*) &global_system_variables.sync_replication_slave_id,
+ 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0},
+ {"sync-replication-timeout", OPT_SYNC_REPLICATION_TIMEOUT,
+ "Synchronous replication timeout",
+ (gptr*) &global_system_variables.sync_replication_timeout,
+ (gptr*) &global_system_variables.sync_replication_timeout,
+ 0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0},
+#endif
{"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default",
(gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
0, 0, 0, 0},
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 541bcf0ff7d..f4d07360282 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -358,6 +358,14 @@ sys_var_thd_storage_engine sys_storage_engine("storage_engine",
&SV::table_type);
#ifdef HAVE_REPLICATION
sys_var_sync_binlog_period sys_sync_binlog_period("sync_binlog", &sync_binlog_period);
+sys_var_thd_ulong sys_sync_replication("sync_replication",
+ &SV::sync_replication);
+sys_var_thd_ulong sys_sync_replication_slave_id(
+ "sync_replication_slave_id",
+ &SV::sync_replication_slave_id);
+sys_var_thd_ulong sys_sync_replication_timeout(
+ "sync_replication_timeout",
+ &SV::sync_replication_timeout);
#endif
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_long_ptr sys_table_cache_size("table_cache",
@@ -647,6 +655,9 @@ sys_var *sys_variables[]=
&sys_storage_engine,
#ifdef HAVE_REPLICATION
&sys_sync_binlog_period,
+ &sys_sync_replication,
+ &sys_sync_replication_slave_id,
+ &sys_sync_replication_timeout,
#endif
&sys_sync_frm,
&sys_table_cache_size,
@@ -915,6 +926,9 @@ struct show_var_st init_vars[]= {
{sys_storage_engine.name, (char*) &sys_storage_engine, SHOW_SYS},
#ifdef HAVE_REPLICATION
{sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS},
+ {sys_sync_replication.name, (char*) &sys_sync_replication, SHOW_SYS},
+ {sys_sync_replication_slave_id.name, (char*) &sys_sync_replication_slave_id,SHOW_SYS},
+ {sys_sync_replication_timeout.name, (char*) &sys_sync_replication_timeout,SHOW_SYS},
#endif
{sys_sync_frm.name, (char*) &sys_sync_frm, SHOW_SYS},
#ifdef HAVE_TZNAME
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 3fb2fac5b27..8a7ae2dffc3 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2758,6 +2758,20 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
thd->allow_sum_func= allow_sum_func;
thd->where="field list";
+ /*
+    To prevent failures on forward lookups we fill the array with zeros;
+    then, if find_item_in_list returns a pointer to a zero entry, we know
+    that it is a forward lookup.
+
+    Another way to solve the problem would be to fill the array with
+    pointers to the list, but that would be slower.
+
+    TODO: remove this when (if) we make one list for all_fields and
+    ref_pointer_array
+ */
+ if (ref_pointer_array)
+ bzero(ref_pointer_array, sizeof(Item *) * fields.elements);
+
Item **ref= ref_pointer_array;
while ((item= it++))
{
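
This zero-fill works together with the item.cc change above: resolve_ref_in_select_and_group() can now treat a NULL entry in ref_pointer_array as "not resolved yet" and report a forward reference, instead of relying only on the fixed flag. A minimal model of the two halves (names here are illustrative):

  #include <cstddef>
  #include <cstring>
  #include <vector>

  struct Item;                            /* opaque expression node */

  /* Zero the slot array before the select list is resolved, so that an
     unresolved alias shows up as a NULL slot rather than stale data. */
  static void clear_ref_slots(std::vector<Item*> &ref_pointer_array) {
    if (!ref_pointer_array.empty())
      std::memset(ref_pointer_array.data(), 0,
                  sizeof(Item*) * ref_pointer_array.size());
  }

  /* Later, when an alias is found at position 'counter', a NULL slot means
     the referenced item has not been fixed yet: a forward reference. */
  static bool is_forward_reference(const std::vector<Item*> &slots,
                                   std::size_t counter) {
    return slots[counter] == nullptr;
  }
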
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index c9545a0141e..89442d157c6 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1303,6 +1303,14 @@ bool select_singlerow_subselect::send_data(List<Item> &items)
}
+void select_max_min_finder_subselect::cleanup()
+{
+ DBUG_ENTER("select_max_min_finder_subselect::cleanup");
+ cache= 0;
+ DBUG_VOID_RETURN;
+}
+
+
bool select_max_min_finder_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_max_min_finder_subselect::send_data");
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 561cf099592..29185dfbf7b 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -431,6 +431,11 @@ struct system_variables
my_bool low_priority_updates;
my_bool new_mode;
my_bool query_cache_wlock_invalidate;
+#ifdef HAVE_REPLICATION
+ ulong sync_replication;
+ ulong sync_replication_slave_id;
+ ulong sync_replication_timeout;
+#endif /* HAVE_REPLICATION */
#ifdef HAVE_INNOBASE_DB
my_bool innodb_table_locks;
#endif /* HAVE_INNOBASE_DB */
@@ -1510,6 +1515,7 @@ public:
select_max_min_finder_subselect(Item_subselect *item, bool mx)
:select_subselect(item), cache(0), fmax(mx)
{}
+ void cleanup();
bool send_data(List<Item> &items);
bool cmp_real();
bool cmp_int();
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 43e82ff57c9..06e271333bf 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -177,7 +177,10 @@ void lex_start(THD *thd, uchar *buf,uint length)
void lex_end(LEX *lex)
{
- lex->select_lex.expr_list.delete_elements(); // If error when parsing sql-varargs
+ for (SELECT_LEX *sl= lex->all_selects_list;
+ sl;
+ sl= sl->next_select_in_list())
+ sl->expr_list.delete_elements(); // If error when parsing sql-varargs
x_free(lex->yacc_yyss);
x_free(lex->yacc_yyvs);
}
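
The lex_end() change frees expr_list for every SELECT_LEX reachable through all_selects_list, not only the top-level one, so expressions parsed for subqueries before a parse error no longer leak. A rough standalone model of that walk:

  #include <vector>

  struct Expr { };

  /* Each select in the statement owns parse-time expressions; selects are
     chained through a "next in list" pointer covering the whole statement. */
  struct SelectLex {
    std::vector<Expr*> expr_list;
    SelectLex *next_in_list = nullptr;
  };

  /* Free the expr_list of every select, not just the first one. */
  static void free_all_expr_lists(SelectLex *all_selects_list) {
    for (SelectLex *sl = all_selects_list; sl; sl = sl->next_in_list) {
      for (Expr *e : sl->expr_list)
        delete e;
      sl->expr_list.clear();
    }
  }
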
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index b2c214bf1fa..266cb3cc030 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -416,6 +416,7 @@ public:
void print(String *str);
ulong init_prepare_fake_select_lex(THD *thd);
+ inline bool is_prepared() { return prepared; }
bool change_result(select_subselect *result, select_subselect *old_result);
void set_limit(st_select_lex *values, st_select_lex *sl);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 05d6a00805c..d309f58a37c 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -712,6 +712,8 @@ static int check_connection(THD *thd)
DBUG_PRINT("info",
("New connection received on %s", vio_description(net->vio)));
+ vio_in_addr(net->vio,&thd->remote.sin_addr);
+
if (!thd->host) // If TCP/IP connection
{
char ip[30];
@@ -756,7 +758,6 @@ static int check_connection(THD *thd)
DBUG_PRINT("info",("Host: %s",thd->host));
thd->host_or_ip= thd->host;
thd->ip= 0;
- bzero((char*) &thd->remote, sizeof(struct sockaddr));
}
vio_keepalive(net->vio, TRUE);
ulong pkt_len= 0;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 69b5c667f6b..89b84f40eb6 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1435,7 +1435,7 @@ JOIN::exec()
curr_join->select_distinct=0; /* Each row is unique */
curr_join->join_free(0); /* Free quick selects */
- if (select_distinct && ! group_list)
+ if (curr_join->select_distinct && ! curr_join->group_list)
{
thd->proc_info="Removing duplicates";
if (curr_join->tmp_having)
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 880cce06c27..38f1e6e7250 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2394,7 +2394,10 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
strxmov(src_path, (*tmp_table)->s->path, reg_ext, NullS);
else
{
- fn_format( src_path, src_table, src_db, reg_ext, MYF(MY_UNPACK_FILENAME));
+ strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table,
+ reg_ext, NullS);
+ /* Resolve symlinks (for windows) */
+ fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME));
if (access(src_path, F_OK))
{
my_error(ER_BAD_TABLE_ERROR, MYF(0), src_table);
@@ -2421,7 +2424,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
}
else
{
- fn_format( dst_path, table_name, db, reg_ext, MYF(MY_UNPACK_FILENAME));
+ strxmov(dst_path, mysql_data_home, "/", db, "/", table_name,
+ reg_ext, NullS);
+ fn_format(dst_path, dst_path, "", "", MYF(MY_UNPACK_FILENAME));
if (!access(dst_path, F_OK))
goto table_exists;
}
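
Both hunks now build the .frm path explicitly as <datadir>/<db>/<table>.frm and run it through fn_format() only to resolve symlinks, rather than letting fn_format() assemble the path from its parts. A hedged sketch of the existence check that follows (frm_exists is an illustrative helper, not a MySQL function):

  #include <string>
  #include <unistd.h>                   /* access(), F_OK */

  /* Assemble "<datadir>/<db>/<table>.frm" and check that the file exists. */
  static bool frm_exists(const std::string &datadir, const std::string &db,
                         const std::string &table) {
    std::string path = datadir + "/" + db + "/" + table + ".frm";
    return access(path.c_str(), F_OK) == 0;
  }
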
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index f9ad513ea6a..82cc1394eaf 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -120,7 +120,7 @@ int mysql_update(THD *thd,
bool used_key_is_modified, transactional_table, log_delayed;
int res;
int error=0;
- uint used_index= MAX_KEY;
+ uint used_index;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint want_privilege;
#endif
@@ -264,7 +264,10 @@ int mysql_update(THD *thd,
else if ((used_index=table->file->key_used_on_scan) < MAX_KEY)
used_key_is_modified=check_if_key_used(table, used_index, fields);
else
+ {
used_key_is_modified=0;
+ used_index= MAX_KEY;
+ }
if (used_key_is_modified || order)
{
/*