summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorunknown <monty@mashka.mysql.fi>2002-12-05 19:38:42 +0200
committerunknown <monty@mashka.mysql.fi>2002-12-05 19:38:42 +0200
commit6d33f73416b5576b4e48412fd3a7342385c1c747 (patch)
tree56f9dc35fe3c9e6c4f341e577ff3db68855d9e81 /sql
parent930b3fa897112c49392c2154de1ec21b9e44edda (diff)
parent9b5167eeac3e8d14134769334d94f260cb5db277 (diff)
downloadmariadb-git-6d33f73416b5576b4e48412fd3a7342385c1c747.tar.gz
Merge with 4.0.6
BitKeeper/etc/ignore: auto-union acinclude.m4: Auto merged configure.in: Auto merged heap/hp_delete.c: Auto merged heap/hp_scan.c: Auto merged include/my_base.h: Auto merged libmysql/libmysql.c: Auto merged libmysqld/lib_sql.cc: Auto merged myisam/mi_check.c: Auto merged myisam/mi_rnext_same.c: Auto merged myisam/sort.c: Auto merged mysql-test/r/alter_table.result: Auto merged mysql-test/r/distinct.result: Auto merged mysql-test/r/func_math.result: Auto merged mysql-test/r/group_by.result: Auto merged mysql-test/r/innodb.result: Auto merged mysql-test/r/select.result: Auto merged mysql-test/t/group_by.test: Auto merged mysql-test/t/select.test: Auto merged mysys/hash.c: Auto merged sql/field.h: Auto merged sql/field_conv.cc: Auto merged sql/ha_innodb.cc: Auto merged sql/handler.cc: Auto merged sql/item_func.cc: Auto merged sql/item_func.h: Auto merged sql/log.cc: Auto merged sql/mysql_priv.h: Auto merged sql/mysqld.cc: Auto merged sql/opt_sum.cc: Auto merged sql/set_var.cc: Auto merged sql/set_var.h: Auto merged sql/sql_analyse.cc: Auto merged sql/sql_class.cc: Auto merged sql/sql_show.cc: Auto merged sql/sql_table.cc: Auto merged sql/sql_udf.cc: Auto merged sql/structs.h: Auto merged sql/uniques.cc: Auto merged strings/strto.c: Auto merged vio/vio.c: Auto merged BitKeeper/triggers/post-commit: Add changeset to commit messages sql-bench/crash-me.sh: Use version from 4.0 sql-bench/server-cfg.sh: Use version from 4.0
Diffstat (limited to 'sql')
-rw-r--r--sql/field.cc28
-rw-r--r--sql/field.h2
-rw-r--r--sql/field_conv.cc24
-rw-r--r--sql/ha_innodb.cc108
-rw-r--r--sql/handler.cc4
-rw-r--r--sql/item.cc32
-rw-r--r--sql/item.h27
-rw-r--r--sql/item_cmpfunc.cc44
-rw-r--r--sql/item_cmpfunc.h9
-rw-r--r--sql/item_func.cc25
-rw-r--r--sql/item_func.h3
-rw-r--r--sql/item_timefunc.cc4
-rw-r--r--sql/item_timefunc.h4
-rw-r--r--sql/log.cc70
-rw-r--r--sql/mysql_priv.h25
-rw-r--r--sql/mysqld.cc40
-rw-r--r--sql/opt_range.cc6
-rw-r--r--sql/password.c2
-rw-r--r--sql/set_var.cc19
-rw-r--r--sql/set_var.h16
-rw-r--r--sql/sql_base.cc21
-rw-r--r--sql/sql_class.cc4
-rw-r--r--sql/sql_class.h107
-rw-r--r--sql/sql_delete.cc67
-rw-r--r--sql/sql_handler.cc4
-rw-r--r--sql/sql_insert.cc24
-rw-r--r--sql/sql_olap.cc4
-rw-r--r--sql/sql_parse.cc77
-rw-r--r--sql/sql_select.cc251
-rw-r--r--sql/sql_select.h22
-rw-r--r--sql/sql_table.cc9
-rw-r--r--sql/sql_udf.cc2
-rw-r--r--sql/sql_union.cc8
-rw-r--r--sql/sql_update.cc789
-rw-r--r--sql/sql_yacc.yy113
-rw-r--r--sql/table.h17
-rw-r--r--sql/unireg.cc2
37 files changed, 1158 insertions, 855 deletions
diff --git a/sql/field.cc b/sql/field.cc
index f0f3b22f1cc..7b3b88a69f1 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1576,11 +1576,14 @@ void Field_medium::sql_type(String &res) const
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ char *end;
while (len && my_isspace(system_charset_info,*from))
{
len--; from++;
}
long tmp;
+ String tmp_str(from,len);
+ from= tmp_str.c_ptr(); // Add end null if needed
int error= 0;
errno=0;
if (unsigned_flag)
@@ -1592,12 +1595,13 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
error= 1;
}
else
- tmp=(long) my_strntoul(cs,from,len,NULL,10);
+ tmp=(long) my_strntoul(cs,from,len,&end,10);
}
else
- tmp=my_strntol(cs,from,len,NULL,10);
- if (errno || current_thd->count_cuted_fields && !test_if_int(from,len))
- {
+ tmp=my_strntol(cs,from,len,&end,10);
+ if (errno ||
+ (from+len != end && current_thd->count_cuted_fields &&
+ !test_if_int(from,len)))
current_thd->cuted_fields++;
error= 1;
}
@@ -1821,11 +1825,14 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
{
+ char *end;
while (len && my_isspace(system_charset_info,*from))
{ // For easy error check
len--; from++;
}
longlong tmp;
+ String tmp_str(from,len);
+ from= tmp_str.c_ptr(); // Add end null if needed
int error= 0;
errno=0;
if (unsigned_flag)
@@ -1837,15 +1844,14 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
error= 1;
}
else
- tmp=(longlong) my_strntoull(cs,from,len,NULL,10);
+ tmp=(longlong) my_strntoull(cs,from,len,&end,10);
}
else
- tmp=my_strntoll(cs,from,len,NULL,10);
- if (errno || current_thd->count_cuted_fields && !test_if_int(from,len))
- {
- current_thd->cuted_fields++;
- error= 1;
- }
+ tmp=my_strntoll(cs,from,len,&end,10);
+ if (errno ||
+ (from+len != end && current_thd->count_cuted_fields &&
+ !test_if_int(from,len)))
+ current_thd->cuted_fields++;
#ifdef WORDS_BIGENDIAN
if (table->db_low_byte_first)
{
diff --git a/sql/field.h b/sql/field.h
index 9fc72cf56ec..16929a363dd 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1069,7 +1069,7 @@ Field *make_field(char *ptr, uint32 field_length,
uint pack_length_to_packflag(uint type);
uint32 calc_pack_length(enum_field_types type,uint32 length);
bool set_field_to_null(Field *field);
-bool set_field_to_null_with_conversions(Field *field);
+bool set_field_to_null_with_conversions(Field *field, bool no_conversions);
uint find_enum(TYPELIB *typelib,const char *x, uint length);
ulonglong find_set(TYPELIB *typelib,const char *x, uint length);
bool test_if_int(const char *str,int length);
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index ab71f324732..409c22d61d4 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -122,8 +122,26 @@ set_field_to_null(Field *field)
}
+/*
+ Set field to NULL or TIMESTAMP or to next auto_increment number
+
+ SYNOPSIS
+ set_field_to_null_with_conversions()
+ field Field to update
+ no_conversion Set to 1 if we should return 1 if field can't
+ take null values.
+ If set to 0 we will do store the 'default value'
+ if the field is a special field. If not we will
+ give an error.
+
+ RETURN VALUES
+ 0 Field could take 0 or an automatic conversion was used
+ 1 Field could not take NULL and no conversion was used.
+ If no_conversion was not set, an error message is printed
+*/
+
bool
-set_field_to_null_with_conversions(Field *field)
+set_field_to_null_with_conversions(Field *field, bool no_conversions)
{
if (field->real_maybe_null())
{
@@ -131,6 +149,8 @@ set_field_to_null_with_conversions(Field *field)
field->reset();
return 0;
}
+ if (no_conversions)
+ return 1;
/*
Check if this is a special type, which will get a special walue
@@ -156,8 +176,6 @@ set_field_to_null_with_conversions(Field *field)
}
-
-
static void do_skip(Copy_field *copy __attribute__((unused)))
{
}
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 7787b543f34..14810bada31 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -90,10 +90,11 @@ long innobase_mirrored_log_groups, innobase_log_files_in_group,
are determined in innobase_init below: */
char* innobase_data_home_dir = NULL;
+char* innobase_data_file_path = NULL;
char* innobase_log_group_home_dir = NULL;
char* innobase_log_arch_dir = NULL;
-/* The following has a midleading name: starting from 4.0.5 this also
-affects Windows */
+/* The following has a misleading name: starting from 4.0.5, this also
+affects Windows: */
char* innobase_unix_file_flush_method = NULL;
/* Below we have boolean-valued start-up parameters, and their default
@@ -104,14 +105,7 @@ my_bool innobase_log_archive = FALSE;
my_bool innobase_use_native_aio = FALSE;
my_bool innobase_fast_shutdown = TRUE;
-/*
- Set default InnoDB data file size to 10 MB and let it be
- auto-extending. Thus users can use InnoDB without having to
- specify any startup options.
-*/
-
-char *innobase_data_file_path= (char*) "ibdata1:10M:autoextend";
-static char *internal_innobase_data_file_path=0;
+static char *internal_innobase_data_file_path = NULL;
/* The following counter is used to convey information to InnoDB
about server activity: in selects it is not sensible to call
@@ -650,46 +644,59 @@ innobase_init(void)
DBUG_ENTER("innobase_init");
- os_innodb_umask = (ulint)my_umask;
+ os_innodb_umask = (ulint)my_umask;
- /*
- When using the embedded server, the datadirectory is not
- in the current directory.
- */
- if (mysql_embedded)
- default_path=mysql_real_data_home;
- else
- {
- /* It's better to use current lib, to keep path's short */
- current_dir[0] = FN_CURLIB;
- current_dir[1] = FN_LIBCHAR;
- current_dir[2] = 0;
- default_path=current_dir;
+ /* First calculate the default path for innodb_data_home_dir etc.,
+ in case the user has not given any value.
+
+ Note that when using the embedded server, the datadirectory is not
+ necessarily the current directory of this program. */
+
+ if (mysql_embedded) {
+ default_path = mysql_real_data_home;
+ } else {
+ /* It's better to use current lib, to keep paths short */
+ current_dir[0] = FN_CURLIB;
+ current_dir[1] = FN_LIBCHAR;
+ current_dir[2] = 0;
+ default_path = current_dir;
}
+ ut_a(default_path);
+
if (specialflag & SPECIAL_NO_PRIOR) {
srv_set_thread_priorities = FALSE;
} else {
srv_set_thread_priorities = TRUE;
srv_query_thread_priority = QUERY_PRIOR;
}
+
+ /* Set InnoDB initialization parameters according to the values
+ read from MySQL .cnf file */
- /*
- Set InnoDB initialization parameters according to the values
- read from MySQL .cnf file
- */
+ /*--------------- Data files -------------------------*/
- // Make a copy of innobase_data_file_path to not modify the original
- internal_innobase_data_file_path=my_strdup(innobase_data_file_path,
- MYF(MY_WME));
+ /* The default dir for data files is the datadir of MySQL */
srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
default_path);
- srv_arch_dir = (innobase_log_arch_dir ? innobase_log_arch_dir :
- default_path);
- ret = (bool)
- srv_parse_data_file_paths_and_sizes(internal_innobase_data_file_path,
+ /* Set default InnoDB data file size to 10 MB and let it be
+ auto-extending. Thus users can use InnoDB in >= 4.0 without having
+ to specify any startup options. */
+
+ if (!innobase_data_file_path) {
+ innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
+ }
+
+ /* Since InnoDB edits the argument in the next call, we make another
+ copy of it: */
+
+ internal_innobase_data_file_path = my_strdup(innobase_data_file_path,
+ MYF(MY_WME));
+
+ ret = (bool) srv_parse_data_file_paths_and_sizes(
+ internal_innobase_data_file_path,
&srv_data_file_names,
&srv_data_file_sizes,
&srv_data_file_is_raw_partition,
@@ -697,12 +704,26 @@ innobase_init(void)
&srv_auto_extend_last_data_file,
&srv_last_file_size_max);
if (ret == FALSE) {
- sql_print_error("InnoDB: syntax error in innodb_data_file_path");
- DBUG_RETURN(TRUE);
+ sql_print_error(
+ "InnoDB: syntax error in innodb_data_file_path");
+ DBUG_RETURN(TRUE);
}
- if (!innobase_log_group_home_dir)
- innobase_log_group_home_dir= default_path;
+ /* -------------- Log files ---------------------------*/
+
+ /* The default dir for log files is the datadir of MySQL */
+
+ if (!innobase_log_group_home_dir) {
+ innobase_log_group_home_dir = default_path;
+ }
+
+ /* Since innodb_log_arch_dir has no relevance under MySQL,
+ starting from 4.0.6 we always set it the same as
+ innodb_log_group_home_dir: */
+
+ innobase_log_arch_dir = innobase_log_group_home_dir;
+
+ srv_arch_dir = innobase_log_arch_dir;
ret = (bool)
srv_parse_log_group_home_dirs(innobase_log_group_home_dir,
@@ -716,9 +737,9 @@ innobase_init(void)
DBUG_RETURN(TRUE);
}
- srv_file_flush_method_str = (innobase_unix_file_flush_method ?
- innobase_unix_file_flush_method :
- NULL);
+ /* --------------------------------------------------*/
+
+ srv_file_flush_method_str = innobase_unix_file_flush_method;
srv_n_log_groups = (ulint) innobase_mirrored_log_groups;
srv_n_log_files = (ulint) innobase_log_files_in_group;
@@ -741,7 +762,9 @@ innobase_init(void)
srv_fast_shutdown = (ibool) innobase_fast_shutdown;
srv_print_verbose_log = mysql_embedded ? 0 : 1;
+
if (strcmp(default_charset_info->name, "latin1") == 0) {
+
/* Store the character ordering table to InnoDB.
For non-latin1 charsets we use the MySQL comparison
functions, and consequently we do not need to know
@@ -4179,3 +4202,4 @@ ha_innobase::get_auto_increment()
}
#endif /* HAVE_INNOBASE_DB */
+
diff --git a/sql/handler.cc b/sql/handler.cc
index cdd007f2cc2..a36a77484e5 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -883,13 +883,13 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
void ha_key_cache(void)
{
if (keybuff_size)
- (void) init_key_cache(keybuff_size);
+ (void) init_key_cache((ulong) keybuff_size);
}
void ha_resize_key_cache(void)
{
- (void) resize_key_cache(keybuff_size);
+ (void) resize_key_cache((ulong) keybuff_size);
}
diff --git a/sql/item.cc b/sql/item.cc
index 1c46f9abb7e..875eeb4d940 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -701,7 +701,7 @@ void Item_field::save_org_in_field(Field *to)
if (field->is_null())
{
null_value=1;
- set_field_to_null_with_conversions(to);
+ set_field_to_null_with_conversions(to, 1);
}
else
{
@@ -711,12 +711,12 @@ void Item_field::save_org_in_field(Field *to)
}
}
-int Item_field::save_in_field(Field *to)
+int Item_field::save_in_field(Field *to, bool no_conversions)
{
if (result_field->is_null())
{
null_value=1;
- return set_field_to_null_with_conversions(to);
+ return set_field_to_null_with_conversions(to, no_conversions);
}
else
{
@@ -744,9 +744,9 @@ int Item_field::save_in_field(Field *to)
1 Field doesn't support NULL values and can't handle 'field = NULL'
*/
-int Item_null::save_in_field(Field *field)
+int Item_null::save_in_field(Field *field, bool no_conversions)
{
- return set_field_to_null_with_conversions(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
}
@@ -768,7 +768,7 @@ int Item_null::save_safe_in_field(Field *field)
}
-int Item::save_in_field(Field *field)
+int Item::save_in_field(Field *field, bool no_conversions)
{
int error;
if (result_type() == STRING_RESULT ||
@@ -781,7 +781,7 @@ int Item::save_in_field(Field *field)
str_value.set_quick(buff,sizeof(buff),cs);
result=val_str(&str_value);
if (null_value)
- return set_field_to_null_with_conversions(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
error=field->store(result->ptr(),result->length(),cs);
str_value.set_quick(0, 0, cs);
@@ -798,14 +798,15 @@ int Item::save_in_field(Field *field)
{
longlong nr=val_int();
if (null_value)
- return set_field_to_null_with_conversions(field);
+ return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
error=field->store(nr);
}
return (error) ? -1 : 0;
}
-int Item_string::save_in_field(Field *field)
+
+int Item_string::save_in_field(Field *field, bool no_conversions)
{
String *result;
result=val_str(&str_value);
@@ -815,7 +816,8 @@ int Item_string::save_in_field(Field *field)
return (field->store(result->ptr(),result->length(),charset())) ? -1 : 0;
}
-int Item_int::save_in_field(Field *field)
+
+int Item_int::save_in_field(Field *field, bool no_conversions)
{
longlong nr=val_int();
if (null_value)
@@ -824,7 +826,8 @@ int Item_int::save_in_field(Field *field)
return (field->store(nr)) ? -1 : 0;
}
-int Item_real::save_in_field(Field *field)
+
+int Item_real::save_in_field(Field *field, bool no_conversions)
{
double nr=val();
if (null_value)
@@ -877,7 +880,7 @@ longlong Item_varbinary::val_int()
}
-int Item_varbinary::save_in_field(Field *field)
+int Item_varbinary::save_in_field(Field *field, bool no_conversions)
{
int error;
field->set_notnull();
@@ -1030,9 +1033,10 @@ bool Item_ref::check_loop(uint id)
DBUG_RETURN((*ref)->check_loop(id));
}
+
/*
-** If item is a const function, calculate it and return a const item
-** The original item is freed if not returned
+ If item is a const function, calculate it and return a const item
+ The original item is freed if not returned
*/
Item_result item_cmp_type(Item_result a,Item_result b)
diff --git a/sql/item.h b/sql/item.h
index 11b141613f3..5321c5874a4 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -54,11 +54,11 @@ public:
void set_name(const char *str,uint length=0);
void init_make_field(Send_field *tmp_field,enum enum_field_types type);
virtual bool fix_fields(THD *, struct st_table_list *, Item **);
- virtual int save_in_field(Field *field);
+ virtual int save_in_field(Field *field, bool no_conversions);
virtual void save_org_in_field(Field *field)
- { (void) save_in_field(field); }
+ { (void) save_in_field(field, 1); }
virtual int save_safe_in_field(Field *field)
- { return save_in_field(field); }
+ { return save_in_field(field, 1); }
virtual bool send(THD *thd, String *str);
virtual bool eq(const Item *, bool binary_cmp) const;
virtual Item_result result_type () const { return REAL_RESULT; }
@@ -194,7 +194,7 @@ public:
}
void make_field(Send_field *field);
bool fix_fields(THD *, struct st_table_list *, Item **);
- int save_in_field(Field *field);
+ int save_in_field(Field *field,bool no_conversions);
void save_org_in_field(Field *field);
table_map used_tables() const;
enum Item_result result_type () const
@@ -219,7 +219,7 @@ public:
longlong val_int();
String *val_str(String *str);
void make_field(Send_field *field);
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
int save_safe_in_field(Field *field);
enum Item_result result_type () const
{ return STRING_RESULT; }
@@ -289,7 +289,7 @@ public:
double val() { return (double) value; }
String *val_str(String*);
void make_field(Send_field *field);
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
bool basic_const_item() const { return 1; }
Item *new_item() { return new Item_int(name,value,max_length); }
void print(String *str);
@@ -329,7 +329,7 @@ public:
max_length=length;
}
Item_real(double value_par) :value(value_par) {}
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
enum Type type() const { return REAL_ITEM; }
double val() { return value; }
longlong val_int() { return (longlong) (value+(value > 0 ? 0.5 : -0.5));}
@@ -372,7 +372,7 @@ public:
double val() { return atof(str_value.ptr()); }
longlong val_int() { return strtoll(str_value.ptr(),(char**) 0,10); }
String *val_str(String*) { return (String*) &str_value; }
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
void make_field(Send_field *field);
enum Item_result result_type () const { return STRING_RESULT; }
bool basic_const_item() const { return 1; }
@@ -392,7 +392,7 @@ public:
Item_default() { name= (char*) "DEFAULT"; }
enum Type type() const { return DEFAULT_ITEM; }
void make_field(Send_field *field) {}
- int save_in_field(Field *field)
+ int save_in_field(Field *field, bool no_conversions)
{
field->set_default();
return 0;
@@ -430,7 +430,7 @@ public:
double val() { return (double) Item_varbinary::val_int(); }
longlong val_int();
String *val_str(String*) { return &str_value; }
- int save_in_field(Field *field);
+ int save_in_field(Field *field, bool no_conversions);
void make_field(Send_field *field);
enum Item_result result_type () const { return INT_RESULT; }
};
@@ -490,7 +490,8 @@ public:
bool send(THD *thd, String *tmp) { return (*ref)->send(thd, tmp); }
void make_field(Send_field *field) { (*ref)->make_field(field); }
bool fix_fields(THD *, struct st_table_list *, Item **);
- int save_in_field(Field *field) { return (*ref)->save_in_field(field); }
+ int save_in_field(Field *field, bool no_conversions)
+ { return (*ref)->save_in_field(field, no_conversions); }
void save_org_in_field(Field *field) { (*ref)->save_org_in_field(field); }
enum Item_result result_type () const { return (*ref)->result_type(); }
table_map used_tables() const { return (*ref)->used_tables(); }
@@ -510,9 +511,9 @@ class Item_int_with_ref :public Item_int
public:
Item_int_with_ref(longlong i, Item *ref_arg) :Item_int(i), ref(ref_arg)
{}
- int save_in_field(Field *field)
+ int save_in_field(Field *field, bool no_conversions)
{
- return ref->save_in_field(field);
+ return ref->save_in_field(field, no_conversions);
}
};
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 146758600c0..1c72ee56212 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -77,7 +77,7 @@ static bool convert_constant_item(Field *field, Item **item)
{
if ((*item)->const_item() && (*item)->type() != Item::INT_ITEM)
{
- if (!(*item)->save_in_field(field) && !((*item)->null_value))
+ if (!(*item)->save_in_field(field, 1) && !((*item)->null_value))
{
Item *tmp=new Item_int_with_ref(field->val_int(), *item);
if (tmp)
@@ -519,15 +519,29 @@ longlong Item_func_between::val_int()
return 0;
}
+static Item_result item_store_type(Item_result a,Item_result b)
+{
+ if (a == STRING_RESULT || b == STRING_RESULT)
+ return STRING_RESULT;
+ else if (a == REAL_RESULT || b == REAL_RESULT)
+ return REAL_RESULT;
+ else
+ return INT_RESULT;
+}
+
void
Item_func_ifnull::fix_length_and_dec()
{
maybe_null=args[1]->maybe_null;
max_length=max(args[0]->max_length,args[1]->max_length);
decimals=max(args[0]->decimals,args[1]->decimals);
- cached_result_type=args[0]->result_type();
+ if ((cached_result_type=item_store_type(args[0]->result_type(),
+ args[1]->result_type())) !=
+ REAL_RESULT)
+ decimals= 0;
}
+
double
Item_func_ifnull::val()
{
@@ -1163,6 +1177,18 @@ void Item_func_in::update_used_tables()
const_item_cache&=item->const_item();
}
+void Item_func_in::split_sum_func(List<Item> &fields)
+{
+ if (item->with_sum_func && item->type() != SUM_FUNC_ITEM)
+ item->split_sum_func(fields);
+ else if (item->used_tables() || item->type() == SUM_FUNC_ITEM)
+ {
+ fields.push_front(item);
+ item=new Item_ref((Item**) fields.head_ref(),0,item->name);
+ }
+ Item_func::split_sum_func(fields);
+}
+
longlong Item_func_bit_or::val_int()
{
@@ -1394,15 +1420,15 @@ longlong Item_cond_or::val_int()
Item *and_expressions(Item *a, Item *b, Item **org_item)
{
if (!a)
- return (*org_item= b);
+ return (*org_item= (Item*) b);
if (a == *org_item)
{
Item_cond *res;
- if ((res= new Item_cond_and(a, b)))
+ if ((res= new Item_cond_and(a, (Item*) b)))
res->used_tables_cache= a->used_tables() | b->used_tables();
return res;
}
- if (((Item_cond_and*) a)->add(b))
+ if (((Item_cond_and*) a)->add((Item*) b))
return 0;
((Item_cond_and*) a)->used_tables_cache|= b->used_tables();
return a;
@@ -1797,7 +1823,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
i -= u;
}
if (i < 0)
- return true;
+ return 1;
register const int v = plm1 - i;
turboShift = u - v;
@@ -1814,7 +1840,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
}
j += shift;
}
- return false;
+ return 0;
}
else
{
@@ -1828,7 +1854,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
i -= u;
}
if (i < 0)
- return true;
+ return 1;
register const int v = plm1 - i;
turboShift = u - v;
@@ -1845,7 +1871,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
}
j += shift;
}
- return false;
+ return 0;
}
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 74c9dec7ef8..d3e83a55add 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -435,9 +435,11 @@ class Item_func_in :public Item_int_func
longlong val_int();
bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref)
{
- return (item->check_cols(1) ||
- item->fix_fields(thd, tlist, &item) ||
- Item_func::fix_fields(thd, tlist, ref));
+ bool res=(item->check_cols(1) ||
+ item->fix_fields(thd, tlist, &item) ||
+ Item_func::fix_fields(thd, tlist, ref));
+ with_sum_func= with_sum_func || item->with_sum_func;
+ return res;
}
void fix_length_and_dec();
~Item_func_in() { delete item; delete array; delete in_item; }
@@ -448,6 +450,7 @@ class Item_func_in :public Item_int_func
enum Functype functype() const { return IN_FUNC; }
const char *func_name() const { return " IN "; }
void update_used_tables();
+ void split_sum_func(List<Item> &fields);
bool check_loop(uint id)
{
DBUG_ENTER("Item_func_in::check_loop");
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 1611b5f2257..c84b554b522 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -779,21 +779,20 @@ double Item_func_round::val()
}
-double Item_func_rand::val()
+void Item_func_rand::fix_length_and_dec()
{
- THD* thd = current_thd;
+ decimals=NOT_FIXED_DEC;
+ max_length=float_length(decimals);
if (arg_count)
{ // Only use argument once in query
uint32 tmp= (uint32) (args[0]->val_int());
- randominit(&thd->rand,(uint32) (tmp*0x10001L+55555555L),
- (uint32) (tmp*0x10000001L));
-#ifdef DELETE_ITEMS
- delete args[0];
-#endif
- arg_count=0;
+ if ((rand= (struct rand_struct*) sql_alloc(sizeof(*rand))))
+ randominit(rand,(uint32) (tmp*0x10001L+55555555L),
+ (uint32) (tmp*0x10000001L));
}
- else if (!thd->rand_used)
+ else
{
+ THD *thd= current_thd;
/*
No need to send a Rand log event if seed was given eg: RAND(seed),
as it will be replicated in the query as such.
@@ -805,8 +804,14 @@ double Item_func_rand::val()
thd->rand_used=1;
thd->rand_saved_seed1=thd->rand.seed1;
thd->rand_saved_seed2=thd->rand.seed2;
+ rand= &thd->rand;
}
- return rnd(&thd->rand);
+}
+
+
+double Item_func_rand::val()
+{
+ return rnd(rand);
}
longlong Item_func_sign::val_int()
diff --git a/sql/item_func.h b/sql/item_func.h
index 67c088f2bd9..771881a0465 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -522,14 +522,15 @@ public:
class Item_func_rand :public Item_real_func
{
+ struct rand_struct *rand;
public:
Item_func_rand(Item *a) :Item_real_func(a) {}
Item_func_rand() :Item_real_func() {}
double val();
const char *func_name() const { return "rand"; }
- void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); }
bool const_item() const { return 0; }
table_map used_tables() const { return RAND_TABLE_BIT; }
+ void fix_length_and_dec();
};
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 7c085a1b25a..7e2e8f7cfbd 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -429,7 +429,7 @@ String *Item_date::val_str(String *str)
}
-int Item_date::save_in_field(Field *field)
+int Item_date::save_in_field(Field *field, bool no_conversions)
{
TIME ltime;
timestamp_type t_type=TIMESTAMP_FULL;
@@ -567,7 +567,7 @@ bool Item_func_now::get_date(TIME *res,
}
-int Item_func_now::save_in_field(Field *to)
+int Item_func_now::save_in_field(Field *to, bool no_conversions)
{
to->set_notnull();
to->store_time(&ltime,TIMESTAMP_FULL);
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index f9b987324f0..40397351c18 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -325,7 +325,7 @@ public:
decimals=0;
max_length=10*thd_charset()->mbmaxlen;
}
- int save_in_field(Field *to);
+ int save_in_field(Field *to, bool no_conversions);
void make_field(Send_field *tmp_field)
{
init_make_field(tmp_field,FIELD_TYPE_DATE);
@@ -406,7 +406,7 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
double val() { return (double) value; }
longlong val_int() { return value; }
- int save_in_field(Field *to);
+ int save_in_field(Field *to, bool no_conversions);
String *val_str(String *str);
const char *func_name() const { return "now"; }
void fix_length_and_dec();
diff --git a/sql/log.cc b/sql/log.cc
index 0e1af8e5dae..dc7b5789efb 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1062,40 +1062,44 @@ bool MYSQL_LOG::write(Log_event* event_info)
No check for auto events flag here - this write method should
never be called if auto-events are enabled
*/
- if (thd && thd->last_insert_id_used)
+ if (thd)
{
- Intvar_log_event e(thd,(uchar)LAST_INSERT_ID_EVENT,thd->last_insert_id);
- e.set_log_pos(this);
- if (thd->server_id)
- e.server_id = thd->server_id;
- if (e.write(file))
- goto err;
- }
- if (thd && thd->insert_id_used)
- {
- Intvar_log_event e(thd,(uchar)INSERT_ID_EVENT,thd->last_insert_id);
- e.set_log_pos(this);
- if (thd->server_id)
- e.server_id = thd->server_id;
- if (e.write(file))
- goto err;
- }
- if (thd && thd->rand_used)
- {
- Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
- e.set_log_pos(this);
- if (e.write(file))
- goto err;
- }
- if (thd && thd->variables.convert_set)
- {
- char buf[1024] = "SET CHARACTER SET ";
- char* p = strend(buf);
- p = strmov(p, thd->variables.convert_set->name);
- Query_log_event e(thd, buf, (ulong)(p - buf), 0);
- e.set_log_pos(this);
- if (e.write(file))
- goto err;
+ if (thd->last_insert_id_used)
+ {
+ Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT,
+ thd->last_insert_id);
+ e.set_log_pos(this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->insert_id_used)
+ {
+ Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
+ e.set_log_pos(this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->rand_used)
+ {
+ Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
+ e.set_log_pos(this);
+ if (e.write(file))
+ goto err;
+ }
+ if (thd->variables.convert_set)
+ {
+ char buf[256], *p;
+ p= strmov(strmov(buf, "SET CHARACTER SET "),
+ thd->variables.convert_set->name);
+ Query_log_event e(thd, buf, (ulong) (p - buf), 0);
+ e.set_log_pos(this);
+ if (e.write(file))
+ goto err;
+ }
}
event_info->set_log_pos(this);
if (event_info->write(file) ||
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index f4b556248da..cb9e3a362b4 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -249,6 +249,20 @@ typedef struct st_sql_list {
uint elements;
byte *first;
byte **next;
+
+ inline void empty()
+ {
+ elements=0;
+ first=0;
+ next= &first;
+ }
+ inline void link_in_list(byte *element,byte **next_ptr)
+ {
+ elements++;
+ (*next)=element;
+ next= next_ptr;
+ *next=0;
+ }
} SQL_LIST;
@@ -443,6 +457,10 @@ int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields,
List<Item> &values,COND *conds,
ORDER *order, ha_rows limit,
enum enum_duplicates handle_duplicates);
+int mysql_multi_update(THD *thd, TABLE_LIST *table_list,
+ List<Item> *fields, List<Item> *values,
+ COND *conds, ulong options,
+ enum enum_duplicates handle_duplicates);
int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
List<List_item> &values, List<Item> &update_fields,
List<Item> &update_values, enum_duplicates flag);
@@ -545,7 +563,7 @@ void store_position_for_column(const char *name);
bool add_to_list(SQL_LIST &list,Item *group,bool asc=0);
void add_join_on(TABLE_LIST *b,Item *expr);
void add_join_natural(TABLE_LIST *a,TABLE_LIST *b);
-bool add_proc_to_list(Item *item);
+bool add_proc_to_list(THD *thd, Item *item);
TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find);
SQL_SELECT *make_select(TABLE *head, table_map const_tables,
@@ -664,6 +682,7 @@ extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN];
extern char pidfile_name[FN_REFLEN], time_zone[30], *opt_init_file;
extern char blob_newline;
extern double log_10[32];
+extern ulonglong keybuff_size;
extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables;
extern ulong created_tmp_tables, created_tmp_disk_tables;
extern ulong aborted_threads,aborted_connects;
@@ -682,8 +701,7 @@ extern ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count;
extern ulong ha_read_key_count, ha_read_next_count, ha_read_prev_count;
extern ulong ha_read_first_count, ha_read_last_count;
extern ulong ha_read_rnd_count, ha_read_rnd_next_count;
-extern ulong ha_commit_count, ha_rollback_count;
-extern ulong keybuff_size,table_cache_size;
+extern ulong ha_commit_count, ha_rollback_count,table_cache_size;
extern ulong max_connections,max_connect_errors, connect_timeout;
extern ulong max_insert_delayed_threads, max_user_connections;
extern ulong long_query_count, what_to_log,flush_time,opt_sql_mode;
@@ -732,6 +750,7 @@ extern SHOW_COMP_OPTION have_innodb;
extern SHOW_COMP_OPTION have_berkeley_db;
extern struct system_variables global_system_variables;
extern struct system_variables max_system_variables;
+extern struct rand_struct sql_rand;
/* optional things, have_* variables */
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 3df75b3643d..81676b61b1f 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -32,6 +32,7 @@
#include <nisam.h>
#include <thr_alarm.h>
#include <ft_global.h>
+#include <assert.h>
#ifndef DBUG_OFF
#define ONE_THREAD
@@ -322,7 +323,8 @@ ulong thd_startup_options=(OPTION_UPDATE_LOG | OPTION_AUTO_IS_NULL |
uint protocol_version=PROTOCOL_VERSION;
struct system_variables global_system_variables;
struct system_variables max_system_variables;
-ulong keybuff_size,table_cache_size,
+ulonglong keybuff_size;
+ulong table_cache_size,
thread_stack,
thread_stack_min,what_to_log= ~ (1L << (uint) COM_TIME),
query_buff_size,
@@ -451,7 +453,7 @@ pthread_attr_t connection_attrib;
#include <process.h>
#if !defined(EMBEDDED_LIBRARY)
HANDLE hEventShutdown;
-static char *event_name;
+static char shutdown_event_name[40];
#include "nt_servc.h"
static NTService Service; // Service object for WinNT
#endif
@@ -1019,6 +1021,7 @@ static void set_root(const char *path)
sql_perror("chroot");
unireg_abort(1);
}
+ my_setwd("/", MYF(0));
#endif
}
@@ -1391,7 +1394,7 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n",
We will try our best to scrape up some info that will hopefully help diagnose\n\
the problem, but since we have already crashed, something is definitely wrong\n\
and this may fail.\n\n");
- fprintf(stderr, "key_buffer_size=%ld\n", keybuff_size);
+ fprintf(stderr, "key_buffer_size=%lu\n", (ulong) keybuff_size);
fprintf(stderr, "read_buffer_size=%ld\n", global_system_variables.read_buff_size);
fprintf(stderr, "sort_buffer_size=%ld\n", thd->variables.sortbuff_size);
fprintf(stderr, "max_used_connections=%ld\n", max_used_connections);
@@ -1399,8 +1402,9 @@ and this may fail.\n\n");
fprintf(stderr, "threads_connected=%d\n", thread_count);
fprintf(stderr, "It is possible that mysqld could use up to \n\
key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %ld K\n\
-bytes of memory\n", (keybuff_size + (global_system_variables.read_buff_size +
- thd->variables.sortbuff_size) *
+bytes of memory\n", ((ulong) keybuff_size +
+ (global_system_variables.read_buff_size +
+ thd->variables.sortbuff_size) *
max_connections)/ 1024);
fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n");
@@ -2102,6 +2106,7 @@ int main(int argc, char **argv)
(void) grant_init((THD*) 0);
init_max_user_conn();
init_update_queries();
+ DBUG_ASSERT(current_thd == 0);
#ifdef HAVE_DLOPEN
if (!opt_noacl)
@@ -2110,6 +2115,7 @@ int main(int argc, char **argv)
/* init_slave() must be called after the thread keys are created */
init_slave();
+ DBUG_ASSERT(current_thd == 0);
if (opt_bin_log && !server_id)
{
server_id= !master_host ? 1 : 2;
@@ -2346,6 +2352,14 @@ bool default_service_handling(char **argv,
int main(int argc, char **argv)
{
+
+ /* When several instances are running on the same machine, we
+ need to have an unique named hEventShudown through the
+ application PID e.g.: MySQLShutdown1890; MySQLShutdown2342
+ */
+ int2str((int) GetCurrentProcessId(),strmov(shutdown_event_name,
+ "MySQLShutdown"), 10);
+
if (Service.GetOS()) /* true NT family */
{
char file_path[FN_REFLEN];
@@ -2360,10 +2374,9 @@ int main(int argc, char **argv)
if (Service.IsService(argv[1]))
{
/* start an optional service */
- event_name= argv[1];
- load_default_groups[0]= argv[1];
+ load_default_groups[0]= argv[1];
start_mode= 1;
- Service.Init(event_name, mysql_service);
+ Service.Init(argv[1], mysql_service);
return 0;
}
}
@@ -2382,9 +2395,8 @@ int main(int argc, char **argv)
use_opt_args=1;
opt_argc=argc;
opt_argv=argv;
- event_name= argv[2];
start_mode= 1;
- Service.Init(event_name, mysql_service);
+ Service.Init(argv[2], mysql_service);
return 0;
}
}
@@ -2404,7 +2416,6 @@ int main(int argc, char **argv)
{
/* start the default service */
start_mode= 1;
- event_name= "MySqlShutdown";
Service.Init(MYSQL_SERVICENAME, mysql_service);
return 0;
}
@@ -3764,8 +3775,9 @@ struct my_option my_long_options[] =
IO_SIZE, 0},
{"key_buffer_size", OPT_KEY_BUFFER_SIZE,
"The size of the buffer used for index blocks. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford; 64M on a 256M machine that mainly runs MySQL is quite common.",
- (gptr*) &keybuff_size, (gptr*) &keybuff_size, 0, GET_ULONG, REQUIRED_ARG,
- KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD, IO_SIZE, 0},
+ (gptr*) &keybuff_size, (gptr*) &keybuff_size, 0, GET_ULL,
+ REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD,
+ IO_SIZE, 0},
{"long_query_time", OPT_LONG_QUERY_TIME,
"Log all queries that have taken more than long_query_time seconds to execute to file.",
(gptr*) &global_system_variables.long_query_time,
@@ -4268,6 +4280,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case 'h':
strmake(mysql_real_data_home,argument, sizeof(mysql_real_data_home)-1);
+ /* Correct pointer set by my_getopt (for embedded library) */
+ mysql_data_home= mysql_real_data_home;
break;
case 'L':
strmake(language, argument, sizeof(language)-1);
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 0fad5769998..d76737e8e31 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1019,7 +1019,7 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part,
field->cmp_type() != value->result_type())
DBUG_RETURN(0);
- if (value->save_in_field(field) > 0)
+ if (value->save_in_field(field, 1) > 0)
{
/* This happens when we try to insert a NULL field in a not null column */
// TODO; Check if we can we remove the following block.
@@ -1028,9 +1028,9 @@ get_mm_leaf(PARAM *param, Field *field, KEY_PART *key_part,
/* convert column_name <=> NULL -> column_name IS NULL */
// Get local copy of key
char *str= (char*) alloc_root(param->mem_root,1);
- if (!*str)
+ if (!str)
DBUG_RETURN(0);
- *str = 1;
+ *str= 1;
DBUG_RETURN(new SEL_ARG(field,str,str));
}
DBUG_RETURN(&null_element); // cmp with NULL is never true
diff --git a/sql/password.c b/sql/password.c
index 48181ea18e6..318c8e84db3 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -43,7 +43,7 @@
void randominit(struct rand_struct *rand_st,ulong seed1, ulong seed2)
{ /* For mysql 3.21.# */
#ifdef HAVE_purify
- bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC varnings */
+ bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC varnings */
#endif
rand_st->max_value= 0x3FFFFFFFL;
rand_st->max_value_dbl=(double) rand_st->max_value;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 566ca6da860..691add191b2 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -122,7 +122,7 @@ sys_var_thd_ulong sys_interactive_timeout("interactive_timeout",
&SV::net_interactive_timeout);
sys_var_thd_ulong sys_join_buffer_size("join_buffer_size",
&SV::join_buff_size);
-sys_var_long_ptr sys_key_buffer_size("key_buffer_size",
+sys_var_ulonglong_ptr sys_key_buffer_size("key_buffer_size",
&keybuff_size,
fix_key_buffer_size);
sys_var_bool_ptr sys_local_infile("local_infile",
@@ -693,6 +693,23 @@ void sys_var_long_ptr::set_default(THD *thd, enum_var_type type)
}
+bool sys_var_ulonglong_ptr::update(THD *thd, set_var *var)
+{
+ ulonglong tmp= var->value->val_int();
+ if (option_limits)
+ *value= (ulonglong) getopt_ull_limit_value(tmp, option_limits);
+ else
+ *value= (ulonglong) tmp;
+ return 0;
+}
+
+
+void sys_var_ulonglong_ptr::set_default(THD *thd, enum_var_type type)
+{
+ *value= (ulonglong) option_limits->def_value;
+}
+
+
bool sys_var_bool_ptr::update(THD *thd, set_var *var)
{
*value= (my_bool) var->save_result.ulong_value;
diff --git a/sql/set_var.h b/sql/set_var.h
index de1e27e0da8..39a5995e30f 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -86,6 +86,22 @@ public:
};
+class sys_var_ulonglong_ptr :public sys_var
+{
+public:
+ ulonglong *value;
+ sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr)
+ :sys_var(name_arg),value(value_ptr) {}
+ sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr,
+ sys_after_update_func func)
+ :sys_var(name_arg,func), value(value_ptr) {}
+ bool update(THD *thd, set_var *var);
+ void set_default(THD *thd, enum_var_type type);
+ SHOW_TYPE type() { return SHOW_LONGLONG; }
+ byte *value_ptr(THD *thd, enum_var_type type) { return (byte*) value; }
+};
+
+
class sys_var_bool_ptr :public sys_var
{
public:
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 63affe5fde6..e2b36106fb0 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2224,8 +2224,8 @@ static key_map get_key_map_from_key_list(TABLE *table,
}
/****************************************************************************
-** This just drops in all fields instead of current '*' field
-** Returns pointer to last inserted field if ok
+ This just drops in all fields instead of current '*' field
+ Returns pointer to last inserted field if ok
****************************************************************************/
bool
@@ -2239,21 +2239,26 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name,
for (; tables ; tables=tables->next)
{
TABLE *table=tables->table;
- if (grant_option && !thd->master_access &&
- check_grant_all_columns(thd,SELECT_ACL,table) )
- DBUG_RETURN(-1);
if (!table_name || (!strcmp(table_name,tables->alias) &&
(!db_name || !strcmp(tables->db,db_name))))
{
+ /* Ensure that we have access right to all columns */
+ if (grant_option && !thd->master_access &&
+ check_grant_all_columns(thd,SELECT_ACL,table) )
+ DBUG_RETURN(-1);
Field **ptr=table->field,*field;
thd->used_tables|=table->map;
while ((field = *ptr++))
{
Item_field *item= new Item_field(field);
if (!found++)
- (void) it->replace(item);
+ (void) it->replace(item); // Replace '*'
else
it->after(item);
+ /*
+ Mark if field used before in this select.
+ Used by 'insert' to verify if a field name is used twice
+ */
if (field->query_id == thd->query_id)
thd->dupp_field=field;
field->query_id=thd->query_id;
@@ -2377,7 +2382,7 @@ fill_record(List<Item> &fields,List<Item> &values)
while ((field=(Item_field*) f++))
{
value=v++;
- if (value->save_in_field(field->field) > 0)
+ if (value->save_in_field(field->field, 0) > 0)
DBUG_RETURN(1);
}
DBUG_RETURN(0);
@@ -2395,7 +2400,7 @@ fill_record(Field **ptr,List<Item> &values)
while ((field = *ptr++))
{
value=v++;
- if (value->save_in_field(field) == 1)
+ if (value->save_in_field(field, 0) == 1)
DBUG_RETURN(1);
}
DBUG_RETURN(0);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 57cd0e7a13d..ebd1d9d2b3c 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -37,7 +37,6 @@
#include <mysys_err.h>
#include <assert.h>
-extern struct rand_struct sql_rand;
/*****************************************************************************
** Instansiate templates
@@ -172,9 +171,8 @@ THD::THD():user_time(0), fatal_error(0),
{
pthread_mutex_lock(&LOCK_thread_count);
ulong tmp=(ulong) (rnd(&sql_rand) * 3000000);
- randominit(&rand, tmp + (ulong) start_time,
- tmp + (ulong) thread_id);
pthread_mutex_unlock(&LOCK_thread_count);
+ randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
}
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 5326b66e56e..e04c92cffb0 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -679,7 +679,7 @@ public:
}
virtual bool send_fields(List<Item> &list,uint flag)=0;
virtual bool send_data(List<Item> &items)=0;
- virtual void initialize_tables (JOIN *join=0) {}
+ virtual bool initialize_tables (JOIN *join=0) { return 0; }
virtual void send_error(uint errcode,const char *err)
{
my_message(errcode, err, MYF(0));
@@ -743,10 +743,10 @@ class select_insert :public select_result {
List<Item> *fields;
ulonglong last_insert_id;
COPY_INFO info;
- uint save_time_stamp;
select_insert(TABLE *table_par,List<Item> *fields_par,enum_duplicates duplic)
- :table(table_par),fields(fields_par), last_insert_id(0), save_time_stamp(0) {
+ :table(table_par),fields(fields_par), last_insert_id(0)
+ {
bzero((char*) &info,sizeof(info));
info.handle_duplicates=duplic;
}
@@ -790,8 +790,8 @@ class select_union :public select_result {
public:
TABLE *table;
COPY_INFO info;
- uint save_time_stamp;
TMP_TABLE_PARAM *tmp_table_param;
+ bool not_describe;
select_union(TABLE *table_par);
~select_union();
@@ -923,58 +923,61 @@ public:
friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique);
};
- class multi_delete : public select_result {
- TABLE_LIST *delete_tables, *table_being_deleted;
+class multi_delete : public select_result
+{
+ TABLE_LIST *delete_tables, *table_being_deleted;
#ifdef SINISAS_STRIP
- IO_CACHE **tempfiles;
- byte *memory_lane;
+ IO_CACHE **tempfiles;
+ byte *memory_lane;
#else
- Unique **tempfiles;
+ Unique **tempfiles;
#endif
- THD *thd;
- ha_rows deleted;
- uint num_of_tables;
- int error;
- bool do_delete, transactional_tables, log_delayed, normal_tables;
- public:
- multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables);
- ~multi_delete();
- int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list,
- uint flag) { return 0; }
- bool send_data(List<Item> &items);
- void initialize_tables (JOIN *join);
- void send_error(uint errcode,const char *err);
- int do_deletes (bool from_send_error);
- bool send_eof();
- };
-
- class multi_update : public select_result {
- TABLE_LIST *update_tables, *table_being_updated;
- COPY_INFO *infos;
- TABLE **tmp_tables;
- THD *thd;
- ha_rows updated, found;
- List<Item> fields;
- List <Item> **fields_by_tables;
- enum enum_duplicates dupl;
- uint num_of_tables, num_fields, num_updated, *save_time_stamps, *field_sequence;
- int error;
- bool do_update, not_trans_safe;
- public:
- multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> &fs,
- enum enum_duplicates handle_duplicates,
- uint num);
- ~multi_update();
- int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_fields(List<Item> &list,
+ THD *thd;
+ ha_rows deleted;
+ uint num_of_tables;
+ int error;
+ bool do_delete, transactional_tables, log_delayed, normal_tables;
+public:
+ multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables);
+ ~multi_delete();
+ int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
+ bool send_fields(List<Item> &list,
uint flag) { return 0; }
- bool send_data(List<Item> &items);
- void initialize_tables (JOIN *join);
- void send_error(uint errcode,const char *err);
- int do_updates (bool from_send_error);
- bool send_eof();
- };
+ bool send_data(List<Item> &items);
+ bool initialize_tables (JOIN *join);
+ void send_error(uint errcode,const char *err);
+ int do_deletes (bool from_send_error);
+ bool send_eof();
+};
+
+
+class multi_update : public select_result
+{
+ TABLE_LIST *all_tables, *update_tables, *table_being_updated;
+ THD *thd;
+ TABLE **tmp_tables, *main_table;
+ TMP_TABLE_PARAM *tmp_table_param;
+ ha_rows updated, found;
+ List <Item> *fields, *values;
+ List <Item> **fields_for_table, **values_for_table;
+ uint table_count;
+ Copy_field *copy_field;
+ enum enum_duplicates handle_duplicates;
+ bool do_update, trans_safe, transactional_tables, log_delayed;
+
+public:
+ multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields,
+ List<Item> *values, enum_duplicates handle_duplicates);
+ ~multi_update();
+ int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
+ bool send_fields(List<Item> &list, uint flag) { return 0; }
+ bool send_data(List<Item> &items);
+ bool initialize_tables (JOIN *join);
+ void send_error(uint errcode,const char *err);
+ int do_updates (bool from_send_error);
+ bool send_eof();
+};
+
class select_dumpvar :public select_result {
ha_rows row_count;
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index d35790da1b0..6440838ae94 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -226,12 +226,13 @@ cleanup:
extern "C" int refposcmp2(void* arg, const void *a,const void *b)
{
+ /* arg is a pointer to file->ref_length */
return memcmp(a,b, *(int*) arg);
}
multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt,
uint num_of_tables_arg)
- : delete_tables (dt), thd(thd_arg), deleted(0),
+ : delete_tables(dt), thd(thd_arg), deleted(0),
num_of_tables(num_of_tables_arg), error(0),
do_delete(0), transactional_tables(0), log_delayed(0), normal_tables(0)
{
@@ -244,31 +245,22 @@ multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
{
DBUG_ENTER("multi_delete::prepare");
unit= u;
- do_delete = true;
+ do_delete= 1;
thd->proc_info="deleting from main table";
-
- if (thd->options & OPTION_SAFE_UPDATES)
- {
- TABLE_LIST *table_ref;
- for (table_ref=delete_tables; table_ref; table_ref=table_ref->next)
- {
- TABLE *table=table_ref->table;
- if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
- {
- my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
- DBUG_RETURN(1);
- }
- }
- }
DBUG_RETURN(0);
}
-void
+bool
multi_delete::initialize_tables(JOIN *join)
{
- int counter=0;
TABLE_LIST *walk;
+ Unique **tempfiles_ptr;
+ DBUG_ENTER("initialize_tables");
+
+ if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
+ DBUG_RETURN(1);
+
table_map tables_to_delete_from=0;
for (walk= delete_tables ; walk ; walk=walk->next)
tables_to_delete_from|= walk->table->map;
@@ -282,9 +274,10 @@ multi_delete::initialize_tables(JOIN *join)
{
/* We are going to delete from this table */
TABLE *tbl=walk->table=tab->table;
+ walk=walk->next;
/* Don't use KEYREAD optimization on this table */
tbl->no_keyread=1;
- walk=walk->next;
+ tbl->used_keys= 0;
if (tbl->file->has_transactions())
log_delayed= transactional_tables= 1;
else if (tbl->tmp_table != NO_TMP_TABLE)
@@ -294,19 +287,17 @@ multi_delete::initialize_tables(JOIN *join)
}
}
walk= delete_tables;
- walk->table->used_keys=0;
- for (walk=walk->next ; walk ; walk=walk->next, counter++)
+ tempfiles_ptr= tempfiles;
+ for (walk=walk->next ; walk ; walk=walk->next)
{
- tables_to_delete_from|= walk->table->map;
TABLE *table=walk->table;
- /* Don't use key read with MULTI-TABLE-DELETE */
- table->used_keys=0;
- tempfiles[counter] = new Unique (refposcmp2,
- (void *) &table->file->ref_length,
- table->file->ref_length,
- MEM_STRIP_BUF_SIZE);
+ *tempfiles_ptr++= new Unique (refposcmp2,
+ (void *) &table->file->ref_length,
+ table->file->ref_length,
+ MEM_STRIP_BUF_SIZE);
}
init_ftfuncs(thd, thd->lex.current_select->select_lex(), 1);
+ DBUG_RETURN(thd->fatal_error != 0);
}
@@ -321,7 +312,7 @@ multi_delete::~multi_delete()
t->no_keyread=0;
}
- for (uint counter = 0; counter < num_of_tables-1; counter++)
+ for (uint counter= 0; counter < num_of_tables-1; counter++)
{
if (tempfiles[counter])
delete tempfiles[counter];
@@ -428,7 +419,7 @@ int multi_delete::do_deletes(bool from_send_error)
else
table_being_deleted = delete_tables;
- do_delete = false;
+ do_delete= 0;
for (table_being_deleted=table_being_deleted->next;
table_being_deleted ;
table_being_deleted=table_being_deleted->next, counter++)
@@ -483,7 +474,7 @@ bool multi_delete::send_eof()
was a non-transaction-safe table involved, since
modifications in it cannot be rolled back.
*/
- if (deleted)
+ if (deleted && (error <= 0 || normal_tables))
{
mysql_update_log.write(thd,thd->query,thd->query_length);
if (mysql_bin_log.is_open())
@@ -493,11 +484,17 @@ bool multi_delete::send_eof()
if (mysql_bin_log.write(&qinfo) && !normal_tables)
local_error=1; // Log write failed: roll back the SQL statement
}
- /* Commit or rollback the current SQL statement */
- VOID(ha_autocommit_or_rollback(thd,local_error > 0));
-
- query_cache_invalidate3(thd, delete_tables, 1);
+ if (!log_delayed)
+ thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
+ /* Commit or rollback the current SQL statement */
+ if (transactional_tables)
+ if (ha_autocommit_or_rollback(thd,local_error > 0))
+ local_error=1;
+
+ if (deleted)
+ query_cache_invalidate3(thd, delete_tables, 1);
+
if (local_error)
::send_error(thd);
else
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 909e1643fe5..6ea319a72e4 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -180,10 +180,10 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables,
Item *item;
for (key_len=0 ; (item=it_ke++) ; key_part++)
{
- (void) item->save_in_field(key_part->field);
+ (void) item->save_in_field(key_part->field, 1);
key_len+=key_part->store_length;
}
- if (!(key= (byte*) sql_calloc(ALIGN_SIZE(key_len))))
+ if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
{
send_error(thd,ER_OUTOFMEMORY);
goto err;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 15c6df0398d..4ad9b6bae95 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -41,7 +41,8 @@ static void unlink_blobs(register TABLE *table);
/*
Check if insert fields are correct
- Resets form->time_stamp if a timestamp value is set
+ Updates table->time_stamp to point to timestamp field or 0, depending on
+ if timestamp should be updated or not.
*/
int
@@ -87,11 +88,12 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields,
my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name);
return -1;
}
+ table->time_stamp=0;
if (table->timestamp_field && // Don't set timestamp if used
- table->timestamp_field->query_id == thd->query_id)
- table->time_stamp=0; // This should be saved
+ table->timestamp_field->query_id != thd->query_id)
+ table->time_stamp= table->timestamp_field->offset()+1;
}
- // For the values we need select_priv
+ // For the values we need select_priv
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
return 0;
}
@@ -109,7 +111,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
!(thd->master_access & SUPER_ACL));
bool transactional_table, log_delayed, bulk_insert=0;
uint value_count;
- uint save_time_stamp;
ulong counter = 1;
ulonglong id;
COPY_INFO info;
@@ -167,7 +168,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
table= table_list->table;
thd->proc_info="init";
thd->used_tables=0;
- save_time_stamp=table->time_stamp;
values= its++;
if (check_insert_fields(thd,table,fields,*values,1) ||
setup_tables(insert_table_list) ||
@@ -175,10 +175,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
(duplic == DUP_UPDATE &&
(setup_fields(thd, insert_table_list, update_fields, 0, 0, 0) ||
setup_fields(thd, insert_table_list, update_values, 0, 0, 0))))
- {
- table->time_stamp= save_time_stamp;
goto abort;
- }
if (find_real_table_in_list(table_list->next,
table_list->db, table_list->real_name))
{
@@ -195,14 +192,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW,
ER(ER_WRONG_VALUE_COUNT_ON_ROW),
MYF(0),counter);
- table->time_stamp=save_time_stamp;
goto abort;
}
if (setup_fields(thd,insert_table_list,*values,0,0,0))
- {
- table->time_stamp= save_time_stamp;
goto abort;
- }
}
its.rewind ();
/*
@@ -364,7 +357,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
}
}
thd->proc_info="end";
- table->time_stamp=save_time_stamp; // Restore auto timestamp ptr
table->next_number_field=0;
thd->count_cuted_fields=0;
thd->next_insert_id=0; // Reset this if wrongly used
@@ -1339,7 +1331,6 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
DBUG_ENTER("select_insert::prepare");
unit= u;
- save_time_stamp=table->time_stamp;
if (check_insert_fields(thd,table,*fields,values,1))
DBUG_RETURN(1);
@@ -1360,8 +1351,6 @@ select_insert::~select_insert()
{
if (table)
{
- if (save_time_stamp)
- table->time_stamp=save_time_stamp;
table->next_number_field=0;
table->file->extra(HA_EXTRA_RESET);
}
@@ -1467,7 +1456,6 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
/* First field to copy */
field=table->field+table->fields - values.elements;
- save_time_stamp=table->time_stamp;
if (table->timestamp_field) // Don't set timestamp if used
{
table->timestamp_field->set_time();
diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc
index 930e052ab90..6eb4fbcaaf6 100644
--- a/sql/sql_olap.cc
+++ b/sql/sql_olap.cc
@@ -75,7 +75,7 @@ static int make_new_olap_select(LEX *lex, SELECT_LEX *select_lex, List<Item> new
!strcmp(((Item_field*)new_item)->table_name,iif->table_name) &&
!strcmp(((Item_field*)new_item)->field_name,iif->field_name))
{
- not_found=false;
+ not_found= 0;
((Item_field*)new_item)->db_name=iif->db_name;
Item_field *new_one=new Item_field(iif->db_name, iif->table_name, iif->field_name);
privlist.push_back(new_one);
@@ -151,7 +151,7 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex)
if (cursor->do_redirect)
{
cursor->table= ((TABLE_LIST*) cursor->table)->table;
- cursor->do_redirect=false;
+ cursor->do_redirect= 0;
}
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7f3a4986038..8e8e2c44e01 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -193,6 +193,8 @@ static bool check_user(THD *thd,enum_server_command command, const char *user,
thd->db_length=0;
USER_RESOURCES ur;
+ if (passwd[0] && strlen(passwd) != SCRAMBLE_LENGTH)
+ return 1;
if (!(thd->user = my_strdup(user, MYF(0))))
{
send_error(thd,ER_OUT_OF_RESOURCES);
@@ -419,7 +421,7 @@ end:
}
-static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them=false)
+static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0)
{
(void) pthread_mutex_lock(&LOCK_user_conn);
@@ -593,8 +595,6 @@ check_connections(THD *thd)
char *user= (char*) net->read_pos+5;
char *passwd= strend(user)+1;
char *db=0;
- if (passwd[0] && strlen(passwd) != SCRAMBLE_LENGTH)
- return ER_HANDSHAKE_ERROR;
if (thd->client_capabilities & CLIENT_CONNECT_WITH_DB)
db=strend(passwd)+1;
if (thd->client_capabilities & CLIENT_INTERACTIVE)
@@ -1914,59 +1914,24 @@ mysql_execute_command(THD *thd)
DBUG_VOID_RETURN;
}
{
- multi_update *result;
- uint table_count;
- TABLE_LIST *auxi;
- const char *msg=0;
-
- for (auxi= (TABLE_LIST*) tables, table_count=0 ; auxi ; auxi=auxi->next)
- table_count++;
-
+ const char *msg= 0;
if (select_lex->order_list.elements)
- msg="ORDER BY";
+ msg= "ORDER BY";
else if (select_lex->select_limit && select_lex->select_limit !=
HA_POS_ERROR)
- msg="LIMIT";
+ msg= "LIMIT";
if (msg)
{
net_printf(thd, ER_WRONG_USAGE, "UPDATE", msg);
res= 1;
break;
}
-
- tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
- if ((res=open_and_lock_tables(thd,tables)))
- break;
- unit->select_limit_cnt= HA_POS_ERROR;
- if (!setup_fields(thd,tables,select_lex->item_list,1,0,0) &&
- !setup_fields(thd,tables,lex->value_list,0,0,0) &&
- !thd->fatal_error &&
- (result=new multi_update(thd,tables,select_lex->item_list,
- lex->duplicates, table_count)))
- {
- List <Item> total_list;
- List_iterator <Item> field_list(select_lex->item_list);
- List_iterator <Item> value_list(lex->value_list);
- Item *item;
- while ((item=field_list++))
- total_list.push_back(item);
- while ((item=value_list++))
- total_list.push_back(item);
-
- res= mysql_select(thd, tables, total_list,
- select_lex->where,
- (ORDER *)NULL, (ORDER *)NULL, (Item *)NULL,
- (ORDER *)NULL,
- select_lex->options | thd->options |
- SELECT_NO_JOIN_CACHE,
- result, unit, select_lex, 0);
- delete result;
- if (thd->net.report_error)
- res= -1;
- }
- else
- res= -1; // Error is not sent
- close_thread_tables(thd);
+ res= mysql_multi_update(thd,tables,
+ &select_lex->item_list,
+ &lex->value_list,
+ select_lex->where,
+ select_lex->options,
+ lex->duplicates, unit, select_lex);
}
break;
case SQLCOM_REPLACE:
@@ -3022,16 +2987,6 @@ mysql_parse(THD *thd, char *inBuf, uint length)
}
-inline static void
-link_in_list(SQL_LIST *list,byte *element,byte **next)
-{
- list->elements++;
- (*list->next)=element;
- list->next=next;
- *next=0;
-}
-
-
/*****************************************************************************
** Store field definition for create
** Return 0 if ok
@@ -3344,7 +3299,7 @@ void store_position_for_column(const char *name)
}
bool
-add_proc_to_list(Item *item)
+add_proc_to_list(THD* thd, Item *item)
{
ORDER *order;
Item **item_ptr;
@@ -3355,7 +3310,7 @@ add_proc_to_list(Item *item)
*item_ptr= item;
order->item=item_ptr;
order->free_me=0;
- link_in_list(&current_lex->proc_list,(byte*) order,(byte**) &order->next);
+ thd->lex.proc_list.link_in_list((byte*) order,(byte**) &order->next);
return 0;
}
@@ -3409,7 +3364,7 @@ bool add_to_list(SQL_LIST &list,Item *item,bool asc)
order->asc = asc;
order->free_me=0;
order->used=0;
- link_in_list(&list,(byte*) order,(byte**) &order->next);
+ list.link_in_list((byte*) order,(byte**) &order->next);
DBUG_RETURN(0);
}
@@ -3500,7 +3455,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(Table_ident *table,
}
}
}
- link_in_list(&table_list, (byte*) ptr, (byte**) &ptr->next);
+ table_list.link_in_list((byte*) ptr, (byte**) &ptr->next);
DBUG_RETURN(ptr);
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 50820b931b6..a5c69763863 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -132,7 +132,9 @@ static void read_cached_record(JOIN_TAB *tab);
static bool cmp_buffer_with_ref(JOIN_TAB *tab);
static bool setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields,
List<Item> &all_fields,ORDER *new_order);
-static ORDER *create_distinct_group(ORDER *order, List<Item> &fields);
+static ORDER *create_distinct_group(THD *thd, ORDER *order,
+ List<Item> &fields,
+ bool *all_order_by_fields_used);
static bool test_if_subpart(ORDER *a,ORDER *b);
static TABLE *get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables);
static void calc_group_buffer(JOIN *join,ORDER *group);
@@ -248,7 +250,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
if (!fake_select_lex)
select_lex->join= this;
union_part= (unit->first_select()->next_select() != 0);
-
+
/* Check that all tables, fields, conds and order are ok */
if (setup_tables(tables_list) ||
@@ -343,6 +345,10 @@ JOIN::prepare(TABLE_LIST *tables_init,
this->group= group_list != 0;
row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR :
unit->select_limit_cnt);
+ /* select_limit is used to decide if we are likely to scan the whole table */
+ select_limit= unit->select_limit_cnt;
+ if (having || (select_options & OPTION_FOUND_ROWS))
+ select_limit= HA_POS_ERROR;
do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
this->unit= unit;
@@ -371,6 +377,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
int
JOIN::optimize()
{
+ ha_rows select_limit;
DBUG_ENTER("JOIN::optimize");
#ifdef HAVE_REF_TO_FIELDS // Not done yet
@@ -403,7 +410,8 @@ JOIN::optimize()
// normal error processing & cleanup
DBUG_RETURN(-1);
- if (cond_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
+ if (cond_value == Item::COND_FALSE ||
+ (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
{ /* Impossible cond */
zero_result_cause= "Impossible WHERE";
DBUG_RETURN(0);
@@ -451,11 +459,12 @@ JOIN::optimize()
found_const_table_map= 0;
}
thd->proc_info= "preparing";
- result->initialize_tables(this);
+ if (result->initialize_tables(this))
+ DBUG_RETURN(-1);
if (const_table_map != found_const_table_map &&
!(select_options & SELECT_DESCRIBE))
{
- zero_result_cause= "";
+ zero_result_cause= "no matching row in const table";
select_options= 0; //TODO why option in return_zero_rows was droped
DBUG_RETURN(0);
}
@@ -513,21 +522,46 @@ JOIN::optimize()
if (! hidden_group_fields)
select_distinct=0;
}
- else if (select_distinct && tables - const_tables == 1 &&
- (unit->select_limit_cnt == HA_POS_ERROR ||
- (select_options & OPTION_FOUND_ROWS) ||
- order &&
- !(skip_sort_order=
- test_if_skip_sort_order(&join_tab[const_tables],
- order,
- unit->select_limit_cnt,
- 1))))
+ else if (select_distinct && tables - const_tables == 1)
{
- if ((group_list= create_distinct_group(order, fields_list)))
- {
- select_distinct= 0;
- no_order= !order;
- group= 1; // For end_write_group
+ /*
+ We are only using one table. In this case we change DISTINCT to a
+ GROUP BY query if:
+ - The GROUP BY can be done through indexes (no sort) and the ORDER
+ BY only uses selected fields.
+ (In this case we can later optimize away GROUP BY and ORDER BY)
+ - We are scanning the whole table without LIMIT
+ This can happen if:
+ - We are using CALC_FOUND_ROWS
+ - We are using an ORDER BY that can't be optimized away.
+
+ We don't want to use this optimization when we are using LIMIT
+ because in this case we can just create a temporary table that
+ holds LIMIT rows and stop when this table is full.
+ */
+ JOIN_TAB *tab= &join_tab[const_tables];
+ bool all_order_fields_used;
+ if (order)
+ skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1);
+ if ((group=create_distinct_group(thd, order, fields_list,
+ &all_order_fields_used)))
+ {
+ bool skip_group= (skip_sort_order &&
+ test_if_skip_sort_order(tab, group, select_limit,
+ 1) != 0);
+ if ((skip_group && all_order_fields_used) ||
+ select_limit == HA_POS_ERROR ||
+ (order && !skip_sort_order))
+ {
+ /* Change DISTINCT to GROUP BY */
+ select_distinct= 0;
+ no_order= !order;
+ if (all_order_fields_used)
+ order=0;
+ group=1; // For end_write_group
+ }
+ else
+ group= 0;
}
else if (thd->fatal_error) // End of memory
DBUG_RETURN(-1);
@@ -727,11 +761,9 @@ JOIN::exec()
order=group_list;
if (order &&
(const_tables == tables ||
- (simple_order &&
+ ((simple_order || skip_sort_order) &&
test_if_skip_sort_order(&join_tab[const_tables], order,
- (select_options & OPTION_FOUND_ROWS) ?
- HA_POS_ERROR : unit->select_limit_cnt,
- 0))))
+ select_limit, 0))))
order=0;
select_describe(this, need_tmp,
order != 0 && !skip_sort_order,
@@ -759,7 +791,7 @@ JOIN::exec()
group_list ? 0 : select_distinct,
group_list && simple_group,
(order == 0 || skip_sort_order) &&
- !(select_options & OPTION_FOUND_ROWS),
+ select_limit != HA_POS_ERROR,
select_options, unit)))
DBUG_VOID_RETURN;
@@ -813,9 +845,10 @@ JOIN::exec()
/* Optimize "select distinct b from t1 order by key_part_1 limit #" */
if (order && skip_sort_order)
{
- (void) test_if_skip_sort_order(&this->join_tab[const_tables],
- order, unit->select_limit_cnt, 0);
- order=0;
+ /* Should always succeed */
+ if (test_if_skip_sort_order(&this->join_tab[const_tables],
+ order, unit->select_limit_cnt, 0))
+ order=0;
}
}
@@ -989,8 +1022,7 @@ JOIN::exec()
}
}
{
- ha_rows select_limit= unit->select_limit_cnt;
- if (having || group || (select_options & OPTION_FOUND_ROWS))
+ if (group)
select_limit= HA_POS_ERROR;
else
{
@@ -1002,7 +1034,13 @@ JOIN::exec()
JOIN_TAB *end_table= &join_tab[tables];
for (; table < end_table ; table++)
{
- if (table->select_cond)
+ /*
+ table->keyuse is set in the case there was an original WHERE clause
+ on the table that was optimized away.
+ table->on_expr tells us that it was a LEFT JOIN and there will be
+ at least one row generated from the table.
+ */
+ if (table->select_cond || (table->keyuse && !table->on_expr))
{
/* We have to sort all rows */
select_limit= HA_POS_ERROR;
@@ -1186,13 +1224,21 @@ static ha_rows get_quick_record_count(SQL_SELECT *select,TABLE *table,
}
+/*
+ Calculate the best possible join and initialize the join structure
+
+ RETURN VALUES
+ 0 ok
+ 1 Fatal error
+*/
+
static bool
make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
DYNAMIC_ARRAY *keyuse_array)
{
int error;
uint i,table_count,const_count,found_ref,refs,key,const_ref,eq_part;
- table_map const_table_map,found_const_table_map,all_table_map;
+ table_map found_const_table_map,all_table_map;
TABLE **table_vector;
JOIN_TAB *stat,*stat_end,*s,**stat_ref;
SQL_SELECT *select;
@@ -1212,7 +1258,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->best_ref=stat_vector;
stat_end=stat+table_count;
- const_table_map=found_const_table_map=all_table_map=0;
+ found_const_table_map=all_table_map=0;
const_count=0;
for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++)
@@ -1303,7 +1349,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
DBUG_RETURN(1);
/* Read tables with 0 or 1 rows (system tables) */
- join->const_table_map=const_table_map;
+ join->const_table_map= 0;
for (POSITION *p_pos=join->positions, *p_end=p_pos+const_count;
p_pos < p_end ;
@@ -1340,16 +1386,16 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
if (s->dependent) // If dependent on some table
{
// All dep. must be constants
- if (s->dependent & ~(join->const_table_map))
+ if (s->dependent & ~(found_const_table_map))
continue;
if (table->file->records <= 1L &&
!(table->file->table_flags() & HA_NOT_EXACT_COUNT))
{ // system table
- int tmp;
+ int tmp= 0;
s->type=JT_SYSTEM;
join->const_table_map|=table->map;
set_position(join,const_count++,s,(KEYUSE*) 0);
- if ((tmp=join_read_const_table(s,join->positions+const_count-1)))
+ if ((tmp= join_read_const_table(s,join->positions+const_count-1)))
{
if (tmp > 0)
DBUG_RETURN(1); // Fatal error
@@ -1374,7 +1420,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
if (keyuse->val->type() != Item::NULL_ITEM)
{
- if (!((~join->const_table_map) & keyuse->used_tables))
+ if (!((~found_const_table_map) & keyuse->used_tables))
const_ref|= (key_map) 1 << keyuse->keypart;
else
refs|=keyuse->used_tables;
@@ -1395,7 +1441,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->const_table_map|=table->map;
set_position(join,const_count++,s,start_keyuse);
if (create_ref_for_key(join, s, start_keyuse,
- join->const_table_map))
+ found_const_table_map))
DBUG_RETURN(1);
if ((tmp=join_read_const_table(s,
join->positions+const_count-1)))
@@ -1443,8 +1489,8 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
ha_rows records;
if (!select)
- select=make_select(s->table, join->const_table_map,
- join->const_table_map,
+ select=make_select(s->table, found_const_table_map,
+ found_const_table_map,
and_conds(conds,s->on_expr),&error);
records=get_quick_record_count(select,s->table, s->const_keys,
join->row_limit);
@@ -2607,12 +2653,13 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
bool
store_val_in_field(Field *field,Item *item)
{
+ bool error;
THD *thd=current_thd;
ha_rows cuted_fields=thd->cuted_fields;
thd->count_cuted_fields=1;
- (void) item->save_in_field(field);
+ error= item->save_in_field(field, 1);
thd->count_cuted_fields=0;
- return cuted_fields != thd->cuted_fields;
+ return error || cuted_fields != thd->cuted_fields;
}
@@ -2957,6 +3004,38 @@ make_join_readinfo(JOIN *join, uint options)
}
+/*
+  Give an error if some tables in the join are accessed with a full join
+
+  SYNOPSIS
+    error_if_full_join()
+    join			The join to check for full (unkeyed) table scans
+
+ USAGE
+ This is used by multi_table_update and multi_table_delete when running
+ in safe mode
+
+ RETURN VALUES
+ 0 ok
+ 1 Error (full join used)
+*/
+
+bool error_if_full_join(JOIN *join)
+{
+ for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
+ tab < end;
+ tab++)
+ {
+ if (tab->type == JT_ALL && (!tab->select || !tab->select->quick))
+ {
+ my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
+ return(1);
+ }
+ }
+ return(0);
+}
+
+
static void
join_free(JOIN *join)
{
@@ -3021,9 +3100,7 @@ join_free(JOIN *join)
}
join->group_fields.delete_elements();
join->tmp_table_param.copy_funcs.delete_elements();
- if (join->tmp_table_param.copy_field) // Because of bug in ecc
- delete [] join->tmp_table_param.copy_field;
- join->tmp_table_param.copy_field=0;
+ join->tmp_table_param.cleanup();
DBUG_VOID_RETURN;
}
@@ -3655,12 +3732,34 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
/****************************************************************************
- Create a temp table according to a field list.
- Set distinct if duplicates could be removed
- Given fields field pointers are changed to point at tmp_table
- for send_fields
+ Create internal temporary table
****************************************************************************/
+/*
+ Create field for temporary table
+
+ SYNOPSIS
+ create_tmp_field()
+ thd Thread handler
+ table Temporary table
+ item Item to create a field for
+ type Type of item (normally item->type)
+ copy_func If set and item is a function, store copy of item
+ in this array
+ group 1 if we are going to do a relative group by on result
+ modify_item 1 if item->result_field should point to new item.
+                     This is relevant for how fill_record() is going to
+ work:
+ If modify_item is 1 then fill_record() will update
+ the record in the original table.
+ If modify_item is 0 then fill_record() will update
+ the temporary table
+
+ RETURN
+ 0 on error
+    new_field		the newly created field
+*/
+
Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
Item_result_field ***copy_func, Field **from_field,
bool group, bool modify_item)
@@ -3778,6 +3877,13 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
+/*
+ Create a temp table according to a field list.
+ Set distinct if duplicates could be removed
+ Given fields field pointers are changed to point at tmp_table
+ for send_fields
+*/
+
TABLE *
create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
ORDER *group, bool distinct, bool save_sum_fields,
@@ -3853,13 +3959,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
NullS))
{
bitmap_clear_bit(&temp_pool, temp_pool_slot);
- DBUG_RETURN(NULL); /* purecov: inspected */
+ DBUG_RETURN(NULL); /* purecov: inspected */
}
if (!(param->copy_field=copy=new Copy_field[field_count]))
{
bitmap_clear_bit(&temp_pool, temp_pool_slot);
- my_free((gptr) table,MYF(0)); /* purecov: inspected */
- DBUG_RETURN(NULL); /* purecov: inspected */
+ my_free((gptr) table,MYF(0)); /* purecov: inspected */
+ DBUG_RETURN(NULL); /* purecov: inspected */
}
param->funcs=copy_func;
strmov(tmpname,path);
@@ -3940,9 +4046,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
else
{
+ /*
+ The last parameter to create_tmp_field() is a bit tricky:
+
+ We need to set it to 0 in union, to get fill_record() to modify the
+ temporary table.
+ We need to set it to 1 on multi-table-update and in select to
+ write rows to the temporary table.
+ We here distinguish between UNION and multi-table-updates by the fact
+    that in the latter case group is set to the row pointer.
+ */
Field *new_field=create_tmp_field(thd, table, item,type, &copy_func,
tmp_from_field, group != 0,
- not_all_columns);
+ not_all_columns || group !=0);
if (!new_field)
{
if (thd->fatal_error)
@@ -4258,7 +4374,6 @@ static bool open_tmp_table(TABLE *table)
table->db_stat=0;
return(1);
}
- /* VOID(ha_lock(table,F_WRLCK)); */ /* Single thread table */
(void) table->file->extra(HA_EXTRA_QUICK); /* Faster */
return(0);
}
@@ -4411,12 +4526,11 @@ free_tmp_table(THD *thd, TABLE *entry)
* If a HEAP table gets full, create a MyISAM table and copy all rows to this
*/
-bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
- bool ignore_last_dupp_key_error)
+bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
+ int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
const char *save_proc_info;
- THD *thd=current_thd;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
@@ -5392,7 +5506,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)
goto end;
- if (create_myisam_from_heap(table, &join->tmp_table_param, error,1))
+ if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
+ error,1))
DBUG_RETURN(-1); // Not a table_is_full error
table->uniques=0; // To ensure rows are the same
}
@@ -5469,7 +5584,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_funcs(join->tmp_table_param.funcs);
if ((error=table->file->write_row(table->record[0])))
{
- if (create_myisam_from_heap(table, &join->tmp_table_param, error, 0))
+ if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
+ error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
/* Change method to update rows */
table->file->index_init(0);
@@ -5563,7 +5679,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if ((error=table->file->write_row(table->record[0])))
{
- if (create_myisam_from_heap(table, &join->tmp_table_param,
+	  if (create_myisam_from_heap(join->thd, table,
+ &join->tmp_table_param,
error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
}
@@ -6769,12 +6886,14 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields,
*/
static ORDER *
-create_distinct_group(ORDER *order_list,List<Item> &fields)
+create_distinct_group(THD *thd, ORDER *order_list, List<Item> &fields,
+ bool *all_order_by_fields_used)
{
List_iterator<Item> li(fields);
Item *item;
ORDER *order,*group,**prev;
+ *all_order_by_fields_used= 1;
while ((item=li++))
item->marker=0; /* Marker that field is not used */
@@ -6783,13 +6902,15 @@ create_distinct_group(ORDER *order_list,List<Item> &fields)
{
if (order->in_field_list)
{
- ORDER *ord=(ORDER*) sql_memdup(order,sizeof(ORDER));
+ ORDER *ord=(ORDER*) thd->memdup((char*) order,sizeof(ORDER));
if (!ord)
return 0;
*prev=ord;
prev= &ord->next;
(*ord->item)->marker=1;
}
+ else
+ *all_order_by_fields_used= 0;
}
li.rewind();
@@ -6799,7 +6920,7 @@ create_distinct_group(ORDER *order_list,List<Item> &fields)
continue;
if (!item->marker)
{
- ORDER *ord=(ORDER*) sql_calloc(sizeof(ORDER));
+ ORDER *ord=(ORDER*) thd->calloc(sizeof(ORDER));
if (!ord)
return 0;
ord->item=li.ref();
@@ -7251,7 +7372,7 @@ copy_sum_funcs(Item_sum **func_ptr)
{
Item_sum *func;
for (; (func = *func_ptr) ; func_ptr++)
- (void) func->save_in_field(func->result_field);
+ (void) func->save_in_field(func->result_field, 1);
return;
}
@@ -7282,7 +7403,7 @@ copy_funcs(Item_result_field **func_ptr)
{
Item_result_field *func;
for (; (func = *func_ptr) ; func_ptr++)
- (void) func->save_in_field(func->result_field);
+ (void) func->save_in_field(func->result_field, 1);
return;
}
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 3b89c1ce0d3..31693628be5 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -115,7 +115,8 @@ typedef struct st_position { /* Used in find_best */
/* Param to create temporary tables when doing SELECT:s */
-class TMP_TABLE_PARAM {
+class TMP_TABLE_PARAM :public Sql_alloc
+{
public:
List<Item> copy_funcs;
List_iterator_fast<Item> copy_funcs_it;
@@ -321,12 +322,12 @@ class store_key_field: public store_key
copy_field.set(to_field,from_field,0);
}
}
- bool copy()
- {
- copy_field.do_copy(&copy_field);
- return err != 0;
- }
- const char *name() const { return field_name; }
+ bool copy()
+ {
+ copy_field.do_copy(&copy_field);
+ return err != 0;
+ }
+ const char *name() const { return field_name; }
};
@@ -343,8 +344,7 @@ public:
{}
bool copy()
{
- (void) item->save_in_field(to_field);
- return err != 0;
+ return item->save_in_field(to_field, 1) || err != 0;
}
const char *name() const { return "func"; }
};
@@ -367,7 +367,8 @@ public:
if (!inited)
{
inited=1;
- (void)item->save_in_field(to_field);
+ if (item->save_in_field(to_field, 1))
+ err= 1;
}
return err != 0;
}
@@ -375,3 +376,4 @@ public:
};
bool cp_buffer_from_ref(TABLE_REF *ref);
+bool error_if_full_join(JOIN *join);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 00077bda39f..6d4669894b9 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1885,8 +1885,14 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* We changed a temporary table */
if (error)
{
+ /*
+ * The following function call will also free a
+ * new_table pointer.
+ * Therefore, here new_table pointer is not free'd as it is
+ * free'd in close_temporary() which is called by by the
+ * close_temporary_table() function.
+ */
close_temporary_table(thd,new_db,tmp_name);
- my_free((gptr) new_table,MYF(0));
goto err;
}
/* Close lock if this is a transactional table */
@@ -2206,7 +2212,6 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (to->file->external_lock(thd,F_UNLCK))
error=1;
err:
- tmp_error = ha_recovery_logging(thd,TRUE);
free_io_cache(from);
*copied= found_count;
*deleted=delete_count;
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 35e33caf572..51d43b41833 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -203,6 +203,8 @@ void udf_init()
new_thd->version--; // Force close to free memory
close_thread_tables(new_thd);
delete new_thd;
+ /* Remember that we don't have a THD */
+ my_pthread_setspecific_ptr(THR_THD, 0);
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index e170f6c040e..705152ee9f2 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -41,7 +41,7 @@ int mysql_union(THD *thd, LEX *lex, select_result *result)
***************************************************************************/
select_union::select_union(TABLE *table_par)
- :table(table_par)
+ :table(table_par), not_describe(0)
{
bzero((char*) &info,sizeof(info));
/*
@@ -59,7 +59,7 @@ select_union::~select_union()
int select_union::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
{
unit= u;
- if (save_time_stamp && list.elements != table->fields)
+ if (not_describe && list.elements != table->fields)
{
my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0));
@@ -117,7 +117,7 @@ int st_select_lex_unit::prepare(THD *thd, select_result *result)
prepared= 1;
union_result=0;
res= 0;
- found_rows_for_union= false;
+ found_rows_for_union= 0;
TMP_TABLE_PARAM tmp_table_param;
this->thd= thd;
this->result= result;
@@ -165,7 +165,7 @@ int st_select_lex_unit::prepare(THD *thd, select_result *result)
if (!(union_result=new select_union(table)))
goto err;
- union_result->save_time_stamp=1;
+ union_result->not_describe=1;
union_result->tmp_table_param=&tmp_table_param;
// prepare selects
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c3ae435d851..3aab5cd30a9 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -29,10 +29,12 @@ static bool compare_record(TABLE *table, ulong query_id)
{
if (!table->blob_fields)
return cmp_record(table,1);
+ /* Compare null bits */
if (memcmp(table->null_flags,
table->null_flags+table->rec_buff_length,
table->null_bytes))
return 1; // Diff in NULL value
+ /* Compare updated fields */
for (Field **ptr=table->field ; *ptr ; ptr++)
{
if ((*ptr)->query_id == query_id &&
@@ -56,7 +58,7 @@ int mysql_update(THD *thd,
bool safe_update= thd->options & OPTION_SAFE_UPDATES;
bool used_key_is_modified, transactional_table, log_delayed;
int error=0;
- uint save_time_stamp, used_index, want_privilege;
+ uint used_index, want_privilege;
ulong query_id=thd->query_id, timestamp_query_id;
key_map old_used_keys;
TABLE *table;
@@ -73,7 +75,6 @@ int mysql_update(THD *thd,
fix_tables_pointers(thd->lex.all_selects_list);
table= table_list->table;
- save_time_stamp=table->time_stamp;
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
thd->proc_info="init";
@@ -103,6 +104,7 @@ int mysql_update(THD *thd,
{
timestamp_query_id=table->timestamp_field->query_id;
table->timestamp_field->query_id=thd->query_id-1;
+ table->time_stamp= table->timestamp_field->offset() +1;
}
/* Check the fields we are going to modify */
@@ -122,7 +124,6 @@ int mysql_update(THD *thd,
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
if (setup_fields(thd,update_table_list,values,0,0,0))
{
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
DBUG_RETURN(-1); /* purecov: inspected */
}
@@ -133,7 +134,6 @@ int mysql_update(THD *thd,
(select && select->check_quick(safe_update, limit)) || !limit)
{
delete select;
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
if (error)
{
DBUG_RETURN(-1); // Error in where
@@ -148,7 +148,6 @@ int mysql_update(THD *thd,
if (safe_update && !using_limit)
{
delete select;
- table->time_stamp=save_time_stamp;
send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE);
DBUG_RETURN(1);
}
@@ -167,8 +166,8 @@ int mysql_update(THD *thd,
if (used_key_is_modified || order)
{
/*
- ** We can't update table directly; We must first search after all
- ** matching rows before updating the table!
+ We can't update table directly; We must first search after all
+ matching rows before updating the table!
*/
table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE);
IO_CACHE tempfile;
@@ -176,7 +175,6 @@ int mysql_update(THD *thd,
DISK_BUFFER_SIZE, MYF(MY_WME)))
{
delete select; /* purecov: inspected */
- table->time_stamp=save_time_stamp; // Restore timestamp pointer /* purecov: inspected */
DBUG_RETURN(-1);
}
if (old_used_keys & ((key_map) 1 << used_index))
@@ -207,7 +205,6 @@ int mysql_update(THD *thd,
== HA_POS_ERROR)
{
delete select;
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
DBUG_RETURN(-1);
}
}
@@ -261,7 +258,6 @@ int mysql_update(THD *thd,
if (error >= 0)
{
delete select;
- table->time_stamp=save_time_stamp; // Restore timestamp pointer
DBUG_RETURN(-1);
}
}
@@ -311,7 +307,6 @@ int mysql_update(THD *thd,
end_read_record(&info);
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
- table->time_stamp=save_time_stamp; // Restore auto timestamp pointer
transactional_table= table->file->has_transactions();
log_delayed= (transactional_table || table->tmp_table);
if (updated && (error <= 0 || !transactional_table))
@@ -365,331 +360,344 @@ int mysql_update(THD *thd,
DBUG_RETURN(0);
}
+
/***************************************************************************
Update multiple tables from join
***************************************************************************/
-multi_update::multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> &fs,
- enum enum_duplicates handle_duplicates,
- uint num)
- : update_tables (ut), thd(thd_arg), updated(0), found(0), fields(fs),
- dupl(handle_duplicates), num_of_tables(num), num_fields(0), num_updated(0),
- error(0), do_update(false)
+/*
+ Setup multi-update handling and call SELECT to do the join
+*/
+
+int mysql_multi_update(THD *thd,
+ TABLE_LIST *table_list,
+ List<Item> *fields,
+ List<Item> *values,
+ COND *conds,
+ ulong options,
+ enum enum_duplicates handle_duplicates,
+ SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
- save_time_stamps = (uint *) sql_calloc (sizeof(uint) * num_of_tables);
- tmp_tables = (TABLE **)NULL;
- int counter=0;
- ulong timestamp_query_id;
- not_trans_safe=false;
- for (TABLE_LIST *dt=ut ; dt ; dt=dt->next,counter++)
+ int res;
+ multi_update *result;
+ TABLE_LIST *tl;
+ DBUG_ENTER("mysql_multi_update");
+
+ table_list->grant.want_privilege=(SELECT_ACL & ~table_list->grant.privilege);
+ if ((res=open_and_lock_tables(thd,table_list)))
+ DBUG_RETURN(res);
+
+ thd->select_limit=HA_POS_ERROR;
+ if (setup_fields(thd, table_list, *fields, 1, 0, 0))
+ DBUG_RETURN(-1);
+
+ /*
+ Count tables and setup timestamp handling
+ */
+ for (tl= (TABLE_LIST*) table_list ; tl ; tl=tl->next)
{
- TABLE *table=ut->table;
- // (void) ut->table->file->extra(HA_EXTRA_NO_KEYREAD);
- dt->table->used_keys=0;
+ TABLE *table= tl->table;
if (table->timestamp_field)
{
- // Don't set timestamp column if this is modified
- timestamp_query_id=table->timestamp_field->query_id;
- table->timestamp_field->query_id=thd->query_id-1;
- if (table->timestamp_field->query_id == thd->query_id)
- table->time_stamp=0;
- else
- table->timestamp_field->query_id=timestamp_query_id;
+ table->time_stamp=0;
+ // Only set timestamp column if this is not modified
+ if (table->timestamp_field->query_id != thd->query_id)
+ table->time_stamp= table->timestamp_field->offset() +1;
}
- save_time_stamps[counter]=table->time_stamp;
}
- error = 1; // In case we do not reach prepare we have to reset timestamps
+
+ if (!(result=new multi_update(thd, table_list, fields, values,
+ handle_duplicates)))
+ DBUG_RETURN(-1);
+
+ List<Item> total_list;
+ res= mysql_select(thd,table_list,total_list,
+ conds, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
+ (ORDER *)NULL,
+ options | SELECT_NO_JOIN_CACHE,
+ result, unit, select_lex, 0);
+
+end:
+ delete result;
+ DBUG_RETURN(res);
}
-int
-multi_update::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
+
+multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list,
+ List<Item> *field_list, List<Item> *value_list,
+ enum enum_duplicates handle_duplicates_arg)
+ :all_tables(table_list), update_tables(0), thd(thd_arg), tmp_tables(0),
+ updated(0), found(0), fields(field_list), values(value_list),
+ table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg),
+ do_update(1), trans_safe(0)
+{}
+
+
+/*
+ Connect fields with tables and create list of tables that are updated
+*/
+
+int multi_update::prepare(List<Item> &not_used_values)
{
+ TABLE_LIST *table_ref;
+ SQL_LIST update;
+ table_map tables_to_update= 0;
+ Item_field *item;
+ List_iterator_fast<Item> field_it(*fields);
+ List_iterator_fast<Item> value_it(*values);
+ uint i, max_fields;
DBUG_ENTER("multi_update::prepare");
- unit= u;
- do_update = true;
+
thd->count_cuted_fields=1;
thd->cuted_fields=0L;
- thd->proc_info="updating the main table";
- TABLE_LIST *table_ref;
+ thd->proc_info="updating main table";
+
+ while ((item= (Item_field *) field_it++))
+ tables_to_update|= item->used_tables();
- if (thd->options & OPTION_SAFE_UPDATES)
+ if (!tables_to_update)
{
- for (table_ref=update_tables; table_ref; table_ref=table_ref->next)
- {
- TABLE *table=table_ref->table;
- if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
- {
- my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
- DBUG_RETURN(1);
- }
- }
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "You didn't specify any tables to UPDATE");
+ DBUG_RETURN(1);
}
+
/*
- Here I have to connect fields with tables and only update tables that
- need to be updated.
- I calculate num_updated and fill-up table_sequence
- Set table_list->shared to true or false, depending on whether table is
- to be updated or not
+ We have to check values after setup_tables to get used_keys right in
+ reference tables
*/
- Item_field *item;
- List_iterator<Item> it(fields);
- num_fields=fields.elements;
- field_sequence = (uint *) sql_alloc(sizeof(uint)*num_fields);
- uint *int_ptr=field_sequence;
- while ((item= (Item_field *)it++))
- {
- unsigned int counter=0;
- for (table_ref=update_tables; table_ref;
- table_ref=table_ref->next, counter++)
- {
- if (table_ref->table == item->field->table)
- {
- if (!table_ref->shared)
- {
- TABLE *tbl=table_ref->table;
- num_updated++;
- table_ref->shared=1;
- if (!not_trans_safe && !table_ref->table->file->has_transactions())
- not_trans_safe=true;
- // to be moved if initialize_tables has to be used
- tbl->no_keyread=1;
- tbl->used_keys=0;
- }
- break;
- }
- }
- if (!table_ref)
- {
- net_printf(thd, ER_NOT_SUPPORTED_YET, "JOIN SYNTAX WITH MULTI-TABLE UPDATES");
- DBUG_RETURN(1);
- }
- else
- *int_ptr++=counter;
- }
- if (!num_updated--)
- {
- net_printf(thd, ER_NOT_SUPPORTED_YET, "SET CLAUSE MUST CONTAIN TABLE.FIELD REFERENCE");
+ if (setup_fields(thd, all_tables, *values, 1,0,0))
DBUG_RETURN(1);
- }
/*
- Here, I have to allocate the array of temporary tables
- I have to treat a case of num_updated=1 differently in send_data() method.
+    Save tables being updated in update_tables
+ update_table->shared is position for table
+ Don't use key read on tables that are updated
*/
- if (num_updated)
+
+ update.empty();
+ for (table_ref= all_tables; table_ref; table_ref=table_ref->next)
{
- tmp_tables = (TABLE **) sql_calloc(sizeof(TABLE *) * num_updated);
- infos = (COPY_INFO *) sql_calloc(sizeof(COPY_INFO) * num_updated);
- fields_by_tables = (List_item **)sql_calloc(sizeof(List_item *) * (num_updated + 1));
- unsigned int counter;
- List<Item> *temp_fields;
- for (table_ref=update_tables, counter = 0; table_ref; table_ref=table_ref->next)
+ TABLE *table=table_ref->table;
+ if (tables_to_update & table->map)
{
- if (!table_ref->shared)
- continue;
- // Here we have to add row offset as an additional field ...
- if (!(temp_fields = (List_item *)sql_calloc(sizeof(List_item))))
- {
- error = 1; // A proper error message is due here
+ TABLE_LIST *tl= (TABLE_LIST*) thd->memdup((char*) table_ref,
+ sizeof(*tl));
+ if (!tl)
DBUG_RETURN(1);
- }
- temp_fields->empty();
- it.rewind(); int_ptr=field_sequence;
- while ((item= (Item_field *)it++))
- {
- if (*int_ptr++ == counter)
- temp_fields->push_back(item);
- }
- if (counter)
- {
- Field_string offset(table_ref->table->file->ref_length, false,
- "offset", table_ref->table, my_charset_bin);
- temp_fields->push_front(new Item_field(((Field *)&offset)));
-
- // Make a temporary table
- int cnt=counter-1;
- TMP_TABLE_PARAM tmp_table_param;
- bzero((char*) &tmp_table_param,sizeof(tmp_table_param));
- tmp_table_param.field_count=temp_fields->elements;
- if (!(tmp_tables[cnt]=create_tmp_table(thd, &tmp_table_param,
- *temp_fields,
- (ORDER*) 0, 1, 0, 0,
- TMP_TABLE_ALL_COLUMNS,
- unit)))
- {
- error = 1; // A proper error message is due here
- DBUG_RETURN(1);
- }
- tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
- tmp_tables[cnt]->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- infos[cnt].handle_duplicates=DUP_IGNORE;
- temp_fields->pop(); // because we shall use those for values only ...
- }
- fields_by_tables[counter]=temp_fields;
- counter++;
+ update.link_in_list((byte*) tl, (byte**) &tl->next);
+ tl->shared= table_count++;
+ table->no_keyread=1;
+ table->used_keys=0;
+ table->pos_in_table_list= tl;
}
}
- init_ftfuncs(thd, thd->lex.current_select->select_lex(), 1);
- error = 0; // Timestamps do not need to be restored, so far ...
- DBUG_RETURN(0);
+ table_count= update.elements;
+ update_tables= (TABLE_LIST*) update.first;
+
+ tmp_tables = (TABLE **) thd->calloc(sizeof(TABLE *) * table_count);
+ tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
+ table_count);
+ fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
+ table_count);
+ values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
+ table_count);
+ if (thd->fatal_error)
+ DBUG_RETURN(1);
+ for (i=0 ; i < table_count ; i++)
+ {
+ fields_for_table[i]= new List_item;
+ values_for_table[i]= new List_item;
+ }
+ if (thd->fatal_error)
+ DBUG_RETURN(1);
+
+  /* Split fields into fields_for_table[] and values_for_table[] */
+
+ field_it.rewind();
+ while ((item= (Item_field *) field_it++))
+ {
+ Item *value= value_it++;
+ uint offset= item->field->table->pos_in_table_list->shared;
+ fields_for_table[offset]->push_back(item);
+ values_for_table[offset]->push_back(value);
+ }
+ if (thd->fatal_error)
+ DBUG_RETURN(1);
+
+ /* Allocate copy fields */
+ max_fields=0;
+ for (i=0 ; i < table_count ; i++)
+ set_if_bigger(max_fields, fields_for_table[i]->elements);
+ copy_field= new Copy_field[max_fields];
+ init_ftfuncs(thd,1);
+ DBUG_RETURN(thd->fatal_error != 0);
}
-void
+/*
+ Store first used table in main_table as this should be updated first
+ This is because we know that no row in this table will be read twice.
+
+ Create temporary tables to store changed values for all other tables
+ that are updated.
+*/
+
+bool
multi_update::initialize_tables(JOIN *join)
{
-#ifdef NOT_YET
- We skip it as it only makes a mess ...........
- TABLE_LIST *walk;
- table_map tables_to_update_from=0;
- for (walk= update_tables ; walk ; walk=walk->next)
- tables_to_update_from|= walk->table->map;
-
- walk= update_tables;
- for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
- tab < end;
- tab++)
+ TABLE_LIST *table_ref;
+ DBUG_ENTER("initialize_tables");
+
+ if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
+ DBUG_RETURN(1);
+ main_table=join->join_tab->table;
+ trans_safe= transactional_tables= main_table->file->has_transactions();
+ log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE;
+
+ /* Create a temporary table for all tables after except main table */
+ for (table_ref= update_tables; table_ref; table_ref=table_ref->next)
{
- if (tab->table->map & tables_to_update_from)
+ TABLE *table=table_ref->table;
+ if (table != main_table)
{
-// We are going to update from this table
- TABLE *tbl=walk->table=tab->table;
- /* Don't use KEYREAD optimization on this table */
- tbl->no_keyread=1;
- walk=walk->next;
+ uint cnt= table_ref->shared;
+ ORDER group;
+ List<Item> temp_fields= *fields_for_table[cnt];
+ TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
+
+ /*
+ Create a temporary table to store all fields that are changed for this
+ table. The first field in the temporary table is a pointer to the
+ original row so that we can find and update it
+ */
+
+ /* ok to be on stack as this is not referenced outside of this func */
+ Field_string offset(table->file->ref_length, 0, "offset",
+ table, 1);
+ if (temp_fields.push_front(new Item_field(((Field *) &offset))))
+ DBUG_RETURN(1);
+
+ /* Make an unique key over the first field to avoid duplicated updates */
+ bzero((char*) &group, sizeof(group));
+ group.asc= 1;
+ group.item= (Item**) temp_fields.head_ref();
+
+ tmp_param->quick_group=1;
+ tmp_param->field_count=temp_fields.elements;
+ tmp_param->group_parts=1;
+ tmp_param->group_length= table->file->ref_length;
+ if (!(tmp_tables[cnt]=create_tmp_table(thd,
+ tmp_param,
+ temp_fields,
+ (ORDER*) &group, 0, 0, 0,
+ TMP_TABLE_ALL_COLUMNS)))
+ DBUG_RETURN(1);
+ tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
}
}
-#endif
+ DBUG_RETURN(0);
}
multi_update::~multi_update()
{
- int counter = 0;
- for (table_being_updated=update_tables ;
- table_being_updated ;
- counter++, table_being_updated=table_being_updated->next)
+ TABLE_LIST *table;
+ for (table= update_tables ; table; table= table->next)
+ table->table->no_keyread=0;
+
+ if (tmp_tables)
{
- TABLE *table=table_being_updated->table;
- table->no_keyread=0;
- if (error)
- table->time_stamp=save_time_stamps[counter];
+ for (uint cnt = 0; cnt < table_count; cnt++)
+ {
+ if (tmp_tables[cnt])
+ {
+ free_tmp_table(thd, tmp_tables[cnt]);
+ tmp_table_param[cnt].cleanup();
+ }
+ }
}
- if (tmp_tables)
- for (uint counter = 0; counter < num_updated; counter++)
- if (tmp_tables[counter])
- free_tmp_table(thd,tmp_tables[counter]);
+ if (copy_field)
+ delete [] copy_field;
+ thd->count_cuted_fields=0; // Restore this setting
+ if (!trans_safe)
+ thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
-bool multi_update::send_data(List<Item> &values)
+bool multi_update::send_data(List<Item> &not_used_values)
{
- List<Item> real_values(values);
- for (uint counter = 0; counter < fields.elements; counter++)
- real_values.pop();
- // We have skipped fields ....
- if (!num_updated)
+ TABLE_LIST *cur_table;
+ DBUG_ENTER("multi_update::send_data");
+
+ found++;
+ for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
{
- for (table_being_updated=update_tables ;
- table_being_updated ;
- table_being_updated=table_being_updated->next)
+ TABLE *table= cur_table->table;
+ /* Check if we are using outer join and we didn't find the row */
+ if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
+ continue;
+
+ uint offset= cur_table->shared;
+ table->file->position(table->record[0]);
+ if (table == main_table)
{
- if (!table_being_updated->shared)
- continue;
- TABLE *table=table_being_updated->table;
- /* Check if we are using outer join and we didn't find the row */
- if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
- return 0;
- table->file->position(table->record[0]);
- // Only one table being updated receives a completely different treatment
table->status|= STATUS_UPDATED;
- store_record(table,1);
- if (fill_record(fields,real_values) || thd->net.report_error)
- return 1;
- found++;
- if (/* compare_record(table, query_id) && */
- !(error=table->file->update_row(table->record[1], table->record[0])))
- updated++;
- table->file->extra(HA_EXTRA_NO_CACHE);
- return error;
- }
- }
- else
- {
- int secure_counter= -1;
- for (table_being_updated=update_tables ;
- table_being_updated ;
- table_being_updated=table_being_updated->next, secure_counter++)
- {
- if (!table_being_updated->shared)
- continue;
-
- TABLE *table=table_being_updated->table;
- /* Check if we are using outer join and we didn't find the row */
- if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
- continue;
- table->file->position(table->record[0]);
- Item *item;
- List_iterator<Item> it(real_values);
- List <Item> values_by_table;
- uint *int_ptr=field_sequence;
- while ((item= (Item *)it++))
- {
- if (*int_ptr++ == (uint) (secure_counter + 1))
- values_by_table.push_back(item);
- }
- // Here I am breaking values as per each table
- if (secure_counter < 0)
+ store_record(table,1);
+ if (fill_record(*fields_for_table[offset], *values_for_table[offset]))
+ DBUG_RETURN(1);
+ if (compare_record(table, thd->query_id))
{
- table->status|= STATUS_UPDATED;
- store_record(table,1);
- if (fill_record(*fields_by_tables[0], values_by_table) ||
- thd->net.report_error)
- return 1;
- found++;
- if (/*compare_record(table, query_id) && */
- !(error=table->file->update_row(table->record[1], table->record[0])))
+ int error;
+ if (!updated++)
{
- updated++;
- table->file->extra(HA_EXTRA_NO_CACHE);
+ /*
+ Inform the main table that we are going to update the table even
+ while we may be scanning it. This will flush the read cache
+ if it's used.
+ */
+ main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
}
- else
+ if ((error=table->file->update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0));
- if (!error) error=1;
- return 1;
+ updated--;
+ DBUG_RETURN(1);
}
}
- else
+ }
+ else
+ {
+ int error;
+ TABLE *tmp_table= tmp_tables[offset];
+ fill_record(tmp_table->field+1, *values_for_table[offset]);
+
+ /* Store pointer to row */
+ memcpy((char*) tmp_table->field[0]->ptr,
+ (char*) table->file->ref, table->file->ref_length);
+ /* Write row, ignoring duplicated updates to a row */
+ if ((error= tmp_table->file->write_row(tmp_table->record[0])) &&
+ (error != HA_ERR_FOUND_DUPP_KEY &&
+ error != HA_ERR_FOUND_DUPP_UNIQUE))
{
- // Here we insert into each temporary table
- values_by_table.push_front(new Item_string((char*) table->file->ref,
- table->file->ref_length,
- system_charset_info));
- fill_record(tmp_tables[secure_counter]->field,values_by_table);
- error= thd->net.report_error ||
- write_record(tmp_tables[secure_counter], &(infos[secure_counter]));
- if (error)
+ if (create_myisam_from_heap(table, tmp_table_param + offset, error, 1))
{
- error=-1;
- return 1;
+ do_update=0;
+ DBUG_RETURN(1); // Not a table_is_full error
}
}
}
}
- return 0;
+ DBUG_RETURN(0);
}
+
void multi_update::send_error(uint errcode,const char *err)
{
-
- //TODO error should be sent at the query processing end
/* First send error what ever it is ... */
- ::send_error(thd,errcode,err);
-
- /* reset used flags */
- // update_tables->table->no_keyread=0;
+ ::send_error(&thd->net,errcode,err);
/* If nothing updated return */
if (!updated)
@@ -698,99 +706,124 @@ void multi_update::send_error(uint errcode,const char *err)
/* Something already updated so we have to invalidate cache */
query_cache_invalidate3(thd, update_tables, 1);
- /* Below can happen when thread is killed early ... */
- if (!table_being_updated)
- table_being_updated=update_tables;
-
/*
- If rows from the first table only has been updated and it is transactional,
- just do rollback.
- The same if all tables are transactional, regardless of where we are.
- In all other cases do attempt updates ...
+ If all tables that has been updated are trans safe then just do rollback.
+ If not attempt to do remaining updates.
*/
- if ((table_being_updated->table->file->has_transactions() &&
- table_being_updated == update_tables) || !not_trans_safe)
+
+ if (trans_safe)
ha_rollback_stmt(thd);
- else if (do_update && num_updated)
- VOID(do_updates(true));
+ else if (do_update && table_count > 1)
+ {
+ /* Add warning here */
+ VOID(do_updates(0));
+ }
}
-int multi_update::do_updates (bool from_send_error)
+int multi_update::do_updates(bool from_send_error)
{
- int local_error= 0, counter= 0;
-
- if (from_send_error)
+ TABLE_LIST *cur_table;
+ int local_error;
+ ha_rows org_updated;
+ TABLE *table;
+ DBUG_ENTER("do_updates");
+
+ do_update= 0; // Don't retry this function
+ for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
{
- /* Found out table number for 'table_being_updated' */
- for (TABLE_LIST *aux=update_tables;
- aux != table_being_updated;
- aux=aux->next)
- counter++;
- }
- else
- table_being_updated = update_tables;
-
- do_update = false;
- for (table_being_updated=table_being_updated->next;
- table_being_updated ;
- table_being_updated=table_being_updated->next, counter++)
- {
- if (!table_being_updated->shared)
- continue;
+ table = cur_table->table;
+ if (table == main_table)
+ continue; // Already updated
- TABLE *table = table_being_updated->table;
- TABLE *tmp_table=tmp_tables[counter];
- if (tmp_table->file->extra(HA_EXTRA_NO_CACHE))
- {
- local_error=1;
- break;
- }
- List<Item> list;
- Field **ptr=tmp_table->field,*field;
- // This is supposed to be something like insert_fields
- thd->used_tables|=tmp_table->map;
- while ((field = *ptr++))
+ org_updated= updated;
+ byte *ref_pos;
+ TABLE *tmp_table= tmp_tables[cur_table->shared];
+ tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
+ table->file->extra(HA_EXTRA_NO_CACHE);
+
+ /*
+ Setup copy functions to copy fields from temporary table
+ */
+ List_iterator_fast<Item> field_it(*fields_for_table[cur_table->shared]);
+ Field **field= tmp_table->field+1; // Skip row pointer
+ Copy_field *copy_field_ptr= copy_field, *copy_field_end;
+ for ( ; *field ; field++)
{
- list.push_back((Item *)new Item_field(field));
- if (field->query_id == thd->query_id)
- thd->dupp_field=field;
- field->query_id=thd->query_id;
- tmp_table->used_keys&=field->part_of_key;
+ Item_field *item= (Item_field* ) field_it++;
+ (copy_field_ptr++)->set(item->field, *field, 0);
}
- tmp_table->used_fields=tmp_table->fields;
- local_error=0;
- list.pop(); // we get position some other way ...
- local_error = tmp_table->file->rnd_init(1);
- if (local_error)
- return local_error;
- while (!(local_error=tmp_table->file->rnd_next(tmp_table->record[0])) &&
- (!thd->killed || from_send_error || not_trans_safe))
+ copy_field_end=copy_field_ptr;
+
+ if ((local_error = tmp_table->file->rnd_init(1)))
+ goto err;
+
+ ref_pos= (byte*) tmp_table->field[0]->ptr;
+ for (;;)
{
- found++;
- local_error= table->file->rnd_pos(table->record[0],
- (byte*) (*(tmp_table->field))->ptr);
- if (local_error)
- return local_error;
+ if (thd->killed && trans_safe)
+ goto err;
+ if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
+ {
+ if (local_error == HA_ERR_END_OF_FILE)
+ break;
+ if (local_error == HA_ERR_RECORD_DELETED)
+ continue; // May happen on dup key
+ goto err;
+ }
+ found++;
+ if ((local_error= table->file->rnd_pos(table->record[0], ref_pos)))
+ goto err;
table->status|= STATUS_UPDATED;
- store_record(table,1);
- local_error= (fill_record(*fields_by_tables[counter + 1],list) ||
- thd->net.report_error ||
- /* compare_record(table, query_id) || */
- table->file->update_row(table->record[1],
- table->record[0]));
- if (local_error)
+ store_record(table,1);
+
+ /* Copy data from temporary table to current table */
+ for (copy_field_ptr=copy_field;
+ copy_field_ptr != copy_field_end;
+ copy_field_ptr++)
+ (*copy_field_ptr->do_copy)(copy_field_ptr);
+
+ if (compare_record(table, thd->query_id))
{
- table->file->print_error(local_error,MYF(0));
- break;
+ if ((local_error=table->file->update_row(table->record[1],
+ table->record[0])))
+ {
+ if (local_error != HA_ERR_FOUND_DUPP_KEY ||
+ handle_duplicates != DUP_IGNORE)
+ goto err;
+ }
+ updated++;
+ if (table->tmp_table != NO_TMP_TABLE)
+ log_delayed= 1;
}
+ }
+
+ if (updated != org_updated)
+ {
+ if (table->tmp_table != NO_TMP_TABLE)
+ log_delayed= 1; // Tmp tables forces delay log
+ if (table->file->has_transactions())
+ log_delayed= transactional_tables= 1;
else
- updated++;
+ trans_safe= 0; // Can't do safe rollback
}
- if (local_error == HA_ERR_END_OF_FILE)
- local_error = 0;
}
- return local_error;
+ DBUG_RETURN(0);
+
+err:
+ if (!from_send_error)
+ table->file->print_error(local_error,MYF(0));
+
+ if (updated != org_updated)
+ {
+ if (table->tmp_table != NO_TMP_TABLE)
+ log_delayed= 1;
+ if (table->file->has_transactions())
+ log_delayed= transactional_tables= 1;
+ else
+ trans_safe= 0;
+ }
+ DBUG_RETURN(1);
}
@@ -798,61 +831,57 @@ int multi_update::do_updates (bool from_send_error)
bool multi_update::send_eof()
{
- thd->proc_info="updating the reference tables";
+ char buff[80];
+ thd->proc_info="updating reference tables";
/* Does updates for the last n - 1 tables, returns 0 if ok */
- int local_error = (num_updated) ? do_updates(false) : 0;
-
- /* reset used flags */
-#ifndef NOT_USED
- update_tables->table->no_keyread=0;
-#endif
- if (local_error == -1)
- local_error= 0;
+ int local_error = (table_count) ? do_updates(0) : 0;
thd->proc_info= "end";
- // TODO: Error should be sent at the query processing end
- if (local_error)
- send_error(local_error, "An error occured in multi-table update");
/*
Write the SQL statement to the binlog if we updated
- rows and we succeeded, or also in an error case when there
- was a non-transaction-safe table involved, since
- modifications in it cannot be rolled back.
+ rows and we succeeded or if we updated some non
+ transacational tables
*/
- if (updated || not_trans_safe)
+ if (updated && (local_error <= 0 || !trans_safe))
{
mysql_update_log.write(thd,thd->query,thd->query_length);
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
-
- /*
- mysql_bin_log is not open if binlogging or replication
- is not used
- */
+ if (mysql_bin_log.is_open())
+ {
+ Query_log_event qinfo(thd, thd->query, thd->query_length,
+ log_delayed);
+ if (mysql_bin_log.write(&qinfo) && trans_safe)
+ local_error= 1; // Rollback update
+ }
+ if (!log_delayed)
+ thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
+ }
- if (mysql_bin_log.is_open() && mysql_bin_log.write(&qinfo) &&
- !not_trans_safe)
- local_error=1; /* Log write failed: roll back the SQL statement */
+ if (transactional_tables)
+ {
+ if (ha_autocommit_or_rollback(thd, local_error != 0))
+ local_error=1;
+ }
- /* Commit or rollback the current SQL statement */
- VOID(ha_autocommit_or_rollback(thd, local_error > 0));
+ if (local_error > 0) // if the above log write did not fail ...
+ {
+ /* Safety: If we haven't got an error before (should not happen) */
+ my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
+ MYF(0));
+ ::send_error(&thd->net);
+ return 1;
}
- else
- local_error= 0; // this can happen only if it is end of file error
- if (!local_error) // if the above log write did not fail ...
+
+
+ sprintf(buff,ER(ER_UPDATE_INFO), (long) found, (long) updated,
+ (long) thd->cuted_fields);
+ if (updated)
{
- char buff[80];
- sprintf(buff,ER(ER_UPDATE_INFO), (long) found, (long) updated,
- (long) thd->cuted_fields);
- if (updated)
- {
- query_cache_invalidate3(thd, update_tables, 1);
- }
- ::send_ok(thd,
- (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
- thd->insert_id_used ? thd->insert_id() : 0L,buff);
+ query_cache_invalidate3(thd, update_tables, 1);
}
- thd->count_cuted_fields=0;
+ ::send_ok(&thd->net,
+ (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
+ thd->insert_id_used ? thd->insert_id() : 0L,buff);
return 0;
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 62090873178..3109aadca38 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -749,7 +749,10 @@ change:
LEX *lex = Lex;
lex->sql_command = SQLCOM_CHANGE_MASTER;
bzero((char*) &lex->mi, sizeof(lex->mi));
- } master_defs;
+ }
+ master_defs
+ {}
+ ;
master_defs:
master_def
@@ -830,7 +833,7 @@ create:
lex->create_info.table_charset=thd->db_charset?thd->db_charset:default_charset_info;
}
create2
-
+ {}
| CREATE opt_unique_or_fulltext INDEX ident key_alg ON table_ident
{
LEX *lex=Lex;
@@ -1382,8 +1385,9 @@ alter:
lex->alter_keys_onoff=LEAVE_AS_IS;
lex->simple_alter=1;
}
- alter_list;
-
+ alter_list
+ {}
+ ;
| ALTER DATABASE ident opt_db_default_character_set
{
LEX *lex=Lex;
@@ -1549,7 +1553,9 @@ repair:
lex->sql_command = SQLCOM_REPAIR;
lex->check_opt.init();
}
- table_list opt_mi_repair_type;
+ table_list opt_mi_repair_type
+ {}
+ ;
opt_mi_repair_type:
/* empty */ { Lex->check_opt.flags = T_MEDIUM; }
@@ -1571,7 +1577,9 @@ analyze:
lex->sql_command = SQLCOM_ANALYZE;
lex->check_opt.init();
}
- table_list opt_mi_check_type;
+ table_list opt_mi_check_type
+ {}
+ ;
check:
CHECK_SYM table_or_tables
@@ -1580,7 +1588,9 @@ check:
lex->sql_command = SQLCOM_CHECK;
lex->check_opt.init();
}
- table_list opt_mi_check_type;
+ table_list opt_mi_check_type
+ {}
+ ;
opt_mi_check_type:
/* empty */ { Lex->check_opt.flags = T_MEDIUM; }
@@ -1604,14 +1614,18 @@ optimize:
lex->sql_command = SQLCOM_OPTIMIZE;
lex->check_opt.init();
}
- table_list opt_mi_check_type;
+ table_list opt_mi_check_type
+ {}
+ ;
rename:
RENAME table_or_tables
{
Lex->sql_command=SQLCOM_RENAME_TABLE;
}
- table_to_table_list;
+ table_to_table_list
+ {}
+ ;
table_to_table_list:
table_to_table
@@ -1642,7 +1656,7 @@ select_init:
{
LEX *lex= Lex;
SELECT_LEX_NODE * sel= lex->current_select;
- if (sel->set_braces(true))
+ if (sel->set_braces(1))
{
send_error(lex->thd, ER_SYNTAX_ERROR);
YYABORT;
@@ -1656,7 +1670,7 @@ select_init2:
select_part2
{
LEX *lex= Lex;
- if (lex->current_select->set_braces(false))
+ if (lex->current_select->set_braces(0))
{
send_error(lex->thd, ER_SYNTAX_ERROR);
YYABORT;
@@ -2482,7 +2496,7 @@ join_table:
select_derived:
{
LEX *lex= Lex;
- lex->derived_tables= true;
+ lex->derived_tables= 1;
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE ||
mysql_new_select(lex, 1))
YYABORT;
@@ -2618,7 +2632,7 @@ olap_opt:
| WITH CUBE_SYM
{
LEX *lex=Lex;
- lex->olap = true;
+ lex->olap= 1;
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
{
net_printf(lex->thd, ER_WRONG_USAGE, "WITH CUBE",
@@ -2632,7 +2646,7 @@ olap_opt:
| WITH ROLLUP_SYM
{
LEX *lex= Lex;
- lex->olap= true;
+ lex->olap= 1;
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
{
net_printf(lex->thd, ER_WRONG_USAGE, "WITH ROLLUP",
@@ -2699,6 +2713,7 @@ limit_clause:
}
}
limit_options
+ {}
;
limit_options:
@@ -2753,7 +2768,7 @@ procedure_clause:
lex->proc_list.elements=0;
lex->proc_list.first=0;
lex->proc_list.next= (byte**) &lex->proc_list.first;
- if (add_proc_to_list(new Item_field(NULL,NULL,$2.str)))
+ if (add_proc_to_list(lex->thd, new Item_field(NULL,NULL,$2.str)))
YYABORT;
Lex->safe_to_cache_query=0;
}
@@ -2771,10 +2786,11 @@ procedure_list2:
procedure_item:
remember_name expr
{
- if (add_proc_to_list($2))
+ LEX *lex= Lex;
+ if (add_proc_to_list(lex->thd, $2))
YYABORT;
if (!$2->name)
- $2->set_name($1,(uint) ((char*) Lex->tok_end - $1));
+ $2->set_name($1,(uint) ((char*) lex->tok_end - $1));
}
;
@@ -2842,7 +2858,10 @@ do: DO_SYM
if (!(lex->insert_list = new List_item))
YYABORT;
}
- values;
+ values
+ {}
+ ;
+
/*
Drop : delete tables or index
*/
@@ -2928,6 +2947,8 @@ replace:
Select->set_lock_for_tables($3);
}
insert_field_spec
+ {}
+ {}
;
insert_lock_option:
@@ -3122,13 +3143,15 @@ single_multi:
YYABORT;
}
where_clause opt_order_clause
- delete_limit_clause
+ delete_limit_clause {}
| table_wild_list
{ mysql_init_multi_delete(Lex); }
FROM join_table_list where_clause
| FROM table_wild_list
{ mysql_init_multi_delete(Lex); }
- USING join_table_list where_clause;
+ USING join_table_list where_clause
+ {}
+ ;
table_wild_list:
table_wild_one {}
@@ -3184,7 +3207,9 @@ show: SHOW
lex->wild=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
}
- show_param;
+ show_param
+ {}
+ ;
show_param:
DATABASES wild
@@ -3354,13 +3379,13 @@ describe:
if (!Select->add_table_to_list($2, NULL,0))
YYABORT;
}
- opt_describe_column
+ opt_describe_column {}
| describe_command { Lex->describe=1; } select
{
LEX *lex=Lex;
lex->select_lex.options|= SELECT_DESCRIBE;
- };
-
+ }
+ ;
describe_command:
DESC
@@ -3381,14 +3406,16 @@ flush:
LEX *lex=Lex;
lex->sql_command= SQLCOM_FLUSH; lex->type=0;
}
- flush_options;
+ flush_options
+ {}
+ ;
flush_options:
flush_options ',' flush_option
| flush_option;
flush_option:
- table_or_tables { Lex->type|= REFRESH_TABLES; } opt_table_list
+ table_or_tables { Lex->type|= REFRESH_TABLES; } opt_table_list {}
| TABLES WITH READ_SYM LOCK_SYM { Lex->type|= REFRESH_TABLES | REFRESH_READ_LOCK; }
| QUERY_SYM CACHE_SYM { Lex->type|= REFRESH_QUERY_CACHE_FREE; }
| HOSTS_SYM { Lex->type|= REFRESH_HOSTS; }
@@ -3409,7 +3436,10 @@ reset:
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_RESET; lex->type=0;
- } reset_options;
+ } reset_options
+ {}
+ ;
+
reset_options:
reset_options ',' reset_option
| reset_option;
@@ -3840,7 +3870,9 @@ set:
lex->option_type=OPT_DEFAULT;
lex->var_list.empty();
}
- option_value_list;
+ option_value_list
+ {}
+ ;
opt_option:
/* empty */ {}
@@ -3964,7 +3996,9 @@ lock:
{
Lex->sql_command=SQLCOM_LOCK_TABLES;
}
- table_lock_list;
+ table_lock_list
+ {}
+ ;
table_or_tables:
TABLE_SYM
@@ -4074,7 +4108,9 @@ revoke:
lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0;
bzero((char*) &lex->mqh, sizeof(lex->mqh));
}
- grant_privileges ON opt_table FROM user_list;
+ grant_privileges ON opt_table FROM user_list
+ {}
+ ;
grant:
GRANT
@@ -4090,7 +4126,9 @@ grant:
bzero(&(lex->mqh),sizeof(lex->mqh));
}
grant_privileges ON opt_table TO_SYM user_list
- require_clause grant_options;
+ require_clause grant_options
+ {}
+ ;
grant_privileges:
grant_privilege_list {}
@@ -4103,10 +4141,10 @@ grant_privilege_list:
| grant_privilege_list ',' grant_privilege;
grant_privilege:
- SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list
- | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list
- | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list
- | REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list
+ SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {}
+ | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list {}
+ | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list {}
+ | REFERENCES { Lex->which_columns = REFERENCES_ACL;} opt_column_list {}
| DELETE_SYM { Lex->grant |= DELETE_ACL;}
| USAGE {}
| INDEX { Lex->grant |= INDEX_ACL;}
@@ -4333,7 +4371,8 @@ grant_option:
;
begin:
- BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN;} opt_work;
+ BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN;} opt_work {}
+ ;
opt_work:
/* empty */ {}
@@ -4376,7 +4415,7 @@ union_list:
YYABORT;
lex->current_select->linkage=UNION_TYPE;
}
- select_init
+ select_init {}
;
union_opt:
diff --git a/sql/table.h b/sql/table.h
index 18079e183ce..149cc6bca13 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -118,21 +118,22 @@ struct st_table {
table_map map; /* ID bit of table (1,2,4,8,16...) */
ulong version,flush_version;
uchar *null_flags;
- IO_CACHE *io_cache; /* If sorted trough file*/
- byte *record_pointers; /* If sorted in memory */
- ha_rows found_records; /* How many records in sort */
+ IO_CACHE *io_cache; /* If sorted trough filebyte *record_pointers; /* If sorted in memory */
+ ha_rows found_records; /* How many records in sort */
ORDER *group;
ha_rows quick_rows[MAX_KEY];
uint quick_key_parts[MAX_KEY];
key_part_map const_key_parts[MAX_KEY];
ulong query_id;
- uint temp_pool_slot;
-
+ union /* Temporary variables */
+ {
+ uint temp_pool_slot; /* Used by intern temp tables */
+ struct st_table_list *pos_in_table_list;
+ };
/* number of select if it is derived table */
uint derived_select_number;
-
- THD *in_use; /* Which thread uses this */
+ THD *in_use; /* Which thread uses this */
struct st_table *next,*prev;
};
@@ -161,10 +162,10 @@ typedef struct st_table_list
GRANT_INFO grant;
thr_lock_type lock_type;
uint outer_join; /* Which join type */
+ uint shared; /* Used in union or in multi-upd */
uint32 db_length, real_name_length;
bool straight; /* optimize with prev table */
bool updating; /* for replicate-do/ignore table */
- bool shared; /* Used twice in union */
bool do_redirect; /* To get the struct in UNION's */
} TABLE_LIST;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 81310c4a863..a171ba42ff3 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -590,7 +590,7 @@ static bool make_empty_rec(File file,enum db_type table_type,
if (field->def &&
(regfield->real_type() != FIELD_TYPE_YEAR ||
field->def->val_int() != 0))
- (void) field->def->save_in_field(regfield);
+ (void) field->def->save_in_field(regfield, 1);
else if (regfield->real_type() == FIELD_TYPE_ENUM &&
(field->flags & NOT_NULL_FLAG))
{