author     unknown <tomas@whalegate.ndb.mysql.com>  2008-01-23 15:07:18 +0100
committer  unknown <tomas@whalegate.ndb.mysql.com>  2008-01-23 15:07:18 +0100
commit     34f792c3e43318cbf90aafec1864805d3d7698e6 (patch)
tree       a18b86fdf094478434a9ba76aa7d95410d0ba170 /sql
parent     e95e266d6636270215721709bf060e8375773ee8 (diff)
parent     44236981b35651b787f1a9b43cc8ebc714557a8e (diff)
download   mariadb-git-34f792c3e43318cbf90aafec1864805d3d7698e6.tar.gz
Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.0-ndb
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.0-ndb-merge
Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc         |  10
-rw-r--r--  sql/field.h          |  15
-rw-r--r--  sql/filesort.cc      |  26
-rw-r--r--  sql/item.cc          |   6
-rw-r--r--  sql/item_sum.cc      | 154
-rw-r--r--  sql/item_sum.h       |  28
-rw-r--r--  sql/item_timefunc.cc |   4
-rw-r--r--  sql/item_timefunc.h  |   6
-rw-r--r--  sql/log.cc           |   4
-rw-r--r--  sql/repl_failsafe.cc |   2
-rw-r--r--  sql/sp.cc            |   2
-rw-r--r--  sql/sp_head.cc       |   2
-rw-r--r--  sql/sql_base.cc      |  31
-rw-r--r--  sql/sql_class.cc     |   1
-rw-r--r--  sql/sql_class.h      |  37
-rw-r--r--  sql/sql_select.cc    |   2
-rw-r--r--  sql/sql_yacc.yy      |  36
-rw-r--r--  sql/unireg.cc        |   8
18 files changed, 257 insertions, 117 deletions
diff --git a/sql/field.cc b/sql/field.cc
index 3f00b6750bc..f1e2b6a4f27 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1304,7 +1304,8 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
field_name(field_name_arg),
query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0),
unireg_check(unireg_check_arg),
- field_length(length_arg), null_bit(null_bit_arg)
+ field_length(length_arg), null_bit(null_bit_arg),
+ is_created_from_null_item(FALSE)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
@@ -5194,6 +5195,13 @@ String *Field_date::val_str(String *val_buffer,
}
+bool Field_date::get_time(MYSQL_TIME *ltime)
+{
+ bzero((char *)ltime, sizeof(MYSQL_TIME));
+ return 0;
+}
+
+
int Field_date::cmp(const char *a_ptr, const char *b_ptr)
{
int32 a,b;
diff --git a/sql/field.h b/sql/field.h
index 05c22092e53..d681229a9fd 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -89,6 +89,16 @@ public:
uint field_index; // field number in fields array
uint16 flags;
uchar null_bit; // Bit used to test null bit
+ /**
+ If true, this field was created in create_tmp_field_from_item from a NULL
+ value. This means that the type of the field is just a guess, and the type
+ may be freely coerced to another type.
+
+ @see create_tmp_field_from_item
+ @see Item_type_holder::get_real_type
+
+ */
+ bool is_created_from_null_item;
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
utype unireg_check_arg, const char *field_name_arg,
@@ -934,6 +944,7 @@ public:
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
+ bool get_time(MYSQL_TIME *ltime);
bool send_binary(Protocol *protocol);
int cmp(const char *,const char*);
void sort_string(char *buff,uint length);
@@ -951,6 +962,10 @@ public:
:Field_str(ptr_arg, 10, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
{}
+ Field_newdate(bool maybe_null_arg, const char *field_name_arg,
+ struct st_table *table_arg, CHARSET_INFO *cs)
+ :Field_str((char*) 0,10, maybe_null_arg ? (uchar*) "": 0,0,
+ NONE, field_name_arg, table_arg, cs) {}
enum_field_types type() const { return FIELD_TYPE_DATE;}
enum_field_types real_type() const { return FIELD_TYPE_NEWDATE; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_UINT24; }
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 08ffa2211fa..43b079e83d5 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -37,7 +37,8 @@ if (my_b_write((file),(byte*) (from),param->ref_length)) \
static char **make_char_array(char **old_pos, register uint fields,
uint length, myf my_flag);
-static BUFFPEK *read_buffpek_from_file(IO_CACHE *buffer_file, uint count);
+static byte *read_buffpek_from_file(IO_CACHE *buffer_file, uint count,
+ byte *buf);
static ha_rows find_all_keys(SORTPARAM *param,SQL_SELECT *select,
uchar * *sort_keys, IO_CACHE *buffer_file,
IO_CACHE *tempfile,IO_CACHE *indexfile);
@@ -238,9 +239,14 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
}
else
{
- if (!table_sort.buffpek && table_sort.buffpek_len < maxbuffer &&
- !(table_sort.buffpek=
- (byte *) read_buffpek_from_file(&buffpek_pointers, maxbuffer)))
+ if (table_sort.buffpek && table_sort.buffpek_len < maxbuffer)
+ {
+ x_free(table_sort.buffpek);
+ table_sort.buffpek= 0;
+ }
+ if (!(table_sort.buffpek=
+ read_buffpek_from_file(&buffpek_pointers, maxbuffer,
+ table_sort.buffpek)))
goto err;
buffpek= (BUFFPEK *) table_sort.buffpek;
table_sort.buffpek_len= maxbuffer;
@@ -368,18 +374,20 @@ static char **make_char_array(char **old_pos, register uint fields,
/* Read 'count' number of buffer pointers into memory */
-static BUFFPEK *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count)
+static byte *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
+ byte *buf)
{
- ulong length;
- BUFFPEK *tmp;
+ ulong length= sizeof(BUFFPEK)*count;
+ byte *tmp= buf;
DBUG_ENTER("read_buffpek_from_file");
if (count > UINT_MAX/sizeof(BUFFPEK))
return 0; /* sizeof(BUFFPEK)*count will overflow */
- tmp=(BUFFPEK*) my_malloc(length=sizeof(BUFFPEK)*count, MYF(MY_WME));
+ if (!tmp)
+ tmp= (byte *)my_malloc(length, MYF(MY_WME));
if (tmp)
{
if (reinit_io_cache(buffpek_pointers,READ_CACHE,0L,0,0) ||
- my_b_read(buffpek_pointers, (byte*) tmp, length))
+ my_b_read(buffpek_pointers, tmp, length))
{
my_free((char*) tmp, MYF(0));
tmp=0;
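
The filesort.cc change above turns read_buffpek_from_file() into a buffer-reusing helper: the caller passes in any previously allocated buffpek array, the function only allocates when none is supplied, and the caller frees and reallocates only when the old array is too small for the current merge. A minimal standalone sketch of that pattern follows; IndexEntry and read_index_block are illustrative names, not the server's API, and memcpy stands in for reading from the IO_CACHE.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>

// Illustrative sketch (not the server's API) of the buffer-reuse pattern in
// read_buffpek_from_file(): reuse the caller's buffer when one is passed in,
// allocate only when it is missing, and guard the size computation against
// overflow before touching memory.
struct IndexEntry { std::uint64_t file_pos; std::uint32_t row_count; };

static unsigned char *read_index_block(const unsigned char *src,
                                       std::size_t count,
                                       unsigned char *buf)
{
  if (count > std::numeric_limits<std::size_t>::max() / sizeof(IndexEntry))
    return nullptr;                   // sizeof(IndexEntry) * count would overflow
  std::size_t length = sizeof(IndexEntry) * count;
  unsigned char *tmp = buf;           // reuse the caller's buffer if any
  if (tmp == nullptr)
    tmp = static_cast<unsigned char *>(std::malloc(length));
  if (tmp != nullptr)
    std::memcpy(tmp, src, length);    // stands in for my_b_read() on the IO_CACHE
  return tmp;
}

Keeping the pointer and its capacity together (table_sort.buffpek and buffpek_len in the hunk above) is what lets a repeated sort on the same table skip the allocation entirely.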
diff --git a/sql/item.cc b/sql/item.cc
index 975b517284f..cafa40ecbb0 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -4384,7 +4384,7 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table)
name, table, 0, unsigned_flag);
case MYSQL_TYPE_NEWDATE:
case MYSQL_TYPE_DATE:
- return new Field_date(maybe_null, name, table, &my_charset_bin);
+ return new Field_newdate(maybe_null, name, table, &my_charset_bin);
case MYSQL_TYPE_TIME:
return new Field_time(maybe_null, name, table, &my_charset_bin);
case MYSQL_TYPE_TIMESTAMP:
@@ -6608,6 +6608,8 @@ enum_field_types Item_type_holder::get_real_type(Item *item)
*/
Field *field= ((Item_field *) item)->field;
enum_field_types type= field->real_type();
+ if (field->is_created_from_null_item)
+ return MYSQL_TYPE_NULL;
/* work around about varchar type field detection */
if (type == MYSQL_TYPE_STRING && field->type() == MYSQL_TYPE_VAR_STRING)
return MYSQL_TYPE_VAR_STRING;
@@ -6859,6 +6861,8 @@ Field *Item_type_holder::make_field_by_type(TABLE *table)
Field::NONE, name,
table, get_set_pack_length(enum_set_typelib->count),
enum_set_typelib, collation.collation);
+ case MYSQL_TYPE_NULL:
+ return make_string_field(table);
default:
break;
}
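
Taken together with the new Field::is_created_from_null_item flag from field.h (set in create_tmp_field_from_item(), as the sql_select.cc hunk further down shows), these item.cc hunks make a column materialized from a literal NULL behave as "type not yet decided": get_real_type() reports it as MYSQL_TYPE_NULL so another UNION branch can impose a real type, and make_field_by_type() falls back to a string field when every branch was NULL. A simplified, standalone sketch of that aggregation idea, with illustrative names rather than the server's signatures:

// Simplified sketch of the type aggregation across UNION branches: a column
// that was built from a literal NULL stays "undecided" and is overridden by
// the first branch that supplies a real type; if every branch was NULL, the
// caller falls back to a string column (make_string_field in the patch).
enum class ColType { Null, Long, Date, String };

struct Column {
  ColType type = ColType::Null;
  bool created_from_null_item = false;  // mirrors Field::is_created_from_null_item
};

static ColType real_type(const Column &col)
{
  // A field built from a NULL literal is only a guess at a type, so report
  // it as Null and let the other branches coerce it freely.
  return col.created_from_null_item ? ColType::Null : col.type;
}

static ColType join_types(ColType aggregated, const Column &next)
{
  ColType t = real_type(next);
  return (t == ColType::Null) ? aggregated : t;   // Null never wins
}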
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index ad8ebd0791c..3d261dc2c36 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -628,7 +628,7 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
*/
switch (args[0]->field_type()) {
case MYSQL_TYPE_DATE:
- return new Field_date(maybe_null, name, table, collation.collation);
+ return new Field_newdate(maybe_null, name, table, collation.collation);
case MYSQL_TYPE_TIME:
return new Field_time(maybe_null, name, table, collation.collation);
case MYSQL_TYPE_TIMESTAMP:
@@ -2831,44 +2831,51 @@ String *Item_sum_udf_str::val_str(String *str)
concat of values from "group by" operation
BUGS
- DISTINCT and ORDER BY only works if ORDER BY uses all fields and only fields
- in expression list
Blobs doesn't work with DISTINCT or ORDER BY
*****************************************************************************/
-/*
- function of sort for syntax:
- GROUP_CONCAT(DISTINCT expr,...)
+
+
+/**
+ Compares the values for fields in expr list of GROUP_CONCAT.
+ @note
+
+ GROUP_CONCAT([DISTINCT] expr [,expr ...]
+ [ORDER BY {unsigned_integer | col_name | expr}
+ [ASC | DESC] [,col_name ...]]
+ [SEPARATOR str_val])
+
+ @return
+ @retval -1 : key1 < key2
+ @retval 0 : key1 = key2
+ @retval 1 : key1 > key2
*/
-int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
- byte* key2)
+int group_concat_key_cmp_with_distinct(void* arg, const void* key1,
+ const void* key2)
{
- Item_func_group_concat* grp_item= (Item_func_group_concat*)arg;
- TABLE *table= grp_item->table;
- Item **field_item, **end;
+ Item_func_group_concat *item_func= (Item_func_group_concat*)arg;
+ TABLE *table= item_func->table;
- for (field_item= grp_item->args, end= field_item + grp_item->arg_count_field;
- field_item < end;
- field_item++)
+ for (uint i= 0; i < item_func->arg_count_field; i++)
{
+ Item *item= item_func->args[i];
+ /*
+ If the item is a const item then either get_tmp_table_field returns 0
+ or it is an item over a const table.
+ */
+ if (item->const_item())
+ continue;
/*
We have to use get_tmp_table_field() instead of
real_item()->get_tmp_table_field() because we want the field in
the temporary table, not the original field
*/
- Field *field= (*field_item)->get_tmp_table_field();
- /*
- If field_item is a const item then either get_tp_table_field returns 0
- or it is an item over a const table.
- */
- if (field && !(*field_item)->const_item())
- {
- int res;
- uint offset= field->offset() - table->s->null_bytes;
- if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset)))
- return res;
- }
+ Field *field= item->get_tmp_table_field();
+ int res;
+ uint offset= field->offset()-table->s->null_bytes;
+ if((res= field->cmp((char*)key1 + offset, (char*)key2 + offset)))
+ return res;
}
return 0;
}
@@ -2879,7 +2886,8 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
GROUP_CONCAT(expr,... ORDER BY col,... )
*/
-int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
+int group_concat_key_cmp_with_order(void* arg, const void* key1,
+ const void* key2)
{
Item_func_group_concat* grp_item= (Item_func_group_concat*) arg;
ORDER **order_item, **end;
@@ -2918,25 +2926,6 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
/*
- function of sort for syntax:
- GROUP_CONCAT(DISTINCT expr,... ORDER BY col,... )
-
- BUG:
- This doesn't work in the case when the order by contains data that
- is not part of the field list because tree-insert will not notice
- the duplicated values when inserting things sorted by ORDER BY
-*/
-
-int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1,
- byte* key2)
-{
- if (!group_concat_key_cmp_with_distinct(arg,key1,key2))
- return 0;
- return(group_concat_key_cmp_with_order(arg,key1,key2));
-}
-
-
-/*
Append data from current leaf to item->result
*/
@@ -3020,7 +3009,7 @@ Item_func_group_concat(Name_resolution_context *context_arg,
bool distinct_arg, List<Item> *select_list,
SQL_LIST *order_list, String *separator_arg)
:tmp_table_param(0), warning(0),
- separator(separator_arg), tree(0), table(0),
+ separator(separator_arg), tree(0), unique_filter(NULL), table(0),
order(0), context(context_arg),
arg_count_order(order_list ? order_list->elements : 0),
arg_count_field(select_list->elements),
@@ -3075,6 +3064,7 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
warning(item->warning),
separator(item->separator),
tree(item->tree),
+ unique_filter(item->unique_filter),
table(item->table),
order(item->order),
context(item->context),
@@ -3125,6 +3115,11 @@ void Item_func_group_concat::cleanup()
delete_tree(tree);
tree= 0;
}
+ if (unique_filter)
+ {
+ delete unique_filter;
+ unique_filter= NULL;
+ }
if (warning)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
@@ -3154,6 +3149,8 @@ void Item_func_group_concat::clear()
no_appended= TRUE;
if (tree)
reset_tree(tree);
+ if (distinct)
+ unique_filter->reset();
/* No need to reset the table as we never call write_row */
}
@@ -3177,9 +3174,19 @@ bool Item_func_group_concat::add()
}
null_value= FALSE;
+ bool row_eligible= TRUE;
+
+ if (distinct)
+ {
+ /* Filter out duplicate rows. */
+ uint count= unique_filter->elements_in_tree();
+ unique_filter->unique_add(table->record[0] + table->s->null_bytes);
+ if (count == unique_filter->elements_in_tree())
+ row_eligible= FALSE;
+ }
TREE_ELEMENT *el= 0; // Only for safety
- if (tree)
+ if (row_eligible && tree)
el= tree_insert(tree, table->record[0] + table->s->null_bytes, 0,
tree->custom_arg);
/*
@@ -3187,7 +3194,7 @@ bool Item_func_group_concat::add()
we can dump the row here in case of GROUP_CONCAT(DISTINCT...)
instead of doing tree traverse later.
*/
- if (!warning_for_row &&
+ if (row_eligible && !warning_for_row &&
(!tree || (el->count == 1 && distinct && !arg_count_order)))
dump_leaf_key(table->record[0] + table->s->null_bytes, 1, this);
@@ -3263,7 +3270,6 @@ bool Item_func_group_concat::setup(THD *thd)
{
List<Item> list;
SELECT_LEX *select_lex= thd->lex->current_select;
- qsort_cmp2 compare_key;
DBUG_ENTER("Item_func_group_concat::setup");
/*
@@ -3353,38 +3359,33 @@ bool Item_func_group_concat::setup(THD *thd)
table->file->extra(HA_EXTRA_NO_ROWS);
table->no_rows= 1;
+ /*
+ Need sorting or uniqueness: init tree and choose a function to sort.
+ Don't reserve space for NULLs: if any of gconcat arguments is NULL,
+ the row is not added to the result.
+ */
+ uint tree_key_length= table->s->reclength - table->s->null_bytes;
- if (distinct || arg_count_order)
+ if (arg_count_order)
{
- /*
- Need sorting: init tree and choose a function to sort.
- Don't reserve space for NULLs: if any of gconcat arguments is NULL,
- the row is not added to the result.
- */
- uint tree_key_length= table->s->reclength - table->s->null_bytes;
-
tree= &tree_base;
- if (arg_count_order)
- {
- if (distinct)
- compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct_and_order;
- else
- compare_key= (qsort_cmp2) group_concat_key_cmp_with_order;
- }
- else
- {
- compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct;
- }
/*
- Create a tree for sorting. The tree is used to sort and to remove
- duplicate values (according to the syntax of this function). If there
- is no DISTINCT or ORDER BY clauses, we don't create this tree.
+ Create a tree for sorting. The tree is used to sort (according to the
+ syntax of this function). If there is no ORDER BY clause, we don't
+ create this tree.
*/
init_tree(tree, (uint) min(thd->variables.max_heap_table_size,
thd->variables.sortbuff_size/16), 0,
- tree_key_length, compare_key, 0, NULL, (void*) this);
+ tree_key_length,
+ group_concat_key_cmp_with_order , 0, NULL, (void*) this);
}
+ if (distinct)
+ unique_filter= new Unique(group_concat_key_cmp_with_distinct,
+ (void*)this,
+ tree_key_length,
+ thd->variables.max_heap_table_size);
+
DBUG_RETURN(FALSE);
}
@@ -3454,3 +3455,10 @@ void Item_func_group_concat::print(String *str)
str->append(*separator);
str->append(STRING_WITH_LEN("\')"));
}
+
+
+Item_func_group_concat::~Item_func_group_concat()
+{
+ if (unique_filter)
+ delete unique_filter;
+}
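
The bulk of the item_sum.cc change reworks GROUP_CONCAT(DISTINCT ...): instead of folding the DISTINCT check into the sort tree's comparison function, duplicates are filtered through a separate Unique object, and a row is appended only when unique_add() actually grows the filter. A compact sketch of that "did the insert grow the container" test, with std::set standing in for the server's Unique class and row keys treated as opaque strings:

#include <set>
#include <string>
#include <vector>

// Sketch of the duplicate check now done in Item_func_group_concat::add():
// with DISTINCT, a row is eligible only if adding its packed key grew the
// filter, i.e. the key had not been seen before. std::set stands in for the
// server's Unique class.
struct GroupConcatSketch {
  bool distinct = false;
  std::set<std::string> unique_filter;    // keyed on the packed row image
  std::vector<std::string> result_rows;

  void add(const std::string &row_key)
  {
    bool row_eligible = true;
    if (distinct) {
      auto before = unique_filter.size();
      unique_filter.insert(row_key);
      if (unique_filter.size() == before)  // size unchanged => duplicate row
        row_eligible = false;
    }
    if (row_eligible)
      result_rows.push_back(row_key);      // stands in for dump_leaf_key()
  }

  void clear()                             // per-group reset, as in ::clear()
  {
    result_rows.clear();
    if (distinct)
      unique_filter.clear();
  }
};

Splitting DISTINCT out of the comparator is also what lets the ORDER BY tree keep a single comparison function (group_concat_key_cmp_with_order) and retire the combined _distinct_and_order variant removed above.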
diff --git a/sql/item_sum.h b/sql/item_sum.h
index d18454cc3b8..bf0abe53eea 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -1173,11 +1173,22 @@ class Item_func_group_concat : public Item_sum
String *separator;
TREE tree_base;
TREE *tree;
+
+ /**
+ If DISTINCT is used with this GROUP_CONCAT, this member is used to filter
+ out duplicates.
+ @see Item_func_group_concat::setup
+ @see Item_func_group_concat::add
+ @see Item_func_group_concat::clear
+ */
+ Unique *unique_filter;
TABLE *table;
ORDER **order;
Name_resolution_context *context;
- uint arg_count_order; // total count of ORDER BY items
- uint arg_count_field; // count of arguments
+ /** The number of ORDER BY items. */
+ uint arg_count_order;
+ /** The number of selected items, aka the expr list. */
+ uint arg_count_field;
uint count_cut_values;
bool distinct;
bool warning_for_row;
@@ -1190,13 +1201,10 @@ class Item_func_group_concat : public Item_sum
*/
Item_func_group_concat *original;
- friend int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
- byte* key2);
- friend int group_concat_key_cmp_with_order(void* arg, byte* key1,
- byte* key2);
- friend int group_concat_key_cmp_with_distinct_and_order(void* arg,
- byte* key1,
- byte* key2);
+ friend int group_concat_key_cmp_with_distinct(void* arg, const void* key1,
+ const void* key2);
+ friend int group_concat_key_cmp_with_order(void* arg, const void* key1,
+ const void* key2);
friend int dump_leaf_key(byte* key,
element_count count __attribute__((unused)),
Item_func_group_concat *group_concat_item);
@@ -1207,7 +1215,7 @@ public:
SQL_LIST *is_order, String *is_separator);
Item_func_group_concat(THD *thd, Item_func_group_concat *item);
- ~Item_func_group_concat() {}
+ ~Item_func_group_concat();
void cleanup();
enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 4ddc788b182..0cb3c963dad 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -3303,7 +3303,7 @@ Field *Item_func_str_to_date::tmp_table_field(TABLE *t_arg)
if (cached_field_type == MYSQL_TYPE_TIME)
return (new Field_time(maybe_null, name, t_arg, &my_charset_bin));
if (cached_field_type == MYSQL_TYPE_DATE)
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
+ return (new Field_newdate(maybe_null, name, t_arg, &my_charset_bin));
if (cached_field_type == MYSQL_TYPE_DATETIME)
return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin));
return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin));
@@ -3415,6 +3415,8 @@ bool Item_func_last_day::get_date(MYSQL_TIME *ltime, uint fuzzy_date)
ltime->day= days_in_month[month_idx];
if ( month_idx == 1 && calc_days_in_year(ltime->year) == 366)
ltime->day= 29;
+ ltime->hour= ltime->minute= ltime->second= 0;
+ ltime->second_part= 0;
ltime->time_type= MYSQL_TIMESTAMP_DATE;
return 0;
}
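
The LAST_DAY() hunk is a reminder that every member of the returned MYSQL_TIME must be initialized when the result is a pure DATE, since nothing else clears the time part. A tiny sketch of the same idea with a stand-in struct; DateParts and set_last_day are hypothetical names, not the real MYSQL_TIME API:

#include <cstring>

// Sketch of the LAST_DAY() fix: when producing a pure DATE, initialize the
// time members explicitly so stale values cannot leak into later comparisons
// or conversions.
struct DateParts {
  unsigned year, month, day;
  unsigned hour, minute, second;
  unsigned long second_part;
};

static void set_last_day(DateParts *lt, unsigned year, unsigned month,
                         unsigned last_day_of_month)
{
  std::memset(lt, 0, sizeof(*lt));   // clear everything up front
  lt->year  = year;
  lt->month = month;
  lt->day   = last_day_of_month;
  // hour, minute, second and second_part stay zero, matching the added lines
}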
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index b647e93b700..7960c03d2e5 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -340,7 +340,7 @@ public:
}
Field *tmp_table_field(TABLE *t_arg)
{
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
+ return (new Field_newdate(maybe_null, name, t_arg, &my_charset_bin));
}
bool result_as_longlong() { return TRUE; }
my_decimal *val_decimal(my_decimal *decimal_value)
@@ -784,7 +784,7 @@ public:
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
Field *tmp_table_field(TABLE *t_arg)
{
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
+ return (new Field_newdate(maybe_null, name, t_arg, &my_charset_bin));
}
void fix_length_and_dec()
{
@@ -884,7 +884,7 @@ public:
}
Field *tmp_table_field(TABLE *t_arg)
{
- return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
+ return (new Field_newdate(maybe_null, name, t_arg, &my_charset_bin));
}
longlong val_int();
my_decimal *val_decimal(my_decimal *decimal_value)
diff --git a/sql/log.cc b/sql/log.cc
index af03cecd462..e66d965c613 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1284,10 +1284,10 @@ err:
void MYSQL_LOG::make_log_name(char* buf, const char* log_ident)
{
uint dir_len = dirname_length(log_file_name);
- if (dir_len > FN_REFLEN)
+ if (dir_len >= FN_REFLEN)
dir_len=FN_REFLEN-1;
strnmov(buf, log_file_name, dir_len);
- strmake(buf+dir_len, log_ident, FN_REFLEN - dir_len);
+ strmake(buf+dir_len, log_ident, FN_REFLEN - dir_len -1);
}
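
This log.cc hunk, and the similar one-byte adjustments in repl_failsafe.cc and sp.cc below, all correct length arguments passed to strmake()/strnmov(). As used here, strmake() copies at most `length` characters and then writes a terminating NUL, so a destination of N bytes must be given a length of N - 1. A standalone illustration of that convention; my_strmake and make_name are local stand-ins, not the mysys functions:

#include <cstddef>
#include <cstring>

// Stand-in for strmake(): copy at most `length` characters from src and
// always write a terminating NUL, so it may touch up to length + 1 bytes
// of dst. This is the convention the corrected call sites rely on.
static char *my_strmake(char *dst, const char *src, std::size_t length)
{
  std::size_t n = std::strlen(src);
  if (n > length)
    n = length;
  std::memcpy(dst, src, n);
  dst[n] = '\0';
  return dst + n;
}

// Filling a buffer of out_size bytes therefore takes a length of
// out_size - 1, which is exactly the "- 1" added in make_log_name() above.
static void make_name(char *out, std::size_t out_size,
                      const char *prefix, const char *ident)
{
  std::size_t dir_len = std::strlen(prefix);
  if (dir_len >= out_size)            // clamp so the terminator always fits
    dir_len = out_size - 1;
  std::memcpy(out, prefix, dir_len);
  my_strmake(out + dir_len, ident, out_size - dir_len - 1);
}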
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 1dc16b6e566..896315ec82f 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -922,7 +922,7 @@ bool load_master_data(THD* thd)
0, (SLAVE_IO | SLAVE_SQL)))
my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0));
strmake(active_mi->master_log_name, row[0],
- sizeof(active_mi->master_log_name));
+ sizeof(active_mi->master_log_name) -1);
active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error_2);
/* at least in recent versions, the condition below should be false */
if (active_mi->master_log_pos < BIN_LOG_HEADER_SIZE)
diff --git a/sql/sp.cc b/sql/sp.cc
index 0b84e1ad07f..f8b039626f9 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -1895,7 +1895,7 @@ sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
if (thd->db)
{
- old_db->length= (strmake(old_db->str, thd->db, old_db->length) -
+ old_db->length= (strmake(old_db->str, thd->db, old_db->length - 1) -
old_db->str);
}
else
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index e55ba81b117..e8ddacb820e 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -453,7 +453,7 @@ sp_head::operator new(size_t size) throw()
}
void
-sp_head::operator delete(void *ptr, size_t size)
+sp_head::operator delete(void *ptr, size_t size) throw()
{
DBUG_ENTER("sp_head::operator delete");
MEM_ROOT own_root;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index e79e4db31d2..8190b3eeacd 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -4254,7 +4254,36 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
*resolution= RESOLVED_IGNORING_ALIAS;
break;
}
- }
+ }
+ else if (table_name && item->type() == Item::REF_ITEM &&
+ ((Item_ref *)item)->ref_type() == Item_ref::VIEW_REF)
+ {
+ /*
+ TODO: Here we process prefixed view references only. What we should
+ really do is process all types of Item_refs. But this will currently
+ lead to a clash with the way references to outer SELECTs (from the
+ HAVING clause) are handled in e.g. :
+ SELECT 1 FROM t1 AS t1_o GROUP BY a
+ HAVING (SELECT t1_o.a FROM t1 AS t1_i GROUP BY t1_i.a LIMIT 1).
+ Processing all Item_refs here will cause t1_o.a to resolve to itself.
+ We still need to process the special case of Item_direct_view_ref
+ because in the context of views they have the same meaning as
+ Item_field for tables.
+ */
+ Item_ident *item_ref= (Item_ident *) item;
+ if (item_ref->name && item_ref->table_name &&
+ !my_strcasecmp(system_charset_info, item_ref->name, field_name) &&
+ !my_strcasecmp(table_alias_charset, item_ref->table_name,
+ table_name) &&
+ (!db_name || (item_ref->db_name &&
+ !strcmp (item_ref->db_name, db_name))))
+ {
+ found= li.ref();
+ *counter= i;
+ *resolution= RESOLVED_IGNORING_ALIAS;
+ break;
+ }
+ }
}
if (!found)
{
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 93f5a34d5c6..e36ed0c425f 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -931,6 +931,7 @@ void THD::rollback_item_tree_changes()
select_result::select_result()
{
thd=current_thd;
+ nest_level= -1;
}
void select_result::send_error(uint errcode,const char *err)
diff --git a/sql/sql_class.h b/sql/sql_class.h
index daeb16de7e0..97f2d07b1d3 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1912,6 +1912,7 @@ class select_result :public Sql_alloc {
protected:
THD *thd;
SELECT_LEX_UNIT *unit;
+ uint nest_level;
public:
select_result();
virtual ~select_result() {};
@@ -1948,6 +1949,12 @@ public:
*/
virtual void cleanup();
void set_thd(THD *thd_arg) { thd= thd_arg; }
+ /**
+ The nest level, if supported.
+ @return
+ -1 if nest level is undefined, otherwise a positive integer.
+ */
+ int get_nest_level() { return nest_level; }
#ifdef EMBEDDED_LIBRARY
virtual void begin_dataset() {}
#else
@@ -2034,7 +2041,14 @@ class select_export :public select_to_file {
bool is_unsafe_field_sep;
bool fixed_row_size;
public:
- select_export(sql_exchange *ex) :select_to_file(ex) {}
+ /**
+ Creates a select_export to represent INTO OUTFILE <filename> with a
+ defined level of subquery nesting.
+ */
+ select_export(sql_exchange *ex, uint nest_level_arg) :select_to_file(ex)
+ {
+ nest_level= nest_level_arg;
+ }
~select_export();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
@@ -2043,7 +2057,15 @@ public:
class select_dump :public select_to_file {
public:
- select_dump(sql_exchange *ex) :select_to_file(ex) {}
+ /**
+ Creates a select_dump to represent INTO DUMPFILE <filename> with a
+ defined level of subquery nesting.
+ */
+ select_dump(sql_exchange *ex, uint nest_level_arg) :
+ select_to_file(ex)
+ {
+ nest_level= nest_level_arg;
+ }
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
};
@@ -2461,7 +2483,16 @@ class select_dumpvar :public select_result_interceptor {
ha_rows row_count;
public:
List<my_var> var_list;
- select_dumpvar() { var_list.empty(); row_count= 0;}
+ /**
+ Creates a select_dumpvar to represent INTO <variable> with a defined
+ level of subquery nesting.
+ */
+ select_dumpvar(uint nest_level_arg)
+ {
+ var_list.empty();
+ row_count= 0;
+ nest_level= nest_level_arg;
+ }
~select_dumpvar() {}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
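
The sql_class.h additions give every select_result a nest_level (-1 when undefined), and the INTO-handling sinks (select_export, select_dump, select_dumpvar) record the subquery nesting depth they were parsed at. The union_list rule in sql_yacc.yy further down then rejects INTO only when it sits at the same nest level as the UNION, so an INTO inside a nested subquery no longer triggers ER_WRONG_USAGE. A compact model of that check, with simplified class names rather than the server's hierarchy:

// Simplified model of the nest-level check added for SELECT ... INTO inside
// a UNION: an INTO target remembers the subquery nesting depth it was parsed
// at, and the UNION rule only raises ER_WRONG_USAGE when the INTO belongs to
// the same level as the UNION itself (or to no level at all).
class ResultSink {
protected:
  int nest_level = -1;          // -1 means "nest level not defined"
public:
  virtual ~ResultSink() = default;
  int get_nest_level() const { return nest_level; }
};

class DumpToFile : public ResultSink {   // stands in for select_export/select_dump
public:
  explicit DumpToFile(int nest_level_arg) { nest_level = nest_level_arg; }
};

// Returns true when the parser should reject "UNION ... INTO" as misuse.
static bool union_conflicts_with_into(const ResultSink *result,
                                      int union_nest_level)
{
  return result != nullptr &&
         (result->get_nest_level() == -1 ||
          result->get_nest_level() == union_nest_level);
}

Per the comment added in the grammar below, 5.1 and later tighten this again by allowing INTO only at the top level.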
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index d6cb072ce9b..17b6a4a44ab 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -9041,6 +9041,8 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
*((*copy_func)++) = item; // Save for copy_funcs
if (modify_item)
item->set_result_field(new_field);
+ if (item->type() == Item::NULL_ITEM)
+ new_field->is_created_from_null_item= TRUE;
return new_field;
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index bd85ce55edb..c4aca1df7ec 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -4211,6 +4211,14 @@ select_paren:
my_parse_error(ER(ER_SYNTAX_ERROR));
MYSQL_YYABORT;
}
+ if (sel->linkage == UNION_TYPE &&
+ sel->olap != UNSPECIFIED_OLAP_TYPE &&
+ sel->master_unit()->fake_select_lex)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0),
+ "CUBE/ROLLUP", "ORDER BY");
+ MYSQL_YYABORT;
+ }
/* select in braces, can't contain global parameters */
if (sel->master_unit()->fake_select_lex)
sel->master_unit()->global_parameters=
@@ -6165,7 +6173,8 @@ order_clause:
SELECT_LEX *sel= lex->current_select;
SELECT_LEX_UNIT *unit= sel-> master_unit();
if (sel->linkage != GLOBAL_OPTIONS_TYPE &&
- sel->olap != UNSPECIFIED_OLAP_TYPE)
+ sel->olap != UNSPECIFIED_OLAP_TYPE &&
+ (sel->linkage != UNION_TYPE || sel->braces))
{
my_error(ER_WRONG_USAGE, MYF(0),
"CUBE/ROLLUP", "ORDER BY");
@@ -6341,7 +6350,8 @@ procedure_item:
select_var_list_init:
{
LEX *lex=Lex;
- if (!lex->describe && (!(lex->result= new select_dumpvar())))
+ if (!lex->describe &&
+ (!(lex->result= new select_dumpvar(lex->nest_level))))
MYSQL_YYABORT;
}
select_var_list
@@ -6415,7 +6425,7 @@ into_destination:
LEX *lex= Lex;
lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
if (!(lex->exchange= new sql_exchange($2.str, 0)) ||
- !(lex->result= new select_export(lex->exchange)))
+ !(lex->result= new select_export(lex->exchange, lex->nest_level)))
MYSQL_YYABORT;
}
opt_field_term opt_line_term
@@ -6427,7 +6437,7 @@ into_destination:
lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
if (!(lex->exchange= new sql_exchange($2.str,1)))
MYSQL_YYABORT;
- if (!(lex->result= new select_dump(lex->exchange)))
+ if (!(lex->result= new select_dump(lex->exchange, lex->nest_level)))
MYSQL_YYABORT;
}
}
@@ -9418,12 +9428,18 @@ union_list:
UNION_SYM union_option
{
LEX *lex=Lex;
- if (lex->result)
- {
- /* Only the last SELECT can have INTO...... */
- my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO");
- MYSQL_YYABORT;
- }
+ if (lex->result &&
+ (lex->result->get_nest_level() == -1 ||
+ lex->result->get_nest_level() == lex->nest_level))
+ {
+ /*
+ Only the last SELECT can have INTO unless the INTO and UNION
+ are at different nest levels. In version 5.1 and above, INTO
+ will only be allowed at the top level.
+ */
+ my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO");
+ MYSQL_YYABORT;
+ }
if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE)
{
my_parse_error(ER(ER_SYNTAX_ERROR));
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 9e6c77d7b62..b581ad4655a 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -165,6 +165,14 @@ bool mysql_create_frm(THD *thd, my_string file_name,
strmake((char*) forminfo+47, create_info->comment.str ?
create_info->comment.str : "", create_info->comment.length);
forminfo[46]=(uchar) create_info->comment.length;
+#ifdef EXTRA_DEBUG
+ /*
+ EXTRA_DEBUG causes strmake() to initialize its buffer behind the
+ payload with a magic value to detect wrong buffer-sizes. We
+ explicitly zero that segment again.
+ */
+ memset((char*) forminfo+47 + forminfo[46], 0, 61 - forminfo[46]);
+#endif
if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
my_pwrite(file,(byte*) keybuff,key_info_length,
(ulong) uint2korr(fileinfo+6),MYF_RW))