author    unknown <gluh@gluh.mysql.r18.ru>  2004-04-05 19:14:31 +0500
committer unknown <gluh@gluh.mysql.r18.ru>  2004-04-05 19:14:31 +0500
commit    66de313563a74099d416767fec884ab590530d31
tree      7d76f49148e3ca862c1ca481ac7ed91bf5d117d8 /sql
parent    8c06c8c01025668cdfa6ae66f25060b012ef2347
parent    829902b028765b0f17927ea72d35b020f6d8afad
download  mariadb-git-66de313563a74099d416767fec884ab590530d31.tar.gz
Merge sgluhov@bk-internal.mysql.com:/home/bk/mysql-4.1
into gluh.mysql.r18.ru:/home/gluh/mysql-4.1.curr

sql/sql_yacc.yy:
  Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/derror.cc          4
-rw-r--r--  sql/field.cc          11
-rw-r--r--  sql/field.h            1
-rw-r--r--  sql/ha_innodb.cc      13
-rw-r--r--  sql/item.cc            1
-rw-r--r--  sql/item_cmpfunc.cc    4
-rw-r--r--  sql/item_func.cc       3
-rw-r--r--  sql/item_geofunc.cc   16
-rw-r--r--  sql/item_sum.cc      277
-rw-r--r--  sql/item_sum.h        85
-rw-r--r--  sql/log.cc             1
-rw-r--r--  sql/log_event.cc       2
-rw-r--r--  sql/mysqld.cc          3
-rw-r--r--  sql/protocol.cc        2
-rw-r--r--  sql/set_var.cc         3
-rw-r--r--  sql/slave.cc          31
-rw-r--r--  sql/sql_cache.cc       4
-rw-r--r--  sql/sql_derived.cc     1
-rw-r--r--  sql/sql_insert.cc      6
-rw-r--r--  sql/sql_lex.cc         2
-rw-r--r--  sql/sql_lex.h          2
-rw-r--r--  sql/sql_parse.cc      17
-rw-r--r--  sql/sql_prepare.cc     3
-rw-r--r--  sql/sql_select.cc     35
-rw-r--r--  sql/sql_show.cc        7
-rw-r--r--  sql/sql_yacc.yy       13
26 files changed, 299 insertions(+), 248 deletions(-)
diff --git a/sql/derror.cc b/sql/derror.cc
index 53d0dc5b7e5..09f43d20044 100644
--- a/sql/derror.cc
+++ b/sql/derror.cc
@@ -50,7 +50,7 @@ static bool read_texts(const char *file_name,const char ***point,
char name[FN_REFLEN];
const char *buff;
uchar head[32],*pos;
- CHARSET_INFO *cset;
+ CHARSET_INFO *cset; // For future
DBUG_ENTER("read_texts");
*point=0; // If something goes wrong
@@ -137,7 +137,7 @@ err1:
if (file != FERR)
VOID(my_close(file,MYF(MY_WME)));
unireg_abort(1);
- DBUG_RETURN(1); // Impossible
+ DBUG_RETURN(1); // keep compiler happy
} /* read_texts */
diff --git a/sql/field.cc b/sql/field.cc
index 238d5e36147..8408bfdf578 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -161,13 +161,6 @@ static bool test_if_real(const char *str,int length, CHARSET_INFO *cs)
}
#endif
-static inline uint field_length_without_space(const char *ptr, uint length)
-{
- const char *end= ptr+length;
- while (end > ptr && end[-1] == ' ')
- end--;
- return (uint) (end-ptr);
-}
/*
Tables of filed type compatibility.
@@ -306,7 +299,7 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
field_name(field_name_arg),
query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0),
unireg_check(unireg_check_arg),
- field_length(length_arg),null_bit(null_bit_arg),abs_offset(0)
+ field_length(length_arg),null_bit(null_bit_arg)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
@@ -5597,7 +5590,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length)
case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen
default: return 0;
}
- return 0; // This shouldn't happen
+ return 0; // Keep compiler happy
}
diff --git a/sql/field.h b/sql/field.h
index 258c18257f0..75bb96f2f6d 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -87,7 +87,6 @@ public:
uint32 field_length; // Length of field
uint16 flags;
uchar null_bit; // Bit used to test null bit
- uint abs_offset; // use only in group_concat
Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg,
utype unireg_check_arg, const char *field_name_arg,
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 307bd13885c..80721ae373c 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -134,7 +134,6 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length,
my_bool not_used __attribute__((unused)));
static INNOBASE_SHARE *get_share(const char *table_name);
static void free_share(INNOBASE_SHARE *share);
-static void innobase_print_error(const char* db_errpfx, char* buffer);
/* General functions */
@@ -1292,18 +1291,6 @@ innobase_close_connection(
return(0);
}
-/**********************************************************************
-Prints an error message. */
-static
-void
-innobase_print_error(
-/*=================*/
- const char* db_errpfx, /* in: error prefix text */
- char* buffer) /* in: error text */
-{
- sql_print_error("%s: %s", db_errpfx, buffer);
-}
-
/*****************************************************************************
** InnoDB database tables
diff --git a/sql/item.cc b/sql/item.cc
index 8da4990942c..e3cf43709ba 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -771,7 +771,6 @@ String *Item_param::query_val_str(String* str)
case INT_RESULT:
case REAL_RESULT:
return val_str(str);
- break;
default:
str->set("'", 1, default_charset());
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 24d60b51eab..ae6658c8e35 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1472,16 +1472,12 @@ cmp_item* cmp_item::get_comparator(Item *item)
switch (item->result_type()) {
case STRING_RESULT:
return new cmp_item_sort_string(item->collation.collation);
- break;
case INT_RESULT:
return new cmp_item_int;
- break;
case REAL_RESULT:
return new cmp_item_real;
- break;
case ROW_RESULT:
return new cmp_item_row;
- break;
default:
DBUG_ASSERT(0);
break;
diff --git a/sql/item_func.cc b/sql/item_func.cc
index fdc0ee9d6a0..d7e778171a0 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1143,7 +1143,6 @@ String *Item_func_min_max::val_str(String *str)
// This case should never be choosen
DBUG_ASSERT(0);
return 0;
-
}
return 0; // Keep compiler happy
}
@@ -2442,7 +2441,6 @@ Item_func_set_user_var::check()
save_result.vint= args[0]->val_int();
break;
}
- break;
case STRING_RESULT:
{
save_result.vstr= args[0]->val_str(&value);
@@ -2494,7 +2492,6 @@ Item_func_set_user_var::update()
INT_RESULT, &my_charset_bin, DERIVATION_NONE);
break;
}
- break;
case STRING_RESULT:
{
if (!save_result.vstr) // Null value
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index a1305e0b1d9..555c1a74eaf 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -66,7 +66,6 @@ String *Item_func_geometry_from_wkb::val_str(String *str)
String arg_val;
String *wkb= args[0]->val_str(&arg_val);
Geometry_buffer buffer;
- Geometry *geom;
uint32 srid= 0;
if ((arg_count == 2) && !args[1]->null_value)
@@ -78,7 +77,7 @@ String *Item_func_geometry_from_wkb::val_str(String *str)
str->q_append(srid);
if ((null_value=
(args[0]->null_value ||
- !(geom= Geometry::create_from_wkb(&buffer, wkb->ptr(), wkb->length())) ||
+ !Geometry::create_from_wkb(&buffer, wkb->ptr(), wkb->length()) ||
str->append(*wkb))))
return 0;
return str;
@@ -126,12 +125,11 @@ String *Item_func_as_wkb::val_str(String *str)
String arg_val;
String *swkb= args[0]->val_str(&arg_val);
Geometry_buffer buffer;
- Geometry *geom;
if ((null_value=
(args[0]->null_value ||
- !(geom= Geometry::create_from_wkb(&buffer, swkb->ptr() + SRID_SIZE,
- swkb->length() - SRID_SIZE)))))
+ !(Geometry::create_from_wkb(&buffer, swkb->ptr() + SRID_SIZE,
+ swkb->length() - SRID_SIZE)))))
return 0;
str->copy(swkb->ptr() + SRID_SIZE, swkb->length() - SRID_SIZE,
@@ -701,10 +699,10 @@ longlong Item_func_srid::val_int()
Geometry_buffer buffer;
Geometry *geom;
- null_value= !swkb ||
- !(geom= Geometry::create_from_wkb(&buffer,
- swkb->ptr() + SRID_SIZE,
- swkb->length() - SRID_SIZE));
+ null_value= (!swkb ||
+ !Geometry::create_from_wkb(&buffer,
+ swkb->ptr() + SRID_SIZE,
+ swkb->length() - SRID_SIZE));
if (null_value)
return 0;
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 113ef0e1922..7df2299239f 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -41,7 +41,11 @@ Item_sum::Item_sum(List<Item> &list)
list.empty(); // Fields are used
}
-// Constructor used in processing select with temporary tebles
+
+/*
+ Constructor used in processing select with temporary tebles
+*/
+
Item_sum::Item_sum(THD *thd, Item_sum *item):
Item_result_field(thd, item), quick_group(item->quick_group)
{
@@ -55,6 +59,7 @@ Item_sum::Item_sum(THD *thd, Item_sum *item):
args[i]= item->args[i];
}
+
void Item_sum::mark_as_sum_func()
{
current_thd->lex->current_select->with_sum_func= 1;
@@ -1495,10 +1500,17 @@ String *Item_sum_udf_str::val_str(String *str)
/*****************************************************************************
GROUP_CONCAT function
- Syntax:
- GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...]
- [SEPARATOR str_const])
+
+ SQL SYNTAX:
+ GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...]
+ [SEPARATOR str_const])
+
concat of values from "group by" operation
+
+ BUGS
+ DISTINCT and ORDER BY only works if ORDER BY uses all fields and only fields
+ in expression list
+ Blobs doesn't work with DISTINCT or ORDER BY
*****************************************************************************/
/*
@@ -1510,24 +1522,22 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
byte* key2)
{
Item_func_group_concat* item= (Item_func_group_concat*)arg;
+ uint *offset= item->field_offsets+ item->field_list_offset;
+ Item **field_item, **end;
- for (uint i= 0; i < item->arg_count_field; i++)
+ for (field_item= item->args, end= field_item+item->arg_count_field;
+ field_item < end;
+ field_item++)
{
- Item *field_item= item->args[i];
- Field *field= field_item->real_item()->get_tmp_table_field();
+ Field *field= (*field_item)->real_item()->get_tmp_table_field();
if (field)
{
- uint offset= field->abs_offset;
-
- int res= field->key_cmp(key1 + offset, key2 + offset);
- /*
- if key1 and key2 is not equal than field->key_cmp return offset. This
- function must return value 1 for this case.
- */
- if (res)
- return 1;
+ int res;
+ if ((res= field->key_cmp(key1 + *offset, key2 + *offset)))
+ return res;
+ offset++;
}
- }
+ }
return 0;
}
@@ -1540,22 +1550,23 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
{
Item_func_group_concat* item= (Item_func_group_concat*)arg;
+ uint *offset= item->field_offsets;
+ ORDER **order_item, **end;
- for (uint i=0; i < item->arg_count_order; i++)
+ for (order_item= item->order, end=order_item+ item->arg_count_order;
+ order_item < end;
+ order_item++)
{
- ORDER *order_item= item->order[i];
- Item *item= *order_item->item;
+ Item *item= *(*order_item)->item;
Field *field= item->real_item()->get_tmp_table_field();
if (field)
{
- uint offset= field->abs_offset;
-
- bool dir= order_item->asc;
- int res= field->key_cmp(key1 + offset, key2 + offset);
+ int res= field->key_cmp(key1 + *offset, key2 + *offset);
if (res)
- return dir ? res : -res;
+ return (*order_item)->asc ? res : -res;
+ offset++;
}
- }
+ }
/*
We can't return 0 because tree class remove this item as double value.
*/
@@ -1566,6 +1577,11 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
/*
function of sort for syntax:
GROUP_CONCAT(DISTINCT expr,... ORDER BY col,... )
+
+ BUG:
+ This doesn't work in the case when the order by contains data that
+ is not part of the field list because tree-insert will not notice
+ the duplicated values when inserting things sorted by ORDER BY
*/
int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1,
@@ -1578,58 +1594,53 @@ int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1,
/*
- create result
- item is pointer to Item_func_group_concat
+ Append data from current leaf to item->result
*/
int dump_leaf_key(byte* key, uint32 count __attribute__((unused)),
- Item_func_group_concat *group_concat_item)
+ Item_func_group_concat *item)
{
char buff[MAX_FIELD_WIDTH];
String tmp((char *)&buff,sizeof(buff),default_charset_info);
String tmp2((char *)&buff,sizeof(buff),default_charset_info);
-
+ uint *field_offsets= (item->field_offsets +
+ item->field_list_offset);
tmp.length(0);
- for (uint i= 0; i < group_concat_item->arg_show_fields; i++)
+ for (uint i= 0; i < item->arg_count_field; i++)
{
- Item *show_item= group_concat_item->args[i];
+ Item *show_item= item->args[i];
if (!show_item->const_item())
{
Field *f= show_item->real_item()->get_tmp_table_field();
char *sv= f->ptr;
- f->ptr= (char *)key + f->abs_offset;
+ f->ptr= (char *) key + *(field_offsets++);
String *res= f->val_str(&tmp,&tmp2);
- group_concat_item->result.append(*res);
+ item->result.append(*res);
f->ptr= sv;
}
else
{
String *res= show_item->val_str(&tmp);
if (res)
- group_concat_item->result.append(*res);
+ item->result.append(*res);
}
}
- if (group_concat_item->tree_mode) // Last item of tree
+ if (item->tree_mode) // Last item of tree
{
- group_concat_item->show_elements++;
- if (group_concat_item->show_elements <
- group_concat_item->tree->elements_in_tree)
- group_concat_item->result.append(*group_concat_item->separator);
+ item->show_elements++;
+ if (item->show_elements < item->tree->elements_in_tree)
+ item->result.append(*item->separator);
}
else
+ item->result.append(*item->separator);
+
+ /* stop if length of result more than group_concat_max_len */
+ if (item->result.length() > item->group_concat_max_len)
{
- group_concat_item->result.append(*group_concat_item->separator);
- }
- /*
- if length of result more than group_concat_max_len - stop !
- */
- if (group_concat_item->result.length() >
- group_concat_item->group_concat_max_len)
- {
- group_concat_item->count_cut_values++;
- group_concat_item->result.length(group_concat_item->group_concat_max_len);
- group_concat_item->warning_for_row= TRUE;
+ item->count_cut_values++;
+ item->result.length(item->group_concat_max_len);
+ item->warning_for_row= TRUE;
return 1;
}
return 0;
@@ -1654,51 +1665,85 @@ Item_func_group_concat::Item_func_group_concat(bool is_distinct,
separator(is_separator), tree(&tree_base), table(0),
order(0), tables_list(0),
show_elements(0), arg_count_order(0), arg_count_field(0),
- arg_show_fields(0), count_cut_values(0)
-
+ count_cut_values(0)
{
+ Item *item_select;
+ Item **arg_ptr;
+
original= 0;
quick_group= 0;
mark_as_sum_func();
order= 0;
group_concat_max_len= current_thd->variables.group_concat_max_len;
-
- arg_show_fields= arg_count_field= is_select->elements;
+ arg_count_field= is_select->elements;
arg_count_order= is_order ? is_order->elements : 0;
- arg_count= arg_count_field;
+ arg_count= arg_count_field + arg_count_order;
/*
We need to allocate:
- args - arg_count+arg_count_order (for possible order items in temporare
- tables)
+ args - arg_count_field+arg_count_order
+ (for possible order items in temporare tables)
order - arg_count_order
+ field_offset For offset withing the key
*/
- args= (Item**) sql_alloc(sizeof(Item*)*(arg_count+arg_count_order)+
- sizeof(ORDER*)*arg_count_order);
- if (!args)
+ if (!(args= (Item**) sql_alloc((sizeof(Item*) + sizeof(uint)) * arg_count +
+ sizeof(ORDER*)*arg_count_order)))
return;
+ order= (ORDER**)(args + arg_count);
+ field_offsets= (uint*) (order+ arg_count_order);
+
/* fill args items of show and sort */
- int i= 0;
List_iterator_fast<Item> li(*is_select);
- Item *item_select;
- for ( ; (item_select= li++) ; i++)
- args[i]= item_select;
+ for (arg_ptr=args ; (item_select= li++) ; arg_ptr++)
+ *arg_ptr= item_select;
if (arg_count_order)
{
- i= 0;
- order= (ORDER**)(args + arg_count + arg_count_order);
+ ORDER **order_ptr= order;
for (ORDER *order_item= (ORDER*) is_order->first;
- order_item != NULL;
- order_item= order_item->next)
+ order_item != NULL;
+ order_item= order_item->next)
{
- order[i++]= order_item;
+ (*order_ptr++)= order_item;
+ *arg_ptr= *order_item->item;
+ order_item->item= arg_ptr++;
}
}
}
+
+
+Item_func_group_concat::Item_func_group_concat(THD *thd,
+ Item_func_group_concat *item)
+ :Item_sum(thd, item),item_thd(thd),
+ tmp_table_param(item->tmp_table_param),
+ max_elements_in_tree(item->max_elements_in_tree),
+ warning(item->warning),
+ warning_available(item->warning_available),
+ key_length(item->key_length),
+ rec_offset(item->rec_offset),
+ tree_mode(item->tree_mode),
+ distinct(item->distinct),
+ warning_for_row(item->warning_for_row),
+ separator(item->separator),
+ tree(item->tree),
+ table(item->table),
+ order(item->order),
+ field_offsets(item->field_offsets),
+ tables_list(item->tables_list),
+ group_concat_max_len(item->group_concat_max_len),
+ show_elements(item->show_elements),
+ arg_count_order(item->arg_count_order),
+ arg_count_field(item->arg_count_field),
+ field_list_offset(item->field_list_offset),
+ count_cut_values(item->count_cut_values),
+ original(item)
+{
+ quick_group= item->quick_group;
+}
+
void Item_func_group_concat::cleanup()
@@ -1737,12 +1782,11 @@ Item_func_group_concat::~Item_func_group_concat()
*/
if (!original)
{
- THD *thd= current_thd;
if (warning_available)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values);
- warning->set_msg(thd, warn_buff);
+ warning->set_msg(current_thd, warn_buff);
}
}
}
@@ -1773,13 +1817,15 @@ void Item_func_group_concat::clear()
bool Item_func_group_concat::add()
{
+ bool record_is_null;
+
if (always_null)
return 0;
copy_fields(tmp_table_param);
copy_funcs(tmp_table_param->items_to_copy);
- bool record_is_null= TRUE;
- for (uint i= 0; i < arg_show_fields; i++)
+ record_is_null= TRUE;
+ for (uint i= 0; i < arg_count_field; i++)
{
Item *show_item= args[i];
if (!show_item->const_item())
@@ -1803,8 +1849,7 @@ bool Item_func_group_concat::add()
else
{
if (result.length() <= group_concat_max_len && !warning_for_row)
- dump_leaf_key(table->record[0] + rec_offset, 1,
- (Item_func_group_concat*)this);
+ dump_leaf_key(table->record[0] + rec_offset, 1, this);
}
return 0;
}
@@ -1832,24 +1877,19 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
thd->allow_sum_func= 0;
maybe_null= 0;
item_thd= thd;
- for (i= 0 ; i < arg_count ; i++)
- {
- if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1))
- return 1;
- maybe_null |= args[i]->maybe_null;
- }
+
/*
- Fix fields for order clause in function:
- GROUP_CONCAT(expr,... ORDER BY col,... )
+ Fix fields for select list and ORDER clause
*/
- for (i= 0 ; i < arg_count_order ; i++)
+
+ for (i= 0 ; i < arg_count ; i++)
{
- // order_item->item can be changed by fix_fields() call
- ORDER *order_item= order[i];
- if ((*order_item->item)->fix_fields(thd, tables, order_item->item) ||
- (*order_item->item)->check_cols(1))
+ if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1))
return 1;
+ if (i < arg_count_field && args[i]->maybe_null)
+ maybe_null= 0;
}
+
result_field= 0;
null_value= 1;
max_length= group_concat_max_len;
@@ -1864,23 +1904,29 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
bool Item_func_group_concat::setup(THD *thd)
{
- DBUG_ENTER("Item_func_group_concat::setup");
List<Item> list;
SELECT_LEX *select_lex= thd->lex->current_select;
+ Field **field, **field_end;
+ uint offset, *offsets, const_fields;
+ qsort_cmp2 compare_key;
+ DBUG_ENTER("Item_func_group_concat::setup");
if (select_lex->linkage == GLOBAL_OPTIONS_TYPE)
DBUG_RETURN(1);
+
/*
push all not constant fields to list and create temp table
*/
+ const_fields= 0;
always_null= 0;
- for (uint i= 0; i < arg_count; i++)
+ for (uint i= 0; i < arg_count_field; i++)
{
Item *item= args[i];
if (list.push_back(item))
DBUG_RETURN(1);
if (item->const_item())
{
+ const_fields++;
(void) item->val_int();
if (item->null_value)
always_null= 1;
@@ -1900,12 +1946,19 @@ bool Item_func_group_concat::setup(THD *thd)
count_field_types(tmp_table_param,all_fields,0);
if (table)
{
+ /*
+ We come here when we are getting the result from a temporary table,
+ not the original tables used in the query
+ */
free_tmp_table(thd, table);
tmp_table_param->cleanup();
}
/*
- We have to create a temporary table for that we get descriptions of fields
+ We have to create a temporary table to get descriptions of fields
(types, sizes and so on).
+
+ Note that in the table, we first have the ORDER BY fields, then the
+ field list.
*/
if (!(table=create_tmp_table(thd, tmp_table_param, all_fields, 0,
0, 0, 0,select_lex->options | thd->options,
@@ -1914,27 +1967,27 @@ bool Item_func_group_concat::setup(THD *thd)
table->file->extra(HA_EXTRA_NO_ROWS);
table->no_rows= 1;
-
- Field** field, **field_end;
- field_end = (field = table->field) + table->fields;
- uint offset = 0;
- for (key_length = 0; field < field_end; ++field)
+ field_end= (field= table->field) + table->fields;
+ offsets= field_offsets;
+ offset= 0;
+ for (key_length= 0; field < field_end; field++)
{
uint32 length= (*field)->pack_length();
- (*field)->abs_offset= offset;
+ (*offsets++)= offset;
offset+= length;
key_length += length;
- }
- rec_offset = table->reclength - key_length;
+ }
+ /* Offset to first result field in table */
+ field_list_offset= table->fields - (list.elements - const_fields);
+ /* Offset to first field */
+ rec_offset= (uint) (table->field[0]->ptr - table->record[0]);
if (tree_mode)
delete_tree(tree);
- /*
- choise function of sort
- */
+
+ /* choose function of sort */
tree_mode= distinct || arg_count_order;
- qsort_cmp2 compare_key;
if (tree_mode)
{
if (arg_count_order)
@@ -1946,21 +1999,20 @@ bool Item_func_group_concat::setup(THD *thd)
}
else
{
+ compare_key= NULL;
if (distinct)
compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct;
- else
- compare_key= NULL;
}
/*
- Create a tree of sort. Tree is used for a sort and a remove dubl
- values (according with syntax of the function). If function does't
+ Create a tree of sort. Tree is used for a sort and a remove double
+ values (according with syntax of the function). If function doesn't
contain DISTINCT and ORDER BY clauses, we don't create this tree.
*/
init_tree(tree, min(thd->variables.max_heap_table_size,
- thd->variables.sortbuff_size/16), 0,
+ thd->variables.sortbuff_size/16), 0,
key_length, compare_key, 0, NULL, (void*) this);
- max_elements_in_tree= ((key_length) ?
- thd->variables.max_heap_table_size/key_length : 1);
+ max_elements_in_tree= (key_length ?
+ thd->variables.max_heap_table_size/key_length : 1);
};
/*
@@ -1975,6 +2027,7 @@ bool Item_func_group_concat::setup(THD *thd)
DBUG_RETURN(0);
}
+
/* This is used by rollup to create a separate usable copy of the function */
void Item_func_group_concat::make_unique()
@@ -2017,7 +2070,7 @@ void Item_func_group_concat::print(String *str)
str->append("group_concat(", 13);
if (distinct)
str->append("distinct ", 9);
- for (uint i= 0; i < arg_count; i++)
+ for (uint i= 0; i < arg_count_field; i++)
{
if (i)
str->append(',');
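
The GROUP_CONCAT comment added in the item_sum.cc hunks above documents the SQL syntax this aggregate implements. As an illustrative sketch (not part of this patch; the orders table and its columns are hypothetical), a query exercising every documented clause would look like:

    -- Hypothetical table and columns, shown only to illustrate the documented syntax
    SELECT customer_id,
           GROUP_CONCAT(DISTINCT product
                        ORDER BY product DESC
                        SEPARATOR '; ') AS products
    FROM orders
    GROUP BY customer_id;

Per the dump_leaf_key() change above, the concatenated value for each group is cut at group_concat_max_len, and such truncations are counted via count_cut_values so a warning can be raised later.
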
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 1c31f1a9b70..cac080c5807 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -204,18 +204,24 @@ class Item_sum_count_distinct :public Item_sum_int
uint key_length;
CHARSET_INFO *key_charset;
- // calculated based on max_heap_table_size. If reached,
- // walk the tree and dump it into MyISAM table
+ /*
+ Calculated based on max_heap_table_size. If reached,
+ walk the tree and dump it into MyISAM table
+ */
uint max_elements_in_tree;
- // the first few bytes of record ( at least one)
- // are just markers for deleted and NULLs. We want to skip them since
- // they will just bloat the tree without providing any valuable info
+ /*
+ The first few bytes of record ( at least one)
+ are just markers for deleted and NULLs. We want to skip them since
+ they will just bloat the tree without providing any valuable info
+ */
int rec_offset;
- // If there are no blobs, we can use a tree, which
- // is faster than heap table. In that case, we still use the table
- // to help get things set up, but we insert nothing in it
+ /*
+ If there are no blobs, we can use a tree, which
+ is faster than heap table. In that case, we still use the table
+ to help get things set up, but we insert nothing in it
+ */
bool use_tree;
bool always_null; // Set to 1 if the result is always NULL
@@ -236,7 +242,8 @@ class Item_sum_count_distinct :public Item_sum_int
Item_sum_count_distinct(THD *thd, Item_sum_count_distinct *item)
:Item_sum_int(thd, item), table(item->table),
used_table_cache(item->used_table_cache),
- field_lengths(item->field_lengths), tmp_table_param(item->tmp_table_param),
+ field_lengths(item->field_lengths),
+ tmp_table_param(item->tmp_table_param),
tree(item->tree), original(item), key_length(item->key_length),
max_elements_in_tree(item->max_elements_in_tree),
rec_offset(item->rec_offset), use_tree(item->use_tree),
@@ -318,18 +325,17 @@ public:
void fix_length_and_dec() {}
};
-/*
-
-variance(a) =
-
-= sum (ai - avg(a))^2 / count(a) )
-= sum (ai^2 - 2*ai*avg(a) + avg(a)^2) / count(a)
-= (sum(ai^2) - sum(2*ai*avg(a)) + sum(avg(a)^2))/count(a) =
-= (sum(ai^2) - 2*avg(a)*sum(a) + count(a)*avg(a)^2)/count(a) =
-= (sum(ai^2) - 2*sum(a)*sum(a)/count(a) + count(a)*sum(a)^2/count(a)^2 )/count(a) =
-= (sum(ai^2) - 2*sum(a)^2/count(a) + sum(a)^2/count(a) )/count(a) =
-= (sum(ai^2) - sum(a)^2/count(a))/count(a)
+/*
+ variance(a) =
+
+ = sum (ai - avg(a))^2 / count(a) )
+ = sum (ai^2 - 2*ai*avg(a) + avg(a)^2) / count(a)
+ = (sum(ai^2) - sum(2*ai*avg(a)) + sum(avg(a)^2))/count(a) =
+ = (sum(ai^2) - 2*avg(a)*sum(a) + count(a)*avg(a)^2)/count(a) =
+ = (sum(ai^2) - 2*sum(a)*sum(a)/count(a) + count(a)*sum(a)^2/count(a)^2 )/count(a) =
+ = (sum(ai^2) - 2*sum(a)^2/count(a) + sum(a)^2/count(a) )/count(a) =
+ = (sum(ai^2) - sum(a)^2/count(a))/count(a)
*/
class Item_sum_variance : public Item_sum_num
@@ -515,8 +521,9 @@ class Item_sum_xor :public Item_sum_bit
/*
-** user defined aggregates
+ User defined aggregates
*/
+
#ifdef HAVE_DLOPEN
class Item_udf_sum : public Item_sum
@@ -682,12 +689,13 @@ class Item_func_group_concat : public Item_sum
friend int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
byte* key2);
- friend int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2);
+ friend int group_concat_key_cmp_with_order(void* arg, byte* key1,
+ byte* key2);
friend int group_concat_key_cmp_with_distinct_and_order(void* arg,
byte* key1,
byte* key2);
friend int dump_leaf_key(byte* key, uint32 count __attribute__((unused)),
- Item_func_group_concat *group_concat_item);
+ Item_func_group_concat *group_concat_item);
public:
String result;
@@ -696,12 +704,13 @@ class Item_func_group_concat : public Item_sum
TREE *tree;
TABLE *table;
ORDER **order;
+ uint *field_offsets;
TABLE_LIST *tables_list;
ulong group_concat_max_len;
uint show_elements;
uint arg_count_order;
uint arg_count_field;
- uint arg_show_fields;
+ uint field_list_offset;
uint count_cut_values;
/*
Following is 0 normal object and pointer to original one for copy
@@ -712,38 +721,12 @@ class Item_func_group_concat : public Item_sum
Item_func_group_concat(bool is_distinct,List<Item> *is_select,
SQL_LIST *is_order,String *is_separator);
- Item_func_group_concat(THD *thd, Item_func_group_concat *item)
- :Item_sum(thd, item),item_thd(thd),
- tmp_table_param(item->tmp_table_param),
- max_elements_in_tree(item->max_elements_in_tree),
- warning(item->warning),
- warning_available(item->warning_available),
- key_length(item->key_length),
- rec_offset(item->rec_offset),
- tree_mode(item->tree_mode),
- distinct(item->distinct),
- warning_for_row(item->warning_for_row),
- separator(item->separator),
- tree(item->tree),
- table(item->table),
- order(item->order),
- tables_list(item->tables_list),
- group_concat_max_len(item->group_concat_max_len),
- show_elements(item->show_elements),
- arg_count_order(item->arg_count_order),
- arg_count_field(item->arg_count_field),
- arg_show_fields(item->arg_show_fields),
- count_cut_values(item->count_cut_values),
- original(item)
- {
- quick_group= item->quick_group;
- };
+ Item_func_group_concat(THD *thd, Item_func_group_concat *item);
~Item_func_group_concat();
void cleanup();
enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;}
const char *func_name() const { return "group_concat"; }
- enum Type type() const { return SUM_FUNC_ITEM; }
virtual Item_result result_type () const { return STRING_RESULT; }
void clear();
bool add();
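
The variance comment reformatted above derives variance(a) = (sum(ai^2) - sum(a)^2/count(a))/count(a). A simple way to sanity-check that identity against the server is to compare the built-in aggregate with the expanded expression. This is an illustrative query only (table t and column x are hypothetical), and it assumes VARIANCE() returns the population variance used in the derivation:

    -- Both columns should agree for any non-empty group
    SELECT VARIANCE(x)                                    AS var_builtin,
           (SUM(x*x) - SUM(x)*SUM(x)/COUNT(x)) / COUNT(x) AS var_identity
    FROM t;
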
diff --git a/sql/log.cc b/sql/log.cc
index 6b091484a82..0cd9e7172c3 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -507,7 +507,6 @@ int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name,
RETURN VALUES
0 ok
LOG_INFO_EOF End of log-index-file found
- LOG_INFO_SEEK Could not allocate IO cache
LOG_INFO_IO Got IO error while reading file
*/
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 742cf6a1a82..84bbc496c9d 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -83,7 +83,7 @@ inline int ignored_error_code(int err_code)
pretty_print_str()
*/
-#ifndef MYSQL_CLIENT
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
static char *pretty_print_str(char *packet, char *str, int len)
{
char *end= str + len;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 0f75c7d14ef..62ec6bc1027 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2364,9 +2364,8 @@ Now disabling --log-slave-updates.");
{
if (global_system_variables.log_warnings)
sql_print_error("Warning: Failed to lock memory. Errno: %d\n",errno);
+ locked_in_memory= 0;
}
- else
- locked_in_memory=1;
}
#else
locked_in_memory=0;
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 40adc9e8961..61af6ffceaf 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -166,10 +166,10 @@ net_printf(THD *thd, uint errcode, ...)
const char *format;
#ifndef EMBEDDED_LIBRARY
const char *text_pos;
+ int head_length= NET_HEADER_SIZE;
#else
char text_pos[1024];
#endif
- int head_length= NET_HEADER_SIZE;
NET *net= &thd->net;
DBUG_ENTER("net_printf");
diff --git a/sql/set_var.cc b/sql/set_var.cc
index e3ed2a4cbd8..5c2cbecd52b 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -839,7 +839,8 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
{
char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0);
uint new_length= (var ? var->value->str_value.length() : 0);
- if (!old_value) old_value="";
+ if (!old_value)
+ old_value= (char*) "";
if (!(res= my_strdup_with_length(old_value, new_length, MYF(0))))
return 1;
/*
diff --git a/sql/slave.cc b/sql/slave.cc
index be9a5795233..fedf9e3dd69 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -285,8 +285,9 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
In this case, we will use the same IO_CACHE pointer to
read data as the IO thread is using to write data.
*/
- if (my_b_tell((rli->cur_log=rli->relay_log.get_log_file())) == 0 &&
- check_binlog_magic(rli->cur_log,errmsg))
+ rli->cur_log= rli->relay_log.get_log_file();
+ if (my_b_tell(rli->cur_log) == 0 &&
+ check_binlog_magic(rli->cur_log, errmsg))
goto err;
rli->cur_log_old_open_count=rli->relay_log.get_open_count();
}
@@ -1672,7 +1673,18 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
DBUG_ENTER("init_master_info");
if (mi->inited)
+ {
+ /*
+ We have to reset read position of relay-log-bin as we may have
+ already been reading from 'hotlog' when the slave was stopped
+ last time. If this case pos_in_file would be set and we would
+ get a crash when trying to read the signature for the binary
+ relay log.
+ */
+ my_b_seek(mi->rli.cur_log, (my_off_t) 0);
DBUG_RETURN(0);
+ }
+
mi->mysql=0;
mi->file_id=1;
fn_format(fname, master_info_fname, mysql_data_home, "", 4+32);
@@ -3616,13 +3628,16 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
mi->master_log_pos+= inc_pos;
DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos));
}
- else /* write the event to the relay log */
+ else
+ {
+ /* write the event to the relay log */
if (likely(!(error= rli->relay_log.appendv(buf,event_len,0))))
{
mi->master_log_pos+= inc_pos;
DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos));
rli->relay_log.harvest_bytes_written(&rli->log_space_total);
}
+ }
err:
pthread_mutex_unlock(&mi->data_lock);
@@ -4090,8 +4105,9 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s",
if (rli->relay_log.is_active(rli->linfo.log_file_name))
{
#ifdef EXTRA_DEBUG
- sql_print_error("next log '%s' is currently active",
- rli->linfo.log_file_name);
+ if (global_system_variables.log_warnings)
+ sql_print_error("next log '%s' is currently active",
+ rli->linfo.log_file_name);
#endif
rli->cur_log= cur_log= rli->relay_log.get_log_file();
rli->cur_log_old_open_count= rli->relay_log.get_open_count();
@@ -4119,8 +4135,9 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s",
from hot to cold, but not from cold to hot). No need for LOCK_log.
*/
#ifdef EXTRA_DEBUG
- sql_print_error("next log '%s' is not active",
- rli->linfo.log_file_name);
+ if (global_system_variables.log_warnings)
+ sql_print_error("next log '%s' is not active",
+ rli->linfo.log_file_name);
#endif
// open_binlog() will check the magic header
if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name,
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 39061a6501b..e1a15eff475 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1860,11 +1860,11 @@ my_bool Query_cache::write_result_data(Query_cache_block **result_block,
{
// It is success (nobody can prevent us write data)
STRUCT_UNLOCK(&structure_guard_mutex);
- byte *rest = (byte*) data;
- Query_cache_block *block = *result_block;
uint headers_len = (ALIGN_SIZE(sizeof(Query_cache_block)) +
ALIGN_SIZE(sizeof(Query_cache_result)));
#ifndef EMBEDDED_LIBRARY
+ Query_cache_block *block= *result_block;
+ byte *rest= (byte*) data;
// Now fill list of blocks that created by allocate_data_chain
do
{
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index ea47ca9f71d..81269a8cbcf 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -115,7 +115,6 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit,
TABLE *table;
int res;
select_union *derived_result;
- TABLE_LIST *tables= (TABLE_LIST *)first_select->table_list.first;
bool is_union= first_select->next_select() &&
first_select->next_select()->linkage == UNION_TYPE;
bool is_subsel= first_select->first_inner_unit() ? 1: 0;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f2764a6f1c0..d5a45dce0b7 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -21,12 +21,14 @@
#include "sql_acl.h"
static int check_null_fields(THD *thd,TABLE *entry);
+#ifndef EMBEDDED_LIBRARY
static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list);
static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup,
char *query, uint query_length, int log_on);
static void end_delayed_insert(THD *thd);
extern "C" pthread_handler_decl(handle_delayed_insert,arg);
static void unlink_blobs(register TABLE *table);
+#endif
/* Define to force use of my_malloc() if the allocated memory block is big */
@@ -131,7 +133,9 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
TABLE *table;
List_iterator_fast<List_item> its(values_list);
List_item *values;
- char *query=thd->query;
+#ifndef EMBEDDED_LIBRARY
+ char *query= thd->query;
+#endif
thr_lock_type lock_type = table_list->lock_type;
TABLE_LIST *insert_table_list= (TABLE_LIST*)
thd->lex->select_lex.table_list.first;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index df7d487194a..4c63a8b7c7f 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1389,7 +1389,7 @@ create_total_list_n_last_return(THD *thd_arg,
}
}
}
-end:
+
if (slave_list_first)
{
*new_table_list= slave_list_first;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index c86c7d4a81d..5538fb0e832 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -396,6 +396,7 @@ public:
SQL_LIST order_list; /* ORDER clause */
List<List_item> expr_list;
List<List_item> when_list; /* WHEN clause (expression) */
+ SQL_LIST *gorder_list;
ha_rows select_limit, offset_limit; /* LIMIT clause parameters */
// Arrays of pointers to top elements of all_fields list
Item **ref_pointer_array;
@@ -538,7 +539,6 @@ typedef struct st_lex
gptr yacc_yyss,yacc_yyvs;
THD *thd;
CHARSET_INFO *charset;
- SQL_LIST *gorder_list;
List<key_part_spec> col_list;
List<key_part_spec> ref_list;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index aa3e40e1df8..4d9930535a7 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -48,7 +48,9 @@
extern "C" int gethostname(char *name, int namelen);
#endif
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
static int check_for_max_user_connections(THD *thd, USER_CONN *uc);
+#endif
static void decrease_user_connections(USER_CONN *uc);
static bool check_db_used(THD *thd,TABLE_LIST *tables);
static bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *tables);
@@ -426,6 +428,8 @@ void init_max_user_conn(void)
1 error
*/
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+
static int check_for_max_user_connections(THD *thd, USER_CONN *uc)
{
int error=0;
@@ -456,7 +460,7 @@ static int check_for_max_user_connections(THD *thd, USER_CONN *uc)
(void) pthread_mutex_unlock(&LOCK_user_conn);
DBUG_RETURN(error);
}
-
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */
/*
Decrease user connection count
@@ -545,15 +549,15 @@ bool is_update_query(enum enum_sql_command command)
static bool check_mqh(THD *thd, uint check_command)
{
+#ifdef NO_EMBEDDED_ACCESS_CHECKS
+ return(0);
+#else
bool error=0;
time_t check_time = thd->start_time ? thd->start_time : time(NULL);
USER_CONN *uc=thd->user_connect;
DBUG_ENTER("check_mqh");
DBUG_ASSERT(uc != 0);
-#ifdef NO_EMBEDDED_ACCESS_CHECKS
- DBUG_RETURN(0);
-#else
/* If more than a hour since last check, reset resource checking */
if (check_time - uc->intime >= 3600)
{
@@ -3518,7 +3522,10 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
DBUG_ENTER("check_access");
DBUG_PRINT("enter",("want_access: %lu master_access: %lu", want_access,
thd->master_access));
- ulong db_access,dummy;
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ ulong db_access;
+#endif
+ ulong dummy;
if (save_priv)
*save_priv=0;
else
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 94fef4cafdc..66ea32c9af4 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1121,9 +1121,10 @@ static void reset_stmt_for_execute(Prepared_statement *stmt)
void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
{
ulong stmt_id= uint4korr(packet);
+#ifndef EMBEDDED_LIBRARY
uchar *packet_end= (uchar *) packet + packet_length - 1;
+#endif
Prepared_statement *stmt;
-
DBUG_ENTER("mysql_stmt_execute");
packet+= 9; /* stmt_id + 5 bytes of flags */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 63fe8d77f1a..45857a89ca9 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -3608,7 +3608,6 @@ static void
make_join_readinfo(JOIN *join, uint options)
{
uint i;
- SELECT_LEX *select_lex= &join->thd->lex->select_lex;
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
DBUG_ENTER("make_join_readinfo");
@@ -3878,9 +3877,7 @@ JOIN::join_free(bool full)
else
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
- {
tab->cleanup();
- }
table= 0;
}
}
@@ -4787,7 +4784,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
else
return new Field_double(item_sum->max_length,maybe_null,
item->name, table, item_sum->decimals);
- case Item_sum::VARIANCE_FUNC: /* Place for sum & count */
+ case Item_sum::VARIANCE_FUNC: /* Place for sum & count */
case Item_sum::STD_FUNC:
if (group)
return new Field_string(sizeof(double)*2+sizeof(longlong),
@@ -4815,17 +4812,19 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
default:
// This case should never be choosen
DBUG_ASSERT(0);
+ thd->fatal_error();
return 0;
}
}
- thd->fatal_error();
- return 0; // Error
+ /* We never come here */
}
case Item::FIELD_ITEM:
case Item::DEFAULT_VALUE_ITEM:
- return create_tmp_field_from_field(thd, (*from_field=
- ((Item_field*) item)->field),
+ {
+ Item_field *field= (Item_field*) item;
+ return create_tmp_field_from_field(thd, (*from_field= field->field),
item, table, modify_item);
+ }
case Item::FUNC_ITEM:
case Item::COND_ITEM:
case Item::FIELD_AVG_ITEM:
@@ -8543,6 +8542,23 @@ bool JOIN::alloc_func_list()
}
+/*
+ Initialize 'sum_funcs' array with all Item_sum objects
+
+ SYNOPSIS
+ make_sum_func_list()
+ field_list All items
+ send_fields Items in select list
+ before_group_by Set to 1 if this is called before GROUP BY handling
+
+ NOTES
+ Calls ::setup() for all item_sum objects in field_list
+
+ RETURN
+ 0 ok
+ 1 error
+*/
+
bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields,
bool before_group_by)
{
@@ -8579,7 +8595,7 @@ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields,
/*
- Change all funcs and sum_funcs to fields in tmp table, and create
+ Change all funcs and sum_funcs to fields in tmp table, and create
new list of all items.
change_to_use_tmp_fields()
@@ -9079,7 +9095,6 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
List<Item> field_list;
List<Item> item_list;
THD *thd=join->thd;
- SELECT_LEX *select_lex= &join->thd->lex->select_lex;
select_result *result=join->result;
Item *item_null= new Item_null();
CHARSET_INFO *cs= &my_charset_latin1;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index f93004976f2..506a5deda91 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -31,9 +31,11 @@ static const char *grant_names[]={
"select","insert","update","delete","create","drop","reload","shutdown",
"process","file","grant","references","index","alter"};
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
"grant_types",
grant_names};
+#endif
static int mysql_find_files(THD *thd,List<char> *files, const char *db,
const char *path, const char *wild, bool dir);
@@ -367,7 +369,9 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path,
char *ext;
MY_DIR *dirp;
FILEINFO *file;
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint col_access=thd->col_access;
+#endif
TABLE_LIST table_list;
DBUG_ENTER("mysql_find_files");
@@ -829,7 +833,9 @@ int mysqld_show_create_db(THD *thd, char *dbname,
char path[FN_REFLEN];
char buff[2048];
String buffer(buff, sizeof(buff), system_charset_info);
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint db_access;
+#endif
bool found_libchar;
HA_CREATE_INFO create;
uint create_options = create_info ? create_info->options : 0;
@@ -1138,7 +1144,6 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
{
const char *name_end;
char quote_char;
- uint part_len;
if (thd->variables.sql_mode & MODE_ANSI_QUOTES)
quote_char= '\"';
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 12216125034..e02334b6ef7 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -3066,7 +3066,7 @@ sum_expr:
| GROUP_CONCAT_SYM '(' opt_distinct expr_list opt_gorder_clause
opt_gconcat_separator ')'
{
- $$=new Item_func_group_concat($3,$4,Lex->gorder_list,$6);
+ $$=new Item_func_group_concat($3,$4,Select->gorder_list,$6);
$4->empty();
};
@@ -3082,16 +3082,15 @@ opt_gconcat_separator:
opt_gorder_clause:
/* empty */
{
- LEX *lex=Lex;
- lex->gorder_list = NULL;
+ Select->gorder_list = NULL;
}
| order_clause
{
- LEX *lex=Lex;
- lex->gorder_list=
- (SQL_LIST*) sql_memdup((char*) &lex->current_select->order_list,
+ SELECT_LEX *select= Select;
+ select->gorder_list=
+ (SQL_LIST*) sql_memdup((char*) &select->order_list,
sizeof(st_sql_list));
- lex->current_select->order_list.empty();
+ select->order_list.empty();
};