author     unknown <bell@sanja.is.com.ua>    2004-04-07 15:23:05 +0300
committer  unknown <bell@sanja.is.com.ua>    2004-04-07 15:23:05 +0300
commit     0948f9769b3e3da2e064e65015b86644af4acd78 (patch)
tree       c7d0df7f0334621904d6713b3ea5f4ba1c9cefbe /sql
parent     08594c4795d9e673f8f514142489ce01954deba5 (diff)
parent     91fb27a3ca13beffcfec0fef93f12c580bad260f (diff)
download   mariadb-git-0948f9769b3e3da2e064e65015b86644af4acd78.tar.gz
Merge sanja.is.com.ua:/home/bell/mysql/bk/mysql-4.1
into sanja.is.com.ua:/home/bell/mysql/bk/work-ps3-4.1
mysql-test/r/union.result:
Auto merged
sql/item.cc:
Auto merged
sql/item_sum.cc:
Auto merged
sql/item_sum.h:
Auto merged
sql/log_event.cc:
Auto merged
sql/slave.cc:
Auto merged
sql/sql_base.cc:
Auto merged
sql/sql_cache.cc:
Auto merged
sql/sql_insert.cc:
Auto merged
sql/sql_prepare.cc:
Auto merged
sql/sql_select.cc:
Auto merged
sql/sql_parse.cc:
Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/derror.cc        |   4
-rw-r--r--  sql/field.cc         |  34
-rw-r--r--  sql/field.h          |   1
-rw-r--r--  sql/filesort.cc      |  15
-rw-r--r--  sql/ha_innodb.cc     |  13
-rw-r--r--  sql/ha_myisam.cc     |   7
-rw-r--r--  sql/init.cc          |   2
-rw-r--r--  sql/item.cc          |   1
-rw-r--r--  sql/item_cmpfunc.cc  |   4
-rw-r--r--  sql/item_func.cc     |   3
-rw-r--r--  sql/item_geofunc.cc  |  16
-rw-r--r--  sql/item_sum.cc      | 329
-rw-r--r--  sql/item_sum.h       |  82
-rw-r--r--  sql/lex.h            |   2
-rw-r--r--  sql/log.cc           |   1
-rw-r--r--  sql/log_event.cc     |   2
-rw-r--r--  sql/mysqld.cc        |  13
-rw-r--r--  sql/protocol.cc      |   7
-rw-r--r--  sql/set_var.cc       |   3
-rw-r--r--  sql/slave.cc         |  31
-rw-r--r--  sql/sql_base.cc      |   2
-rw-r--r--  sql/sql_cache.cc     |   4
-rw-r--r--  sql/sql_derived.cc   |   1
-rw-r--r--  sql/sql_insert.cc    |   6
-rw-r--r--  sql/sql_lex.cc       |   2
-rw-r--r--  sql/sql_lex.h        |   2
-rw-r--r--  sql/sql_parse.cc     |  26
-rw-r--r--  sql/sql_prepare.cc   |   5
-rw-r--r--  sql/sql_select.cc    |  38
-rw-r--r--  sql/sql_show.cc      |   9
-rw-r--r--  sql/sql_table.cc     |  13
-rw-r--r--  sql/sql_yacc.yy      |  69
-rw-r--r--  sql/table.cc         |   3
33 files changed, 430 insertions, 320 deletions
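
The bulk of this merge reworks GROUP_CONCAT in sql/item_sum.cc and sql/item_sum.h around the syntax documented in the patch itself: GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...] [SEPARATOR str_const]). As a rough illustration of that syntax only — the table and column names below are hypothetical, not taken from this merge or its test suite — a query using the feature might look like:

    SELECT customer_id,
           GROUP_CONCAT(DISTINCT product ORDER BY product ASC SEPARATOR ', ')
      FROM orders
     GROUP BY customer_id;

Per the BUGS note added in the item_sum.cc hunk, DISTINCT combined with ORDER BY is only reliable when the ORDER BY columns are exactly the columns of the expression list, blobs do not work with DISTINCT or ORDER BY, and each group's result is truncated at group_concat_max_len with a warning counted via count_cut_values.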
diff --git a/sql/derror.cc b/sql/derror.cc index 53d0dc5b7e5..09f43d20044 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -50,7 +50,7 @@ static bool read_texts(const char *file_name,const char ***point, char name[FN_REFLEN]; const char *buff; uchar head[32],*pos; - CHARSET_INFO *cset; + CHARSET_INFO *cset; // For future DBUG_ENTER("read_texts"); *point=0; // If something goes wrong @@ -137,7 +137,7 @@ err1: if (file != FERR) VOID(my_close(file,MYF(MY_WME))); unireg_abort(1); - DBUG_RETURN(1); // Impossible + DBUG_RETURN(1); // keep compiler happy } /* read_texts */ diff --git a/sql/field.cc b/sql/field.cc index 238d5e36147..d099da2d959 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -161,13 +161,6 @@ static bool test_if_real(const char *str,int length, CHARSET_INFO *cs) } #endif -static inline uint field_length_without_space(const char *ptr, uint length) -{ - const char *end= ptr+length; - while (end > ptr && end[-1] == ' ') - end--; - return (uint) (end-ptr); -} /* Tables of filed type compatibility. @@ -306,7 +299,7 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, field_name(field_name_arg), query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0), unireg_check(unireg_check_arg), - field_length(length_arg),null_bit(null_bit_arg),abs_offset(0) + field_length(length_arg),null_bit(null_bit_arg) { flags=null_ptr ? 0: NOT_NULL_FLAG; comment.str= (char*) ""; @@ -2789,14 +2782,8 @@ int Field_double::cmp(const char *a_ptr, const char *b_ptr) else #endif { -/* could this ALWAYS be 2 calls to doubleget() ?? */ -#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) doubleget(a, a_ptr); doubleget(b, b_ptr); -#else - memcpy_fixed(&a,a_ptr,sizeof(double)); - memcpy_fixed(&b,b_ptr,sizeof(double)); -#endif } return (a < b) ? -1 : (a > b) ? 1 : 0; } @@ -2816,12 +2803,7 @@ void Field_double::sort_string(char *to,uint length __attribute__((unused))) } else #endif -/* could this ALWAYS be 2 calls to doubleget() ?? 
*/ -#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN) doubleget(nr,ptr); -#else - memcpy_fixed(&nr,ptr,sizeof(nr)); -#endif change_double_for_sort(nr, (byte*) to); } @@ -5597,7 +5579,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen default: return 0; } - return 0; // This shouldn't happen + return 0; // Keep compiler happy } @@ -5630,6 +5612,18 @@ Field *make_field(char *ptr, uint32 field_length, null_pos=0; null_bit=0; } + + switch (field_type) + { + case FIELD_TYPE_DATE: + case FIELD_TYPE_NEWDATE: + case FIELD_TYPE_TIME: + case FIELD_TYPE_DATETIME: + case FIELD_TYPE_TIMESTAMP: + field_charset= &my_charset_bin; + default: break; + } + if (f_is_alpha(pack_flag)) { if (!f_is_packed(pack_flag)) diff --git a/sql/field.h b/sql/field.h index 258c18257f0..75bb96f2f6d 100644 --- a/sql/field.h +++ b/sql/field.h @@ -87,7 +87,6 @@ public: uint32 field_length; // Length of field uint16 flags; uchar null_bit; // Bit used to test null bit - uint abs_offset; // use only in group_concat Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,uchar null_bit_arg, utype unireg_check_arg, const char *field_name_arg, diff --git a/sql/filesort.cc b/sql/filesort.cc index a15ffb43f6f..8699a517661 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -663,9 +663,22 @@ static void make_sortkey(register SORTPARAM *param, for ( ; (field= addonf->field) ; addonf++) { if (addonf->null_bit && field->is_null()) + { nulls[addonf->null_offset]|= addonf->null_bit; +#ifdef HAVE_purify + bzero(to, addonf->length); +#endif + } else - field->pack((char *) to, field->ptr); + { + uchar *end= (uchar*) field->pack((char *) to, field->ptr); +#ifdef HAVE_purify + uint length= (uint) ((to + addonf->length) - end); + DBUG_ASSERT((int) length >= 0); + if (length) + bzero(end, length); +#endif + } to+= addonf->length; } } diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 307bd13885c..80721ae373c 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -134,7 +134,6 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, my_bool not_used __attribute__((unused))); static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); -static void innobase_print_error(const char* db_errpfx, char* buffer); /* General functions */ @@ -1292,18 +1291,6 @@ innobase_close_connection( return(0); } -/********************************************************************** -Prints an error message. 
*/ -static -void -innobase_print_error( -/*=================*/ - const char* db_errpfx, /* in: error prefix text */ - char* buffer) /* in: error text */ -{ - sql_print_error("%s: %s", db_errpfx, buffer); -} - /***************************************************************************** ** InnoDB database tables diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 6ac3c52fe40..427edde2f4b 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -841,14 +841,11 @@ void ha_myisam::deactivate_non_unique_index(ha_rows rows) if (file->state->records == 0 && (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES)) mi_disable_non_unique_index(file,rows); - else - if (!file->bulk_insert && - (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT)) - { + else if (!file->bulk_insert && + (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT)) mi_init_bulk_insert(file, current_thd->variables.bulk_insert_buff_size, rows); - } } } enable_activate_all_index=1; diff --git a/sql/init.cc b/sql/init.cc index 084db57f8aa..4beb8db0c6f 100644 --- a/sql/init.cc +++ b/sql/init.cc @@ -46,7 +46,5 @@ void unireg_init(ulong options) log_10[i]= nr ; nr*= 10.0; } specialflag|=options; /* Set options from argv */ - - thread_stack_min=thread_stack - STACK_MIN_SIZE; DBUG_VOID_RETURN; } diff --git a/sql/item.cc b/sql/item.cc index 1efed78799a..a0ca28ebd99 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -771,7 +771,6 @@ String *Item_param::query_val_str(String* str) case INT_RESULT: case REAL_RESULT: return val_str(str); - break; default: str->set("'", 1, default_charset()); diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 24d60b51eab..ae6658c8e35 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1472,16 +1472,12 @@ cmp_item* cmp_item::get_comparator(Item *item) switch (item->result_type()) { case STRING_RESULT: return new cmp_item_sort_string(item->collation.collation); - break; case INT_RESULT: return new cmp_item_int; - break; case REAL_RESULT: return new cmp_item_real; - break; case ROW_RESULT: return new cmp_item_row; - break; default: DBUG_ASSERT(0); break; diff --git a/sql/item_func.cc b/sql/item_func.cc index fdc0ee9d6a0..d7e778171a0 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1143,7 +1143,6 @@ String *Item_func_min_max::val_str(String *str) // This case should never be choosen DBUG_ASSERT(0); return 0; - } return 0; // Keep compiler happy } @@ -2442,7 +2441,6 @@ Item_func_set_user_var::check() save_result.vint= args[0]->val_int(); break; } - break; case STRING_RESULT: { save_result.vstr= args[0]->val_str(&value); @@ -2494,7 +2492,6 @@ Item_func_set_user_var::update() INT_RESULT, &my_charset_bin, DERIVATION_NONE); break; } - break; case STRING_RESULT: { if (!save_result.vstr) // Null value diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index a1305e0b1d9..555c1a74eaf 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -66,7 +66,6 @@ String *Item_func_geometry_from_wkb::val_str(String *str) String arg_val; String *wkb= args[0]->val_str(&arg_val); Geometry_buffer buffer; - Geometry *geom; uint32 srid= 0; if ((arg_count == 2) && !args[1]->null_value) @@ -78,7 +77,7 @@ String *Item_func_geometry_from_wkb::val_str(String *str) str->q_append(srid); if ((null_value= (args[0]->null_value || - !(geom= Geometry::create_from_wkb(&buffer, wkb->ptr(), wkb->length())) || + !Geometry::create_from_wkb(&buffer, wkb->ptr(), wkb->length()) || str->append(*wkb)))) return 0; return str; @@ -126,12 +125,11 @@ String *Item_func_as_wkb::val_str(String *str) String arg_val; String *swkb= 
args[0]->val_str(&arg_val); Geometry_buffer buffer; - Geometry *geom; if ((null_value= (args[0]->null_value || - !(geom= Geometry::create_from_wkb(&buffer, swkb->ptr() + SRID_SIZE, - swkb->length() - SRID_SIZE))))) + !(Geometry::create_from_wkb(&buffer, swkb->ptr() + SRID_SIZE, + swkb->length() - SRID_SIZE))))) return 0; str->copy(swkb->ptr() + SRID_SIZE, swkb->length() - SRID_SIZE, @@ -701,10 +699,10 @@ longlong Item_func_srid::val_int() Geometry_buffer buffer; Geometry *geom; - null_value= !swkb || - !(geom= Geometry::create_from_wkb(&buffer, - swkb->ptr() + SRID_SIZE, - swkb->length() - SRID_SIZE)); + null_value= (!swkb || + !Geometry::create_from_wkb(&buffer, + swkb->ptr() + SRID_SIZE, + swkb->length() - SRID_SIZE)); if (null_value) return 0; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 26b859c8aba..3213ff94ab5 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -41,7 +41,11 @@ Item_sum::Item_sum(List<Item> &list) list.empty(); // Fields are used } -// Constructor used in processing select with temporary tebles + +/* + Constructor used in processing select with temporary tebles +*/ + Item_sum::Item_sum(THD *thd, Item_sum *item): Item_result_field(thd, item), arg_count(item->arg_count), quick_group(item->quick_group) @@ -1538,10 +1542,17 @@ String *Item_sum_udf_str::val_str(String *str) /***************************************************************************** GROUP_CONCAT function - Syntax: - GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...] - [SEPARATOR str_const]) + + SQL SYNTAX: + GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...] + [SEPARATOR str_const]) + concat of values from "group by" operation + + BUGS + DISTINCT and ORDER BY only works if ORDER BY uses all fields and only fields + in expression list + Blobs doesn't work with DISTINCT or ORDER BY *****************************************************************************/ /* @@ -1552,25 +1563,28 @@ String *Item_sum_udf_str::val_str(String *str) int group_concat_key_cmp_with_distinct(void* arg, byte* key1, byte* key2) { - Item_func_group_concat* item= (Item_func_group_concat*)arg; + Item_func_group_concat* grp_item= (Item_func_group_concat*)arg; + Item **field_item, **end; + char *record= (char*) grp_item->table->record[0]; - for (uint i= 0; i < item->arg_count_field; i++) + for (field_item= grp_item->args, end= field_item + grp_item->arg_count_field; + field_item < end; + field_item++) { - Item *field_item= item->args[i]; - Field *field= field_item->real_item()->get_tmp_table_field(); + /* + We have to use get_tmp_table_field() instead of + real_item()->get_tmp_table_field() because we want the field in + the temporary table, not the original field + */ + Field *field= (*field_item)->get_tmp_table_field(); if (field) { - uint offset= field->abs_offset; - - int res= field->key_cmp(key1 + offset, key2 + offset); - /* - if key1 and key2 is not equal than field->key_cmp return offset. This - function must return value 1 for this case. 
- */ - if (res) - return 1; + int res; + uint offset= (uint) (field->ptr - record); + if ((res= field->key_cmp(key1 + offset, key2 + offset))) + return res; } - } + } return 0; } @@ -1582,26 +1596,34 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) { - Item_func_group_concat* item= (Item_func_group_concat*)arg; + Item_func_group_concat* grp_item= (Item_func_group_concat*) arg; + ORDER **order_item, **end; + char *record= (char*) grp_item->table->record[0]; - for (uint i=0; i < item->arg_count_order; i++) + for (order_item= grp_item->order, end=order_item+ grp_item->arg_count_order; + order_item < end; + order_item++) { - ORDER *order_item= item->order[i]; - Item *item= *order_item->item; - Field *field= item->real_item()->get_tmp_table_field(); + Item *item= *(*order_item)->item; + /* + We have to use get_tmp_table_field() instead of + real_item()->get_tmp_table_field() because we want the field in + the temporary table, not the original field + */ + Field *field= item->get_tmp_table_field(); if (field) { - uint offset= field->abs_offset; - - bool dir= order_item->asc; - int res= field->key_cmp(key1 + offset, key2 + offset); - if (res) - return dir ? res : -res; + int res; + uint offset= (uint) (field->ptr - record); + if ((res= field->key_cmp(key1 + offset, key2 + offset))) + return (*order_item)->asc ? res : -res; } - } + } /* - We can't return 0 because tree class remove this item as double value. - */ + We can't return 0 because in that case the tree class would remove this + item as double value. This would cause problems for case-changes and + if the the returned values are not the same we do the sort on. + */ return 1; } @@ -1609,6 +1631,11 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) /* function of sort for syntax: GROUP_CONCAT(DISTINCT expr,... ORDER BY col,... 
) + + BUG: + This doesn't work in the case when the order by contains data that + is not part of the field list because tree-insert will not notice + the duplicated values when inserting things sorted by ORDER BY */ int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1, @@ -1621,58 +1648,61 @@ int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1, /* - create result - item is pointer to Item_func_group_concat + Append data from current leaf to item->result */ int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), - Item_func_group_concat *group_concat_item) + Item_func_group_concat *item) { char buff[MAX_FIELD_WIDTH]; - String tmp((char *)&buff,sizeof(buff),default_charset_info); - String tmp2((char *)&buff,sizeof(buff),default_charset_info); - + String tmp((char*) &buff, sizeof(buff), default_charset_info); + String tmp2((char *) &buff, sizeof(buff), default_charset_info); + char *record= (char*) item->table->record[0]; + tmp.length(0); - for (uint i= 0; i < group_concat_item->arg_show_fields; i++) + for (uint i= 0; i < item->arg_count_field; i++) { - Item *show_item= group_concat_item->args[i]; + Item *show_item= item->args[i]; if (!show_item->const_item()) { - Field *f= show_item->real_item()->get_tmp_table_field(); - char *sv= f->ptr; - f->ptr= (char *)key + f->abs_offset; - String *res= f->val_str(&tmp,&tmp2); - group_concat_item->result.append(*res); - f->ptr= sv; + /* + We have to use get_tmp_table_field() instead of + real_item()->get_tmp_table_field() because we want the field in + the temporary table, not the original field + */ + Field *field= show_item->get_tmp_table_field(); + String *res; + char *save_ptr= field->ptr; + uint offset= (uint) (save_ptr - record); + DBUG_ASSERT(offset < item->table->reclength); + field->ptr= (char *) key + offset; + res= field->val_str(&tmp,&tmp2); + item->result.append(*res); + field->ptr= save_ptr; } else { String *res= show_item->val_str(&tmp); if (res) - group_concat_item->result.append(*res); + item->result.append(*res); } } - if (group_concat_item->tree_mode) // Last item of tree + if (item->tree_mode) // Last item of tree { - group_concat_item->show_elements++; - if (group_concat_item->show_elements < - group_concat_item->tree->elements_in_tree) - group_concat_item->result.append(*group_concat_item->separator); + item->show_elements++; + if (item->show_elements < item->tree->elements_in_tree) + item->result.append(*item->separator); } else + item->result.append(*item->separator); + + /* stop if length of result more than group_concat_max_len */ + if (item->result.length() > item->group_concat_max_len) { - group_concat_item->result.append(*group_concat_item->separator); - } - /* - if length of result more than group_concat_max_len - stop ! 
- */ - if (group_concat_item->result.length() > - group_concat_item->group_concat_max_len) - { - group_concat_item->count_cut_values++; - group_concat_item->result.length(group_concat_item->group_concat_max_len); - group_concat_item->warning_for_row= TRUE; + item->count_cut_values++; + item->result.length(item->group_concat_max_len); + item->warning_for_row= TRUE; return 1; } return 0; @@ -1692,56 +1722,86 @@ Item_func_group_concat::Item_func_group_concat(bool is_distinct, SQL_LIST *is_order, String *is_separator) :Item_sum(), tmp_table_param(0), max_elements_in_tree(0), warning(0), - warning_available(0), key_length(0), rec_offset(0), + warning_available(0), key_length(0), tree_mode(0), distinct(is_distinct), warning_for_row(0), separator(is_separator), tree(&tree_base), table(0), order(0), tables_list(0), show_elements(0), arg_count_order(0), arg_count_field(0), - arg_show_fields(0), count_cut_values(0) - + count_cut_values(0) { + Item *item_select; + Item **arg_ptr; + original= 0; quick_group= 0; mark_as_sum_func(); order= 0; group_concat_max_len= current_thd->variables.group_concat_max_len; - - arg_show_fields= arg_count_field= is_select->elements; + arg_count_field= is_select->elements; arg_count_order= is_order ? is_order->elements : 0; - arg_count= arg_count_field; + arg_count= arg_count_field + arg_count_order; /* We need to allocate: - args - arg_count+arg_count_order (for possible order items in temporare - tables) + args - arg_count_field+arg_count_order + (for possible order items in temporare tables) order - arg_count_order */ - args= (Item**) sql_alloc(sizeof(Item*)*(arg_count+arg_count_order)+ - sizeof(ORDER*)*arg_count_order); - if (!args) + if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count + + sizeof(ORDER*)*arg_count_order))) return; + order= (ORDER**)(args + arg_count); + /* fill args items of show and sort */ - int i= 0; List_iterator_fast<Item> li(*is_select); - Item *item_select; - for ( ; (item_select= li++) ; i++) - args[i]= item_select; + for (arg_ptr=args ; (item_select= li++) ; arg_ptr++) + *arg_ptr= item_select; if (arg_count_order) { - i= 0; - order= (ORDER**)(args + arg_count + arg_count_order); + ORDER **order_ptr= order; for (ORDER *order_item= (ORDER*) is_order->first; - order_item != NULL; - order_item= order_item->next) + order_item != NULL; + order_item= order_item->next) { - order[i++]= order_item; + (*order_ptr++)= order_item; + *arg_ptr= *order_item->item; + order_item->item= arg_ptr++; } } } + + +Item_func_group_concat::Item_func_group_concat(THD *thd, + Item_func_group_concat *item) + :Item_sum(thd, item),item_thd(thd), + tmp_table_param(item->tmp_table_param), + max_elements_in_tree(item->max_elements_in_tree), + warning(item->warning), + warning_available(item->warning_available), + key_length(item->key_length), + tree_mode(item->tree_mode), + distinct(item->distinct), + warning_for_row(item->warning_for_row), + separator(item->separator), + tree(item->tree), + table(item->table), + order(item->order), + tables_list(item->tables_list), + group_concat_max_len(item->group_concat_max_len), + show_elements(item->show_elements), + arg_count_order(item->arg_count_order), + arg_count_field(item->arg_count_field), + field_list_offset(item->field_list_offset), + count_cut_values(item->count_cut_values), + original(item) +{ + quick_group= item->quick_group; +} + void Item_func_group_concat::cleanup() @@ -1785,12 +1845,11 @@ Item_func_group_concat::~Item_func_group_concat() */ if (!original) { - THD *thd= current_thd; if (warning_available) { char 
warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values); - warning->set_msg(thd, warn_buff); + warning->set_msg(current_thd, warn_buff); } } } @@ -1826,33 +1885,31 @@ bool Item_func_group_concat::add() copy_fields(tmp_table_param); copy_funcs(tmp_table_param->items_to_copy); - bool record_is_null= TRUE; - for (uint i= 0; i < arg_show_fields; i++) + for (uint i= 0; i < arg_count_field; i++) { Item *show_item= args[i]; if (!show_item->const_item()) { + /* + Here we use real_item as we want the original field data that should + be written to table->record[0] + */ Field *f= show_item->real_item()->get_tmp_table_field(); - if (!f->is_null()) - { - record_is_null= FALSE; - break; - } + if (f->is_null()) + return 0; // Skip row if it contains null } } - if (record_is_null) - return 0; + null_value= FALSE; if (tree_mode) { - if (!tree_insert(tree, table->record[0] + rec_offset, 0, tree->custom_arg)) + if (!tree_insert(tree, table->record[0], 0, tree->custom_arg)) return 1; } else { if (result.length() <= group_concat_max_len && !warning_for_row) - dump_leaf_key(table->record[0] + rec_offset, 1, - (Item_func_group_concat*)this); + dump_leaf_key(table->record[0], 1, this); } return 0; } @@ -1884,24 +1941,19 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) thd->allow_sum_func= 0; maybe_null= 0; item_thd= thd; - for (i= 0 ; i < arg_count ; i++) - { - if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1)) - return 1; - maybe_null |= args[i]->maybe_null; - } + /* - Fix fields for order clause in function: - GROUP_CONCAT(expr,... ORDER BY col,... ) + Fix fields for select list and ORDER clause */ - for (i= 0 ; i < arg_count_order ; i++) + + for (i= 0 ; i < arg_count ; i++) { - // order_item->item can be changed by fix_fields() call - ORDER *order_item= order[i]; - if ((*order_item->item)->fix_fields(thd, tables, order_item->item) || - (*order_item->item)->check_cols(1)) + if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1)) return 1; + if (i < arg_count_field && args[i]->maybe_null) + maybe_null= 0; } + result_field= 0; null_value= 1; max_length= group_concat_max_len; @@ -1916,23 +1968,29 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) bool Item_func_group_concat::setup(THD *thd) { - DBUG_ENTER("Item_func_group_concat::setup"); List<Item> list; SELECT_LEX *select_lex= thd->lex->current_select; + uint const_fields; + byte *record; + qsort_cmp2 compare_key; + DBUG_ENTER("Item_func_group_concat::setup"); if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) DBUG_RETURN(1); + /* push all not constant fields to list and create temp table */ + const_fields= 0; always_null= 0; - for (uint i= 0; i < arg_count; i++) + for (uint i= 0; i < arg_count_field; i++) { Item *item= args[i]; if (list.push_back(item)) DBUG_RETURN(1); if (item->const_item()) { + const_fields++; (void) item->val_int(); if (item->null_value) always_null= 1; @@ -1952,12 +2010,19 @@ bool Item_func_group_concat::setup(THD *thd) count_field_types(tmp_table_param,all_fields,0); if (table) { + /* + We come here when we are getting the result from a temporary table, + not the original tables used in the query + */ free_tmp_table(thd, table); tmp_table_param->cleanup(); } /* - We have to create a temporary table for that we get descriptions of fields + We have to create a temporary table to get descriptions of fields (types, sizes and so on). 
+ + Note that in the table, we first have the ORDER BY fields, then the + field list. */ if (!(table=create_tmp_table(thd, tmp_table_param, all_fields, 0, 0, 0, 0,select_lex->options | thd->options, @@ -1966,27 +2031,17 @@ bool Item_func_group_concat::setup(THD *thd) table->file->extra(HA_EXTRA_NO_ROWS); table->no_rows= 1; + key_length= table->reclength; + record= table->record[0]; - Field** field, **field_end; - field_end = (field = table->field) + table->fields; - uint offset = 0; - for (key_length = 0; field < field_end; ++field) - { - uint32 length= (*field)->pack_length(); - (*field)->abs_offset= offset; - offset+= length; - key_length += length; - } - rec_offset = table->reclength - key_length; - + /* Offset to first result field in table */ + field_list_offset= table->fields - (list.elements - const_fields); if (tree_mode) delete_tree(tree); - /* - choise function of sort - */ + + /* choose function of sort */ tree_mode= distinct || arg_count_order; - qsort_cmp2 compare_key; if (tree_mode) { if (arg_count_order) @@ -1998,21 +2053,20 @@ bool Item_func_group_concat::setup(THD *thd) } else { + compare_key= NULL; if (distinct) compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct; - else - compare_key= NULL; } /* - Create a tree of sort. Tree is used for a sort and a remove dubl - values (according with syntax of the function). If function does't + Create a tree of sort. Tree is used for a sort and a remove double + values (according with syntax of the function). If function doesn't contain DISTINCT and ORDER BY clauses, we don't create this tree. */ init_tree(tree, min(thd->variables.max_heap_table_size, - thd->variables.sortbuff_size/16), 0, + thd->variables.sortbuff_size/16), 0, key_length, compare_key, 0, NULL, (void*) this); - max_elements_in_tree= ((key_length) ? - thd->variables.max_heap_table_size/key_length : 1); + max_elements_in_tree= (key_length ? + thd->variables.max_heap_table_size/key_length : 1); }; /* @@ -2027,6 +2081,7 @@ bool Item_func_group_concat::setup(THD *thd) DBUG_RETURN(0); } + /* This is used by rollup to create a separate usable copy of the function */ void Item_func_group_concat::make_unique() @@ -2069,7 +2124,7 @@ void Item_func_group_concat::print(String *str) str->append("group_concat(", 13); if (distinct) str->append("distinct ", 9); - for (uint i= 0; i < arg_count; i++) + for (uint i= 0; i < arg_count_field; i++) { if (i) str->append(','); diff --git a/sql/item_sum.h b/sql/item_sum.h index 8a51a7e5a7c..107c19b7d85 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -197,18 +197,24 @@ class Item_sum_count_distinct :public Item_sum_int uint key_length; CHARSET_INFO *key_charset; - // calculated based on max_heap_table_size. If reached, - // walk the tree and dump it into MyISAM table + /* + Calculated based on max_heap_table_size. If reached, + walk the tree and dump it into MyISAM table + */ uint max_elements_in_tree; - // the first few bytes of record ( at least one) - // are just markers for deleted and NULLs. We want to skip them since - // they will just bloat the tree without providing any valuable info + /* + The first few bytes of record ( at least one) + are just markers for deleted and NULLs. We want to skip them since + they will just bloat the tree without providing any valuable info + */ int rec_offset; - // If there are no blobs, we can use a tree, which - // is faster than heap table. 
In that case, we still use the table - // to help get things set up, but we insert nothing in it + /* + If there are no blobs, we can use a tree, which + is faster than heap table. In that case, we still use the table + to help get things set up, but we insert nothing in it + */ bool use_tree; bool always_null; // Set to 1 if the result is always NULL @@ -312,18 +318,17 @@ public: void fix_length_and_dec() {} }; -/* - -variance(a) = - -= sum (ai - avg(a))^2 / count(a) ) -= sum (ai^2 - 2*ai*avg(a) + avg(a)^2) / count(a) -= (sum(ai^2) - sum(2*ai*avg(a)) + sum(avg(a)^2))/count(a) = -= (sum(ai^2) - 2*avg(a)*sum(a) + count(a)*avg(a)^2)/count(a) = -= (sum(ai^2) - 2*sum(a)*sum(a)/count(a) + count(a)*sum(a)^2/count(a)^2 )/count(a) = -= (sum(ai^2) - 2*sum(a)^2/count(a) + sum(a)^2/count(a) )/count(a) = -= (sum(ai^2) - sum(a)^2/count(a))/count(a) +/* + variance(a) = + + = sum (ai - avg(a))^2 / count(a) ) + = sum (ai^2 - 2*ai*avg(a) + avg(a)^2) / count(a) + = (sum(ai^2) - sum(2*ai*avg(a)) + sum(avg(a)^2))/count(a) = + = (sum(ai^2) - 2*avg(a)*sum(a) + count(a)*avg(a)^2)/count(a) = + = (sum(ai^2) - 2*sum(a)*sum(a)/count(a) + count(a)*sum(a)^2/count(a)^2 )/count(a) = + = (sum(ai^2) - 2*sum(a)^2/count(a) + sum(a)^2/count(a) )/count(a) = + = (sum(ai^2) - sum(a)^2/count(a))/count(a) */ class Item_sum_variance : public Item_sum_num @@ -509,8 +514,9 @@ class Item_sum_xor :public Item_sum_bit /* -** user defined aggregates + User defined aggregates */ + #ifdef HAVE_DLOPEN class Item_udf_sum : public Item_sum @@ -668,7 +674,6 @@ class Item_func_group_concat : public Item_sum MYSQL_ERROR *warning; bool warning_available; uint key_length; - int rec_offset; bool tree_mode; bool distinct; bool warning_for_row; @@ -676,12 +681,13 @@ class Item_func_group_concat : public Item_sum friend int group_concat_key_cmp_with_distinct(void* arg, byte* key1, byte* key2); - friend int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2); + friend int group_concat_key_cmp_with_order(void* arg, byte* key1, + byte* key2); friend int group_concat_key_cmp_with_distinct_and_order(void* arg, byte* key1, byte* key2); friend int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), - Item_func_group_concat *group_concat_item); + Item_func_group_concat *group_concat_item); public: String result; @@ -695,7 +701,7 @@ class Item_func_group_concat : public Item_sum uint show_elements; uint arg_count_order; uint arg_count_field; - uint arg_show_fields; + uint field_list_offset; uint count_cut_values; /* Following is 0 normal object and pointer to original one for copy @@ -706,38 +712,12 @@ class Item_func_group_concat : public Item_sum Item_func_group_concat(bool is_distinct,List<Item> *is_select, SQL_LIST *is_order,String *is_separator); - Item_func_group_concat(THD *thd, Item_func_group_concat *item) - :Item_sum(thd, item),item_thd(thd), - tmp_table_param(item->tmp_table_param), - max_elements_in_tree(item->max_elements_in_tree), - warning(item->warning), - warning_available(item->warning_available), - key_length(item->key_length), - rec_offset(item->rec_offset), - tree_mode(item->tree_mode), - distinct(item->distinct), - warning_for_row(item->warning_for_row), - separator(item->separator), - tree(item->tree), - table(item->table), - order(item->order), - tables_list(item->tables_list), - group_concat_max_len(item->group_concat_max_len), - show_elements(item->show_elements), - arg_count_order(item->arg_count_order), - arg_count_field(item->arg_count_field), - arg_show_fields(item->arg_show_fields), - 
count_cut_values(item->count_cut_values), - original(item) - { - quick_group= item->quick_group; - }; + Item_func_group_concat(THD *thd, Item_func_group_concat *item); ~Item_func_group_concat(); void cleanup(); enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;} const char *func_name() const { return "group_concat"; } - enum Type type() const { return SUM_FUNC_ITEM; } virtual Item_result result_type () const { return STRING_RESULT; } void clear(); bool add(); diff --git a/sql/lex.h b/sql/lex.h index ceef140b5f7..94ea0295f05 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -122,6 +122,7 @@ static SYMBOL symbols[] = { { "CURRENT_DATE", SYM(CURDATE)}, { "CURRENT_TIME", SYM(CURTIME)}, { "CURRENT_TIMESTAMP", SYM(NOW_SYM)}, + { "CURRENT_USER", SYM(CURRENT_USER)}, { "DATA", SYM(DATA_SYM)}, { "DATABASE", SYM(DATABASE)}, { "DATABASES", SYM(DATABASES)}, @@ -481,7 +482,6 @@ static SYMBOL sql_functions[] = { { "CAST", SYM(CAST_SYM)}, { "CEIL", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)}, { "CEILING", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ceiling)}, - { "CURRENT_USER", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_current_user)}, { "BIT_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_bit_length)}, { "CENTROID", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_centroid)}, { "CHAR_LENGTH", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_char_length)}, diff --git a/sql/log.cc b/sql/log.cc index 6b091484a82..0cd9e7172c3 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -507,7 +507,6 @@ int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name, RETURN VALUES 0 ok LOG_INFO_EOF End of log-index-file found - LOG_INFO_SEEK Could not allocate IO cache LOG_INFO_IO Got IO error while reading file */ diff --git a/sql/log_event.cc b/sql/log_event.cc index 8ee2f055fc6..3b92e0956ba 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -83,7 +83,7 @@ inline int ignored_error_code(int err_code) pretty_print_str() */ -#ifndef MYSQL_CLIENT +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) static char *pretty_print_str(char *packet, char *str, int len) { char *end= str + len; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 0f75c7d14ef..c85f6ed4d8d 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2105,13 +2105,11 @@ static int init_common_variables(const char *conf_file_name, int argc, strmov(fn_ext(pidfile_name),".pid"); // Add proper extension #ifndef DBUG_OFF - if (*(MYSQL_SERVER_SUFFIX)) - strxmov(strend(server_version),MYSQL_SERVER_SUFFIX,"-debug",NullS); + if (!*(MYSQL_SERVER_SUFFIX)) + strmov(strend(server_version),"-debug"); else - strmov(strend(server_version),"--debug"); -#else - strmov(strend(server_version),MYSQL_SERVER_SUFFIX); #endif + strmov(strend(server_version),MYSQL_SERVER_SUFFIX); load_defaults(conf_file_name, groups, &argc, &argv); defaults_argv=argv; @@ -2364,9 +2362,8 @@ Now disabling --log-slave-updates."); { if (global_system_variables.log_warnings) sql_print_error("Warning: Failed to lock memory. Errno: %d\n",errno); + locked_in_memory= 0; } - else - locked_in_memory=1; } #else locked_in_memory=0; @@ -2530,6 +2527,8 @@ int main(int argc, char **argv) } } #endif + thread_stack_min=thread_stack - STACK_MIN_SIZE; + (void) thr_setconcurrency(concurrency); // 10 by default /* diff --git a/sql/protocol.cc b/sql/protocol.cc index 40adc9e8961..fb4d2a57ab6 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -166,10 +166,10 @@ net_printf(THD *thd, uint errcode, ...) 
const char *format; #ifndef EMBEDDED_LIBRARY const char *text_pos; + int head_length= NET_HEADER_SIZE; #else char text_pos[1024]; #endif - int head_length= NET_HEADER_SIZE; NET *net= &thd->net; DBUG_ENTER("net_printf"); @@ -530,7 +530,10 @@ bool Protocol::send_fields(List<Item> *list, uint flag) /* Store fixed length fields */ pos= (char*) local_packet->ptr()+local_packet->length(); *pos++= 12; // Length of packed fields - int2store(pos, field.charsetnr); + if (item->collation.collation == &my_charset_bin || thd_charset == NULL) + int2store(pos, field.charsetnr); + else + int2store(pos, thd_charset->number); int4store(pos+2, field.length); pos[6]= field.type; int2store(pos+7,field.flags); diff --git a/sql/set_var.cc b/sql/set_var.cc index e3ed2a4cbd8..5c2cbecd52b 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -839,7 +839,8 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex, { char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0); uint new_length= (var ? var->value->str_value.length() : 0); - if (!old_value) old_value=""; + if (!old_value) + old_value= (char*) ""; if (!(res= my_strdup_with_length(old_value, new_length, MYF(0)))) return 1; /* diff --git a/sql/slave.cc b/sql/slave.cc index 53e72673a0e..67b9c7515e1 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -285,8 +285,9 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, In this case, we will use the same IO_CACHE pointer to read data as the IO thread is using to write data. */ - if (my_b_tell((rli->cur_log=rli->relay_log.get_log_file())) == 0 && - check_binlog_magic(rli->cur_log,errmsg)) + rli->cur_log= rli->relay_log.get_log_file(); + if (my_b_tell(rli->cur_log) == 0 && + check_binlog_magic(rli->cur_log, errmsg)) goto err; rli->cur_log_old_open_count=rli->relay_log.get_open_count(); } @@ -1673,7 +1674,18 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname, DBUG_ENTER("init_master_info"); if (mi->inited) + { + /* + We have to reset read position of relay-log-bin as we may have + already been reading from 'hotlog' when the slave was stopped + last time. If this case pos_in_file would be set and we would + get a crash when trying to read the signature for the binary + relay log. 
+ */ + my_b_seek(mi->rli.cur_log, (my_off_t) 0); DBUG_RETURN(0); + } + mi->mysql=0; mi->file_id=1; fn_format(fname, master_info_fname, mysql_data_home, "", 4+32); @@ -3617,13 +3629,16 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); } - else /* write the event to the relay log */ + else + { + /* write the event to the relay log */ if (likely(!(error= rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); rli->relay_log.harvest_bytes_written(&rli->log_space_total); } + } err: pthread_mutex_unlock(&mi->data_lock); @@ -4091,8 +4106,9 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", if (rli->relay_log.is_active(rli->linfo.log_file_name)) { #ifdef EXTRA_DEBUG - sql_print_error("next log '%s' is currently active", - rli->linfo.log_file_name); + if (global_system_variables.log_warnings) + sql_print_error("next log '%s' is currently active", + rli->linfo.log_file_name); #endif rli->cur_log= cur_log= rli->relay_log.get_log_file(); rli->cur_log_old_open_count= rli->relay_log.get_open_count(); @@ -4120,8 +4136,9 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", from hot to cold, but not from cold to hot). No need for LOCK_log. */ #ifdef EXTRA_DEBUG - sql_print_error("next log '%s' is not active", - rli->linfo.log_file_name); + if (global_system_variables.log_warnings) + sql_print_error("next log '%s' is not active", + rli->linfo.log_file_name); #endif // open_binlog() will check the magic header if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 0862de073d3..93050419afa 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1807,7 +1807,7 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, if (cached_field_index < table->fields && !my_strcasecmp(system_charset_info, table->field[cached_field_index]->field_name, name)) - field_ptr= table->field + cached_field_index; + field_ptr= table->field + cached_field_index; else if (table->name_hash.records) field_ptr= (Field**)hash_search(&table->name_hash,(byte*) name, length); diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 39061a6501b..e1a15eff475 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1860,11 +1860,11 @@ my_bool Query_cache::write_result_data(Query_cache_block **result_block, { // It is success (nobody can prevent us write data) STRUCT_UNLOCK(&structure_guard_mutex); - byte *rest = (byte*) data; - Query_cache_block *block = *result_block; uint headers_len = (ALIGN_SIZE(sizeof(Query_cache_block)) + ALIGN_SIZE(sizeof(Query_cache_result))); #ifndef EMBEDDED_LIBRARY + Query_cache_block *block= *result_block; + byte *rest= (byte*) data; // Now fill list of blocks that created by allocate_data_chain do { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index ea47ca9f71d..81269a8cbcf 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -115,7 +115,6 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, TABLE *table; int res; select_union *derived_result; - TABLE_LIST *tables= (TABLE_LIST *)first_select->table_list.first; bool is_union= first_select->next_select() && first_select->next_select()->linkage == UNION_TYPE; bool is_subsel= first_select->first_inner_unit() ? 
1: 0; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 0d0dff81427..241b293262f 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -21,12 +21,14 @@ #include "sql_acl.h" static int check_null_fields(THD *thd,TABLE *entry); +#ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, char *query, uint query_length, int log_on); static void end_delayed_insert(THD *thd); extern "C" pthread_handler_decl(handle_delayed_insert,arg); static void unlink_blobs(register TABLE *table); +#endif /* Define to force use of my_malloc() if the allocated memory block is big */ @@ -127,7 +129,9 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, TABLE *table; List_iterator_fast<List_item> its(values_list); List_item *values; - char *query=thd->query; +#ifndef EMBEDDED_LIBRARY + char *query= thd->query; +#endif thr_lock_type lock_type = table_list->lock_type; TABLE_LIST *insert_table_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index df7d487194a..4c63a8b7c7f 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1389,7 +1389,7 @@ create_total_list_n_last_return(THD *thd_arg, } } } -end: + if (slave_list_first) { *new_table_list= slave_list_first; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index c86c7d4a81d..5538fb0e832 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -396,6 +396,7 @@ public: SQL_LIST order_list; /* ORDER clause */ List<List_item> expr_list; List<List_item> when_list; /* WHEN clause (expression) */ + SQL_LIST *gorder_list; ha_rows select_limit, offset_limit; /* LIMIT clause parameters */ // Arrays of pointers to top elements of all_fields list Item **ref_pointer_array; @@ -538,7 +539,6 @@ typedef struct st_lex gptr yacc_yyss,yacc_yyvs; THD *thd; CHARSET_INFO *charset; - SQL_LIST *gorder_list; List<key_part_spec> col_list; List<key_part_spec> ref_list; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 100db9d1686..57e1079253b 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -48,7 +48,9 @@ extern "C" int gethostname(char *name, int namelen); #endif +#ifndef NO_EMBEDDED_ACCESS_CHECKS static int check_for_max_user_connections(THD *thd, USER_CONN *uc); +#endif static void decrease_user_connections(USER_CONN *uc); static bool check_db_used(THD *thd,TABLE_LIST *tables); static bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *tables); @@ -426,6 +428,8 @@ void init_max_user_conn(void) 1 error */ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + static int check_for_max_user_connections(THD *thd, USER_CONN *uc) { int error=0; @@ -456,7 +460,7 @@ static int check_for_max_user_connections(THD *thd, USER_CONN *uc) (void) pthread_mutex_unlock(&LOCK_user_conn); DBUG_RETURN(error); } - +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ /* Decrease user connection count @@ -545,15 +549,15 @@ bool is_update_query(enum enum_sql_command command) static bool check_mqh(THD *thd, uint check_command) { +#ifdef NO_EMBEDDED_ACCESS_CHECKS + return(0); +#else bool error=0; time_t check_time = thd->start_time ? 
thd->start_time : time(NULL); USER_CONN *uc=thd->user_connect; DBUG_ENTER("check_mqh"); DBUG_ASSERT(uc != 0); -#ifdef NO_EMBEDDED_ACCESS_CHECKS - DBUG_RETURN(0); -#else /* If more than a hour since last check, reset resource checking */ if (check_time - uc->intime >= 3600) { @@ -3158,7 +3162,7 @@ unsent_create_error: net_printf(thd,ER_WRONG_DB_NAME, lex->name); break; } - if (check_access(thd,DROP_ACL,lex->name,0,1,0)) + if (check_access(thd,SELECT_ACL,lex->name,0,1,0)) break; if (thd->locked_tables || thd->active_transaction()) { @@ -3548,7 +3552,10 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, DBUG_ENTER("check_access"); DBUG_PRINT("enter",("want_access: %lu master_access: %lu", want_access, thd->master_access)); - ulong db_access,dummy; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + ulong db_access; +#endif + ulong dummy; if (save_priv) *save_priv=0; else @@ -3740,6 +3747,10 @@ static bool check_db_used(THD *thd,TABLE_LIST *tables) #define used_stack(A,B) (long) (B - A) #endif +#ifndef DBUG_OFF +long max_stack_used; +#endif + #ifndef EMBEDDED_LIBRARY bool check_stack_overrun(THD *thd,char *buf __attribute__((unused))) { @@ -3752,6 +3763,9 @@ bool check_stack_overrun(THD *thd,char *buf __attribute__((unused))) thd->fatal_error(); return 1; } +#ifndef DBUG_OFF + max_stack_used= max(max_stack_used, stack_used); +#endif return 0; } #endif /* EMBEDDED_LIBRARY */ diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 2b1adbf7afc..8072b151c5c 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -802,7 +802,7 @@ static int mysql_test_select_fields(Prepared_statement *stmt, if (check_table_access(thd, privilege, tables,0)) DBUG_RETURN(1); } - else if (check_access(thd, privilege, "*any*",0,0,0)) + else if (check_access(thd, privilege, any_db,0,0,0)) DBUG_RETURN(1); #endif if ((&lex->select_lex != lex->all_selects_list && @@ -1123,9 +1123,10 @@ static void reset_stmt_for_execute(Prepared_statement *stmt) void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) { ulong stmt_id= uint4korr(packet); +#ifndef EMBEDDED_LIBRARY uchar *packet_end= (uchar *) packet + packet_length - 1; +#endif Prepared_statement *stmt; - DBUG_ENTER("mysql_stmt_execute"); packet+= 9; /* stmt_id + 5 bytes of flags */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 598b53fe7dd..86b1dcfb4ee 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3610,7 +3610,6 @@ static void make_join_readinfo(JOIN *join, uint options) { uint i; - SELECT_LEX *select_lex= &join->thd->lex->select_lex; bool statistics= test(!(join->select_options & SELECT_DESCRIBE)); DBUG_ENTER("make_join_readinfo"); @@ -3880,9 +3879,7 @@ JOIN::join_free(bool full) else { for (tab= join_tab, end= tab+tables; tab != end; tab++) - { tab->cleanup(); - } table= 0; } } @@ -4789,7 +4786,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, else return new Field_double(item_sum->max_length,maybe_null, item->name, table, item_sum->decimals); - case Item_sum::VARIANCE_FUNC: /* Place for sum & count */ + case Item_sum::VARIANCE_FUNC: /* Place for sum & count */ case Item_sum::STD_FUNC: if (group) return new Field_string(sizeof(double)*2+sizeof(longlong), @@ -4817,17 +4814,19 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, default: // This case should never be choosen DBUG_ASSERT(0); + thd->fatal_error(); return 0; } } - thd->fatal_error(); - return 0; // Error + /* We never come here */ } case Item::FIELD_ITEM: case Item::DEFAULT_VALUE_ITEM: - return 
create_tmp_field_from_field(thd, (*from_field= - ((Item_field*) item)->field), + { + Item_field *field= (Item_field*) item; + return create_tmp_field_from_field(thd, (*from_field= field->field), item, table, modify_item); + } case Item::FUNC_ITEM: case Item::COND_ITEM: case Item::FIELD_AVG_ITEM: @@ -8405,12 +8404,11 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, Item *pos; List_iterator_fast<Item> li(all_fields); Copy_field *copy; - DBUG_ENTER("setup_copy_fields"); res_selected_fields.empty(); res_all_fields.empty(); List_iterator_fast<Item> itr(res_all_fields); - uint i, border= all_fields.elements - elements; + DBUG_ENTER("setup_copy_fields"); if (!(copy=param->copy_field= new Copy_field[param->field_count])) goto err2; @@ -8545,6 +8543,23 @@ bool JOIN::alloc_func_list() } +/* + Initialize 'sum_funcs' array with all Item_sum objects + + SYNOPSIS + make_sum_func_list() + field_list All items + send_fields Items in select list + before_group_by Set to 1 if this is called before GROUP BY handling + + NOTES + Calls ::setup() for all item_sum objects in field_list + + RETURN + 0 ok + 1 error +*/ + bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields, bool before_group_by) { @@ -8581,7 +8596,7 @@ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields, /* - Change all funcs and sum_funcs to fields in tmp table, and create + Change all funcs and sum_funcs to fields in tmp table, and create new list of all items. change_to_use_tmp_fields() @@ -9081,7 +9096,6 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, List<Item> field_list; List<Item> item_list; THD *thd=join->thd; - SELECT_LEX *select_lex= &join->thd->lex->select_lex; select_result *result=join->result; Item *item_null= new Item_null(); CHARSET_INFO *cs= &my_charset_latin1; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index f93004976f2..021064c6bfc 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -31,9 +31,11 @@ static const char *grant_names[]={ "select","insert","update","delete","create","drop","reload","shutdown", "process","file","grant","references","index","alter"}; +#ifndef NO_EMBEDDED_ACCESS_CHECKS static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), "grant_types", grant_names}; +#endif static int mysql_find_files(THD *thd,List<char> *files, const char *db, const char *path, const char *wild, bool dir); @@ -367,7 +369,9 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, char *ext; MY_DIR *dirp; FILEINFO *file; +#ifndef NO_EMBEDDED_ACCESS_CHECKS uint col_access=thd->col_access; +#endif TABLE_LIST table_list; DBUG_ENTER("mysql_find_files"); @@ -829,7 +833,9 @@ int mysqld_show_create_db(THD *thd, char *dbname, char path[FN_REFLEN]; char buff[2048]; String buffer(buff, sizeof(buff), system_charset_info); +#ifndef NO_EMBEDDED_ACCESS_CHECKS uint db_access; +#endif bool found_libchar; HA_CREATE_INFO create; uint create_options = create_info ? 
create_info->options : 0; @@ -1138,7 +1144,6 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) { const char *name_end; char quote_char; - uint part_len; if (thd->variables.sql_mode & MODE_ANSI_QUOTES) quote_char= '\"'; @@ -1274,7 +1279,7 @@ store_create_info(THD *thd, TABLE *table, String *packet) /* - Again we are using CURRENT_TIMESTAMP instead of NOW becaus eit is + Again we are using CURRENT_TIMESTAMP instead of NOW because it is more standard */ has_now_default= table->timestamp_field == field && diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 5e358f76d3f..3352c3a5f17 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -628,15 +628,15 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if (!timestamps) { sql_field->unireg_check= Field::TIMESTAMP_DNUN_FIELD; - ++timestamps_with_niladic; + timestamps_with_niladic++; } else sql_field->unireg_check= Field::NONE; } - else if(sql_field->unireg_check != Field::NONE) - ++timestamps_with_niladic; + else if (sql_field->unireg_check != Field::NONE) + timestamps_with_niladic++; - ++timestamps; + timestamps++; /* fall-through */ default: sql_field->pack_flag=(FIELDFLAG_NUMBER | @@ -1963,11 +1963,6 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, /* Create a new table by copying from source table */ -#ifndef DBUG_OFF - // The code stated below is for test synchronization.test Bug #2385 - if (test_flags & TEST_SYNCHRONIZATION) - sleep(3); -#endif if (my_copy(src_path, dst_path, MYF(MY_WME|MY_DONT_OVERWRITE_FILE))) goto err; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 2d713ef5f5e..2daba654d47 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -210,6 +210,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize); %token CONCURRENT %token CONSTRAINT %token CONVERT_SYM +%token CURRENT_USER %token DATABASES %token DATA_SYM %token DEFAULT @@ -2684,6 +2685,8 @@ simple_expr: $$= new Item_func_curtime_local($3); Lex->safe_to_cache_query=0; } + | CURRENT_USER optional_braces + { $$= create_func_current_user(); } | DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')' { $$= new Item_date_add_interval($3,$5,$6,0); } | DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')' @@ -3060,11 +3063,15 @@ sum_expr: { $$=new Item_sum_variance($3); } | SUM_SYM '(' in_sum_expr ')' { $$=new Item_sum_sum($3); } - | GROUP_CONCAT_SYM '(' opt_distinct expr_list opt_gorder_clause - opt_gconcat_separator ')' + | GROUP_CONCAT_SYM '(' opt_distinct + { Select->in_sum_expr++; } + expr_list opt_gorder_clause + opt_gconcat_separator + ')' { - $$=new Item_func_group_concat($3,$4,Lex->gorder_list,$6); - $4->empty(); + Select->in_sum_expr--; + $$=new Item_func_group_concat($3,$5,Select->gorder_list,$7); + $5->empty(); }; opt_distinct: @@ -3079,16 +3086,15 @@ opt_gconcat_separator: opt_gorder_clause: /* empty */ { - LEX *lex=Lex; - lex->gorder_list = NULL; + Select->gorder_list = NULL; } | order_clause { - LEX *lex=Lex; - lex->gorder_list= - (SQL_LIST*) sql_memdup((char*) &lex->current_select->order_list, + SELECT_LEX *select= Select; + select->gorder_list= + (SQL_LIST*) sql_memdup((char*) &select->order_list, sizeof(st_sql_list)); - lex->current_select->order_list.empty(); + select->order_list.empty(); }; @@ -4170,6 +4176,29 @@ show_param: { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); } | LOGS_SYM { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS"); } + | GRANTS + { + LEX *lex=Lex; + lex->sql_command= 
SQLCOM_SHOW_GRANTS; + THD *thd= lex->thd; + LEX_USER *curr_user; + if (!(curr_user= (LEX_USER*) thd->alloc(sizeof(st_lex_user)))) + YYABORT; + curr_user->user.str= thd->priv_user; + curr_user->user.length= strlen(thd->priv_user); + if (*thd->priv_host != 0) + { + curr_user->host.str= thd->priv_host; + curr_user->host.length= strlen(thd->priv_host); + } + else + { + curr_user->host.str= (char *) "%"; + curr_user->host.length= 1; + } + curr_user->password.str=NullS; + lex->grant_user= curr_user; + } | GRANTS FOR_SYM user { LEX *lex=Lex; @@ -4760,7 +4789,25 @@ user: if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) YYABORT; $$->user = $1; $$->host=$3; - }; + } + | CURRENT_USER optional_braces + { + THD *thd= YYTHD; + if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) + YYABORT; + $$->user.str= thd->priv_user; + $$->user.length= strlen(thd->priv_user); + if (*thd->priv_host != 0) + { + $$->host.str= thd->priv_host; + $$->host.length= strlen(thd->priv_host); + } + else + { + $$->host.str= (char *) "%"; + $$->host.length= 1; + } + }; /* Keyword that we allow for identifiers */ diff --git a/sql/table.cc b/sql/table.cc index dcd0d39d855..23d99466a83 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -465,7 +465,6 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, goto err_not_open; /* purecov: inspected */ } reg_field->comment=comment; - reg_field->set_charset(charset); if (!(reg_field->flags & NOT_NULL_FLAG)) { if ((null_bit<<=1) == 256) @@ -1288,7 +1287,7 @@ bool check_db_name(char *name) /* Used to catch empty names and names with end space */ bool last_char_is_space= TRUE; - if (lower_case_table_names) + if (lower_case_table_names && name != any_db) my_casedn_str(files_charset_info, name); while (*name) |
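
The grammar changes in sql/lex.h and sql/sql_yacc.yy move CURRENT_USER from a plain parenthesized function to a keyword usable as an expression and in the user rule, and add a SHOW GRANTS form without a FOR clause that defaults to the connected user. A minimal sketch of the statements this enables (output omitted; the exact grants returned depend on the server's privilege tables):

    SELECT CURRENT_USER();
    SHOW GRANTS;
    SHOW GRANTS FOR CURRENT_USER();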