Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc            |   3
-rw-r--r--  sql/field.h             |   1
-rw-r--r--  sql/ha_blackhole.cc     | 122
-rw-r--r--  sql/ha_blackhole.h      |  18
-rw-r--r--  sql/ha_federated.cc     |  18
-rw-r--r--  sql/ha_federated.h      |   1
-rw-r--r--  sql/ha_myisam.cc        |   8
-rw-r--r--  sql/ha_myisammrg.cc     |   8
-rw-r--r--  sql/handler.cc          |   4
-rw-r--r--  sql/item.cc             |  16
-rw-r--r--  sql/item.h              |  22
-rw-r--r--  sql/item_cmpfunc.cc     |  69
-rw-r--r--  sql/item_func.cc        |   8
-rw-r--r--  sql/item_func.h         |  50
-rw-r--r--  sql/item_geofunc.cc     |   2
-rw-r--r--  sql/item_subselect.cc   |   4
-rw-r--r--  sql/key.cc              |  13
-rw-r--r--  sql/mysql_priv.h        |   2
-rw-r--r--  sql/mysqld.cc           | 100
-rw-r--r--  sql/opt_range.cc        |  38
-rw-r--r--  sql/protocol.cc         |  10
-rw-r--r--  sql/set_var.cc          |  96
-rw-r--r--  sql/set_var.h           |   1
-rw-r--r--  sql/sql_class.cc        |  17
-rw-r--r--  sql/sql_class.h         |   7
-rw-r--r--  sql/sql_insert.cc       |  66
-rw-r--r--  sql/sql_select.cc       |  39
-rw-r--r--  sql/sql_select.h        |  11
-rw-r--r--  sql/sql_string.cc       |   5
-rw-r--r--  sql/sql_table.cc        |   7
-rw-r--r--  sql/sql_yacc.yy         |  60
-rw-r--r--  sql/udf_example.c       |  10
32 files changed, 593 insertions, 243 deletions
diff --git a/sql/field.cc b/sql/field.cc index 0a7c6a04f39..3f00b6750bc 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5259,6 +5259,9 @@ void Field_date::sql_type(String &res) const 1 Value was cut during conversion 2 Wrong date string 3 Datetime value that was cut (warning level NOTE) + This is used by opt_range.cc:get_mm_leaf(). Note that there is a + nearly-identical class Field_date doesn't ever return 3 from its + store function. */ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) diff --git a/sql/field.h b/sql/field.h index 8c01931fa21..05c22092e53 100644 --- a/sql/field.h +++ b/sql/field.h @@ -25,6 +25,7 @@ #define NOT_FIXED_DEC 31 #define DATETIME_DEC 6 +const uint32 max_field_size= (uint32) 4294967295U; class Send_field; class Protocol; diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index 3f4285ec595..93d085bbc88 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -22,6 +22,14 @@ #ifdef HAVE_BLACKHOLE_DB #include "ha_blackhole.h" +/* Static declarations for shared structures */ + +static pthread_mutex_t blackhole_mutex; +static HASH blackhole_open_tables; +static int blackhole_init= FALSE; + +static st_blackhole_share *get_share(const char *table_name); +static void free_share(st_blackhole_share *share); /* Blackhole storage engine handlerton */ @@ -30,7 +38,7 @@ handlerton blackhole_hton= { SHOW_OPTION_YES, "/dev/null storage engine (anything you write to it disappears)", DB_TYPE_BLACKHOLE_DB, - NULL, + blackhole_db_init, 0, /* slot */ 0, /* savepoint size. */ NULL, /* close_connection */ @@ -70,15 +78,18 @@ const char **ha_blackhole::bas_ext() const int ha_blackhole::open(const char *name, int mode, uint test_if_locked) { DBUG_ENTER("ha_blackhole::open"); - thr_lock_init(&thr_lock); - thr_lock_data_init(&thr_lock,&lock,NULL); + + if (!(share= get_share(name))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + + thr_lock_data_init(&share->lock, &lock, NULL); DBUG_RETURN(0); } int ha_blackhole::close(void) { DBUG_ENTER("ha_blackhole::close"); - thr_lock_delete(&thr_lock); + free_share(share); DBUG_RETURN(0); } @@ -161,17 +172,23 @@ int ha_blackhole::external_lock(THD *thd, int lock_type) } -uint ha_blackhole::lock_count(void) const -{ - DBUG_ENTER("ha_blackhole::lock_count"); - DBUG_RETURN(0); -} - THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { DBUG_ENTER("ha_blackhole::store_lock"); + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + { + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd->in_lock_tables) + lock_type = TL_WRITE_ALLOW_WRITE; + + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) + lock_type = TL_READ; + + lock.type= lock_type; + } + *to++= &lock; DBUG_RETURN(to); } @@ -226,4 +243,89 @@ int ha_blackhole::index_last(byte * buf) DBUG_RETURN(HA_ERR_END_OF_FILE); } + +static st_blackhole_share *get_share(const char *table_name) +{ + st_blackhole_share *share; + uint length; + + length= (uint) strlen(table_name); + pthread_mutex_lock(&blackhole_mutex); + + if (!(share= (st_blackhole_share*) hash_search(&blackhole_open_tables, + (byte*) table_name, length))) + { + if (!(share= (st_blackhole_share*) my_malloc(sizeof(st_blackhole_share) + + length, + MYF(MY_WME | MY_ZEROFILL)))) + goto error; + + share->table_name_length= length; + strmov(share->table_name, table_name); + + if (my_hash_insert(&blackhole_open_tables, (byte*) share)) + { + my_free((gptr) share, MYF(0)); + share= NULL; + goto error; + } + + thr_lock_init(&share->lock); + } + 
share->use_count++; + +error: + pthread_mutex_unlock(&blackhole_mutex); + return share; +} + +static void free_share(st_blackhole_share *share) +{ + pthread_mutex_lock(&blackhole_mutex); + if (!--share->use_count) + hash_delete(&blackhole_open_tables, (byte*) share); + pthread_mutex_unlock(&blackhole_mutex); +} + + +static byte* blackhole_get_key(st_blackhole_share *share, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= share->table_name_length; + return (byte*) share->table_name; +} + + +bool blackhole_db_init() +{ + DBUG_ENTER("blackhole_db_init"); + if (pthread_mutex_init(&blackhole_mutex, MY_MUTEX_INIT_FAST)) + goto error; + if (hash_init(&blackhole_open_tables, &my_charset_bin, 32, 0, 0, + (hash_get_key) blackhole_get_key, 0, 0)) + { + VOID(pthread_mutex_destroy(&blackhole_mutex)); + } + else + { + blackhole_init= TRUE; + DBUG_RETURN(FALSE); + } +error: + have_blackhole_db= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); +} + + +bool blackhole_db_end() +{ + if (blackhole_init) + { + hash_free(&blackhole_open_tables); + VOID(pthread_mutex_destroy(&blackhole_mutex)); + } + blackhole_init= 0; + return FALSE; +} + #endif /* HAVE_BLACKHOLE_DB */ diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h index 4dcb37c637d..45ed0351457 100644 --- a/sql/ha_blackhole.h +++ b/sql/ha_blackhole.h @@ -17,6 +17,18 @@ #pragma interface /* gcc class implementation */ #endif + +/* + Shared structure for correct LOCK operation +*/ +struct st_blackhole_share { + THR_LOCK lock; + uint use_count; + uint table_name_length; + char table_name[1]; +}; + + /* Class definition for the blackhole storage engine "Dumbest named feature ever" @@ -24,7 +36,7 @@ class ha_blackhole: public handler { THR_LOCK_DATA lock; /* MySQL lock */ - THR_LOCK thr_lock; + st_blackhole_share *share; public: ha_blackhole(TABLE *table_arg); @@ -76,10 +88,12 @@ public: void position(const byte *record); int info(uint flag); int external_lock(THD *thd, int lock_type); - uint lock_count(void) const; int create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); }; + +bool blackhole_db_init(void); +bool blackhole_db_end(void); diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index d7f2309657b..ac1e0962ffb 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -2166,6 +2166,24 @@ error: } +/* + This method is used exlusevely by filesort() to check if we + can create sorting buffers of necessary size. + If the handler returns more records that it declares + here server can just crash on filesort(). + We cannot guarantee that's not going to happen with + the FEDERATED engine, as we have records==0 always if the + client is a VIEW, and for the table the number of + records can inpredictably change during execution. + So we return maximum possible value here. 
+*/ + +ha_rows ha_federated::estimate_rows_upper_bound() +{ + return HA_POS_ERROR; +} + + /* Initialized at each key walk (called multiple times unlike rnd_init()) */ int ha_federated::index_init(uint keynr) diff --git a/sql/ha_federated.h b/sql/ha_federated.h index dc4f976c578..349c596ae5a 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -277,6 +277,7 @@ public: int update_row(const byte *old_data, byte *new_data); int delete_row(const byte *buf); int index_init(uint keynr); + ha_rows estimate_rows_upper_bound(); int index_read(byte *buf, const byte *key, uint key_len, enum ha_rkey_function find_flag); int index_read_idx(byte *buf, uint idx, const byte *key, diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index a24486385fd..46afa7a8f45 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1602,10 +1602,14 @@ int ha_myisam::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { + int error; DBUG_ASSERT(inited==INDEX); statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); - int error=mi_rnext_same(file,buf); + &LOCK_status); + do + { + error= mi_rnext_same(file,buf); + } while (error == HA_ERR_RECORD_DELETED); table->status=error ? STATUS_NOT_FOUND: 0; return error; } diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 60aa4bd6adc..78492d2843d 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -294,9 +294,13 @@ int ha_myisammrg::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { + int error; statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); - int error=myrg_rnext_same(file,buf); + &LOCK_status); + do + { + error= myrg_rnext_same(file,buf); + } while (error == HA_ERR_RECORD_DELETED); table->status=error ? STATUS_NOT_FOUND: 0; return error; } diff --git a/sql/handler.cc b/sql/handler.cc index bfb7e8c369f..27204ae725b 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -515,6 +515,10 @@ int ha_panic(enum ha_panic_function flag) if (have_berkeley_db == SHOW_OPTION_YES) error|=berkeley_end(); #endif +#ifdef HAVE_BLACKHOLE_DB + if (have_blackhole_db == SHOW_OPTION_YES) + error|= blackhole_db_end(); +#endif #ifdef HAVE_INNOBASE_DB if (have_innodb == SHOW_OPTION_YES) error|=innobase_end(); diff --git a/sql/item.cc b/sql/item.cc index a75d0159848..975b517284f 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1231,14 +1231,14 @@ bool Item_name_const::fix_fields(THD *thd, Item **ref) s.length(0); if (value_item->fix_fields(thd, &value_item) || - name_item->fix_fields(thd, &name_item)) - return TRUE; - if (!(value_item->const_item() && name_item->const_item())) + name_item->fix_fields(thd, &name_item) || + !value_item->const_item() || + !name_item->const_item() || + !(item_name= name_item->val_str(&s))) // Can't have a NULL name + { + my_error(ER_RESERVED_SYNTAX, MYF(0), "NAME_CONST"); return TRUE; - - if (!(item_name= name_item->val_str(&s))) - return TRUE; /* Can't have a NULL name */ - + } set_name(item_name->ptr(), (uint) item_name->length(), system_charset_info); max_length= value_item->max_length; decimals= value_item->decimals; @@ -3651,7 +3651,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) } /* Search in SELECT and GROUP lists of the outer select. */ - if (outer_context->resolve_in_select_list) + if (place != IN_WHERE && place != IN_ON) { if (!(ref= resolve_ref_in_select_and_group(thd, this, select))) return -1; /* Some error occurred (e.g. ambiguous names). 
*/ diff --git a/sql/item.h b/sql/item.h index fbf4b94b276..5f511557f47 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2454,8 +2454,18 @@ class Item_cache: public Item protected: Item *example; table_map used_table_map; + /* + Field that this object will get value from. This is set/used by + index-based subquery engines to detect and remove the equality injected + by IN->EXISTS transformation. + For all other uses of Item_cache, cached_field doesn't matter. + */ + Field *cached_field; public: - Item_cache(): example(0), used_table_map(0) {fixed= 1; null_value= 1;} + Item_cache(): example(0), used_table_map(0), cached_field(0) + { + fixed= 1; null_value= 1; + } void set_used_tables(table_map map) { used_table_map= map; } @@ -2467,6 +2477,8 @@ public: decimals= item->decimals; collation.set(item->collation); unsigned_flag= item->unsigned_flag; + if (item->type() == FIELD_ITEM) + cached_field= ((Item_field *)item)->field; return 0; }; virtual void store(Item *)= 0; @@ -2477,6 +2489,14 @@ public: // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} void print(String *str); + bool eq_def(Field *field) + { + return cached_field ? cached_field->eq_def (field) : FALSE; + } + bool eq(const Item *item, bool binary_cmp) const + { + return this == item; + } }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index ee7a929024e..98bcb256138 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -24,7 +24,8 @@ #include <m_ctype.h> #include "sql_select.h" -static bool convert_constant_item(THD *thd, Field *field, Item **item); +static bool convert_constant_item(THD *thd, Item_field *field_item, + Item **item); static Item_result item_store_type(Item_result a, Item *item, my_bool unsigned_flag) @@ -317,7 +318,7 @@ longlong Item_func_nop_all::val_int() SYNOPSIS convert_constant_item() thd thread handle - field item will be converted using the type of this field + field_item item will be converted using the type of this field item [in/out] reference to the item to convert DESCRIPTION @@ -340,29 +341,47 @@ longlong Item_func_nop_all::val_int() 1 Item was replaced with an integer version of the item */ -static bool convert_constant_item(THD *thd, Field *field, Item **item) +static bool convert_constant_item(THD *thd, Item_field *field_item, + Item **item) { + Field *field= field_item->field; + int result= 0; + if (!(*item)->with_subselect && (*item)->const_item()) { /* For comparison purposes allow invalid dates like 2000-01-32 */ ulong orig_sql_mode= thd->variables.sql_mode; enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields; + ulonglong orig_field_val; /* original field value if valid */ + LINT_INIT(orig_field_val); thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; thd->count_cuted_fields= CHECK_FIELD_IGNORE; - if (!(*item)->save_in_field(field, 1) && !((*item)->null_value)) + /* + Store the value of the field if it references an outer field because + the call to save_in_field below overrides that value. + */ + if (field_item->depended_from) + orig_field_val= field->val_int(); + if (!(*item)->is_null() && !(*item)->save_in_field(field, 1)) { Item *tmp=new Item_int_with_ref(field->val_int(), *item, test(field->flags & UNSIGNED_FLAG)); - thd->variables.sql_mode= orig_sql_mode; if (tmp) thd->change_item_tree(item, tmp); - return 1; // Item was replaced + result= 1; // Item was replaced + } + /* Restore the original field value. 
*/ + if (field_item->depended_from) + { + result= field->store(orig_field_val, TRUE); + /* orig_field_val must be a valid value that can be restored back. */ + DBUG_ASSERT(!result); } thd->variables.sql_mode= orig_sql_mode; thd->count_cuted_fields= orig_count_cuted_fields; } - return 0; + return result; } @@ -410,15 +429,14 @@ void Item_bool_func2::fix_length_and_dec() thd= current_thd; if (!thd->is_context_analysis_only()) { - Item *arg_real_item= args[0]->real_item(); - if (arg_real_item->type() == FIELD_ITEM) + if (args[0]->real_item()->type() == FIELD_ITEM) { - Field *field=((Item_field*) arg_real_item)->field; - if (field->can_be_compared_as_longlong() && - !(arg_real_item->is_datetime() && + Item_field *field_item= (Item_field*) (args[0]->real_item()); + if (field_item->field->can_be_compared_as_longlong() && + !(field_item->is_datetime() && args[1]->result_type() == STRING_RESULT)) { - if (convert_constant_item(thd, field,&args[1])) + if (convert_constant_item(thd, field_item, &args[1])) { cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, INT_RESULT); // Works for all types. @@ -427,15 +445,14 @@ void Item_bool_func2::fix_length_and_dec() } } } - arg_real_item= args[1]->real_item(); - if (arg_real_item->type() == FIELD_ITEM) + if (args[1]->real_item()->type() == FIELD_ITEM) { - Field *field=((Item_field*) arg_real_item)->field; - if (field->can_be_compared_as_longlong() && - !(arg_real_item->is_datetime() && + Item_field *field_item= (Item_field*) (args[1]->real_item()); + if (field_item->field->can_be_compared_as_longlong() && + !(field_item->is_datetime() && args[0]->result_type() == STRING_RESULT)) { - if (convert_constant_item(thd, field,&args[0])) + if (convert_constant_item(thd, field_item, &args[0])) { cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, INT_RESULT); // Works for all types. @@ -1901,16 +1918,16 @@ void Item_func_between::fix_length_and_dec() thd->lex->sql_command != SQLCOM_CREATE_VIEW && thd->lex->sql_command != SQLCOM_SHOW_CREATE) { - Field *field=((Item_field*) (args[0]->real_item()))->field; - if (field->can_be_compared_as_longlong()) + Item_field *field_item= (Item_field*) (args[0]->real_item()); + if (field_item->field->can_be_compared_as_longlong()) { /* The following can't be recoded with || as convert_constant_item changes the argument */ - if (convert_constant_item(thd, field,&args[1])) + if (convert_constant_item(thd, field_item, &args[1])) cmp_type=INT_RESULT; // Works for all types. - if (convert_constant_item(thd, field,&args[2])) + if (convert_constant_item(thd, field_item, &args[2])) cmp_type=INT_RESULT; // Works for all types. 
} } @@ -3527,13 +3544,13 @@ void Item_func_in::fix_length_and_dec() thd->lex->sql_command != SQLCOM_SHOW_CREATE && cmp_type != INT_RESULT) { - Field *field= ((Item_field*) (args[0]->real_item()))->field; - if (field->can_be_compared_as_longlong()) + Item_field *field_item= (Item_field*) (args[0]->real_item()); + if (field_item->field->can_be_compared_as_longlong()) { bool all_converted= TRUE; for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) { - if (!convert_constant_item (thd, field, &arg[0])) + if (!convert_constant_item (thd, field_item, &arg[0])) all_converted= FALSE; } if (all_converted) diff --git a/sql/item_func.cc b/sql/item_func.cc index fa7821b589c..3a743037267 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2968,6 +2968,12 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func, func->max_length=min(initid.max_length,MAX_BLOB_WIDTH); func->maybe_null=initid.maybe_null; const_item_cache=initid.const_item; + /* + Keep used_tables_cache in sync with const_item_cache. + See the comment in Item_udf_func::update_used tables. + */ + if (!const_item_cache && !used_tables_cache) + used_tables_cache= RAND_TABLE_BIT; func->decimals=min(initid.decimals,NOT_FIXED_DEC); } initialized=1; @@ -4596,6 +4602,8 @@ void Item_func_get_user_var::fix_length_and_dec() if (var_entry) { + unsigned_flag= var_entry->unsigned_flag; + collation.set(var_entry->collation); switch (var_entry->type) { case REAL_RESULT: diff --git a/sql/item_func.h b/sql/item_func.h index a5e162f344f..940586fce01 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1016,6 +1016,56 @@ public: fixed= 1; return res; } + void update_used_tables() + { + /* + TODO: Make a member in UDF_INIT and return if a UDF is deterministic or + not. + Currently UDF_INIT has a member (const_item) that is an in/out + parameter to the init() call. + The code in udf_handler::fix_fields also duplicates the arguments + handling code in Item_func::fix_fields(). + + The lack of information if a UDF is deterministic makes writing + a correct update_used_tables() for UDFs impossible. + One solution to this would be : + - Add a is_deterministic member of UDF_INIT + - (optionally) deprecate the const_item member of UDF_INIT + - Take away the duplicate code from udf_handler::fix_fields() and + make Item_udf_func call Item_func::fix_fields() to process its + arguments as for any other function. + - Store the deterministic flag returned by <udf>_init into the + udf_handler. + - Don't implement Item_udf_func::fix_fields, implement + Item_udf_func::fix_length_and_dec() instead (similar to non-UDF + functions). + - Override Item_func::update_used_tables to call + Item_func::update_used_tables() and add a RAND_TABLE_BIT to the + result of Item_func::update_used_tables() if the UDF is + non-deterministic. + - (optionally) rename RAND_TABLE_BIT to NONDETERMINISTIC_BIT to + better describe its usage. + + The above would require a change of the UDF API. + Until that change is done here's how the current code works: + We call Item_func::update_used_tables() only when we know that + the function depends on real non-const tables and is deterministic. + This can be done only because we know that the optimizer will + call update_used_tables() only when there's possibly a new const + table. So update_used_tables() can only make a Item_func more + constant than it is currently. + That's why we don't need to do anything if a function is guaranteed + to return non-constant (it's non-deterministic) or is already a + const. 
+ */ + if ((used_tables_cache & ~PSEUDO_TABLE_BITS) && + !(used_tables_cache & RAND_TABLE_BIT)) + { + Item_func::update_used_tables(); + if (!const_item_cache && !used_tables_cache) + used_tables_cache= RAND_TABLE_BIT; + } + } void cleanup(); Item_result result_type () const { return udf.result_type(); } table_map not_null_tables() const { return 0; } diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 966cefea9fe..d088f68fc0c 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -34,7 +34,7 @@ void Item_geometry_func::fix_length_and_dec() { collation.set(&my_charset_bin); decimals=0; - max_length=MAX_BLOB_WIDTH; + max_length= max_field_size; maybe_null= 1; } diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 57c3b391507..b3710841dfb 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1800,7 +1800,9 @@ int subselect_single_select_engine::exec() DBUG_RETURN(1); } } - if (select_lex->uncacheable && executed) + if (select_lex->uncacheable && + select_lex->uncacheable != UNCACHEABLE_EXPLAIN + && executed) { if (join->reinit()) { diff --git a/sql/key.cc b/sql/key.cc index 2bdde46b6b3..1c044f3dc7d 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -106,19 +106,6 @@ void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length) key_part->null_bit); key_length--; } - if (key_part->type == HA_KEYTYPE_BIT) - { - Field_bit *field= (Field_bit *) (key_part->field); - if (field->bit_len) - { - uchar bits= get_rec_bits((uchar*) from_record + - key_part->null_offset + - (key_part->null_bit == 128), - field->bit_ofs, field->bit_len); - *to_key++= bits; - key_length--; - } - } if (key_part->key_part_flag & HA_BLOB_PART || key_part->key_part_flag & HA_VAR_LENGTH_PART) { diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 47396748da6..4334dedfe4e 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -186,7 +186,7 @@ MY_LOCALE *my_locale_by_number(uint number); #define QUERY_ALLOC_PREALLOC_SIZE 8192 #define TRANS_ALLOC_BLOCK_SIZE 4096 #define TRANS_ALLOC_PREALLOC_SIZE 4096 -#define RANGE_ALLOC_BLOCK_SIZE 2048 +#define RANGE_ALLOC_BLOCK_SIZE 4096 #define ACL_ALLOC_BLOCK_SIZE 1024 #define UDF_ALLOC_BLOCK_SIZE 1024 #define TABLE_ALLOC_BLOCK_SIZE 1024 diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 2e1b56431a4..6a814219662 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -184,7 +184,7 @@ typedef fp_except fp_except_t; this on freebsd */ -inline void reset_floating_point_exceptions() +inline void set_proper_floating_point_mode() { /* Don't fall for overflow, underflow,divide-by-zero or loss of precision */ #if defined(__i386__) @@ -195,8 +195,22 @@ inline void reset_floating_point_exceptions() FP_X_IMP)); #endif } +#elif defined(__sgi) +/* for IRIX to use set_fpc_csr() */ +#include <sys/fpu.h> + +inline void set_proper_floating_point_mode() +{ + /* Enable denormalized DOUBLE values support for IRIX */ + { + union fpc_csr n; + n.fc_word = get_fpc_csr(); + n.fc_struct.flush = 0; + set_fpc_csr(n.fc_word); + } +} #else -#define reset_floating_point_exceptions() +#define set_proper_floating_point_mode() #endif /* __FreeBSD__ && HAVE_IEEEFP_H */ } /* cplusplus */ @@ -3129,7 +3143,7 @@ static int init_server_components() query_cache_init(); query_cache_resize(query_cache_size); randominit(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2); - reset_floating_point_exceptions(); + set_proper_floating_point_mode(); init_thr_lock(); #ifdef HAVE_REPLICATION init_slave_list(); @@ -4967,7 +4981,7 @@ Disable with --skip-bdb (will save memory).", 
{"concurrent-insert", OPT_CONCURRENT_INSERT, "Use concurrent insert with MyISAM. Disable with --concurrent-insert=0", (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, - 0, GET_LONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, + 0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -5136,7 +5150,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, {"innodb_max_purge_lag", OPT_INNODB_MAX_PURGE_LAG, "Desired maximum length of the purge queue (0 = no limit)", (gptr*) &srv_max_purge_lag, - (gptr*) &srv_max_purge_lag, 0, GET_LONG, REQUIRED_ARG, 0, 0, ~0L, + (gptr*) &srv_max_purge_lag, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1L, 0}, {"innodb_rollback_on_timeout", OPT_INNODB_ROLLBACK_ON_TIMEOUT, "Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)", @@ -5246,7 +5260,8 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, #ifdef HAVE_MMAP {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.", (gptr*) &opt_tc_log_size, (gptr*) &opt_tc_log_size, 0, GET_ULONG, - REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ~0L, 0, TC_LOG_PAGE_SIZE, 0}, + REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ULONG_MAX, 0, + TC_LOG_PAGE_SIZE, 0}, #endif {"log-update", OPT_UPDATE_LOG, "The update log is deprecated since version 5.0, is replaced by the binary \ @@ -5669,8 +5684,8 @@ log and this option does nothing anymore.", NO_ARG, 0, 0, 0, 0, 0, 0}, {"warnings", 'W', "Deprecated; use --log-warnings instead.", (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, - 0, 0, 0}, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, + 1, 0, ULONG_MAX, 0, 0, 0}, { "back_log", OPT_BACK_LOG, "The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.", (gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG, @@ -5679,29 +5694,29 @@ log and this option does nothing anymore.", { "bdb_cache_size", OPT_BDB_CACHE_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG, - REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (long) ~0, 0, IO_SIZE, 0}, + REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, ULONG_MAX, 0, IO_SIZE, 0}, /* QQ: The following should be removed soon! 
(bdb_max_lock preferred) */ {"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, + REQUIRED_ARG, 10000, 0, ULONG_MAX, 0, 1, 0}, {"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0, - GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0}, + GET_ULONG, REQUIRED_ARG, 256*1024L, 256*1024L, ULONG_MAX, 0, 1024, 0}, {"bdb_max_lock", OPT_BDB_MAX_LOCK, "The maximum number of locks you can have active on a BDB table.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, - REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, + REQUIRED_ARG, 10000, 0, ULONG_MAX, 0, 1, 0}, #endif /* HAVE_BERKELEY_DB */ {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG, - REQUIRED_ARG, 32*1024L, IO_SIZE, ~0L, 0, IO_SIZE, 0}, + REQUIRED_ARG, 32*1024L, IO_SIZE, ULONG_MAX, 0, IO_SIZE, 0}, {"bulk_insert_buffer_size", OPT_BULK_INSERT_BUFFER_SIZE, "Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!", (gptr*) &global_system_variables.bulk_insert_buff_size, (gptr*) &max_system_variables.bulk_insert_buff_size, - 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ~0L, 0, 1, 0}, + 0, GET_ULONG, REQUIRED_ARG, 8192*1024, 0, ULONG_MAX, 0, 1, 0}, {"connect_timeout", OPT_CONNECT_TIMEOUT, "The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.", (gptr*) &connect_timeout, (gptr*) &connect_timeout, @@ -5724,7 +5739,7 @@ log and this option does nothing anymore.", {"delayed_insert_limit", OPT_DELAYED_INSERT_LIMIT, "After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.", (gptr*) &delayed_insert_limit, (gptr*) &delayed_insert_limit, 0, GET_ULONG, - REQUIRED_ARG, DELAYED_LIMIT, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, DELAYED_LIMIT, 1, ULONG_MAX, 0, 1, 0}, {"delayed_insert_timeout", OPT_DELAYED_INSERT_TIMEOUT, "How long a INSERT DELAYED thread should wait for INSERT statements before terminating.", (gptr*) &delayed_insert_timeout, (gptr*) &delayed_insert_timeout, 0, @@ -5732,7 +5747,7 @@ log and this option does nothing anymore.", { "delayed_queue_size", OPT_DELAYED_QUEUE_SIZE, "What size queue (in rows) should be allocated for handling INSERT DELAYED. 
If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.", (gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG, - REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ULONG_MAX, 0, 1, 0}, {"div_precision_increment", OPT_DIV_PRECINCREMENT, "Precision of the result of '/' operator will be increased on that value.", (gptr*) &global_system_variables.div_precincrement, @@ -5769,16 +5784,16 @@ log and this option does nothing anymore.", (gptr*) &ft_stopword_file, (gptr*) &ft_stopword_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, { "group_concat_max_len", OPT_GROUP_CONCAT_MAX_LEN, - "The maximum length of the result of function group_concat.", + "The maximum length of the result of function group_concat.", (gptr*) &global_system_variables.group_concat_max_len, (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG, - REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0}, + REQUIRED_ARG, 1024, 4, ULONG_MAX, 0, 1, 0}, #ifdef HAVE_INNOBASE_DB {"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE, "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", (gptr*) &innobase_additional_mem_pool_size, (gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG, - 1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0}, + 1*1024*1024L, 512*1024L, LONG_MAX, 0, 1024, 0}, {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT, "Data file autoextend increment in megabytes", (gptr*) &srv_auto_extend_increment, @@ -5802,7 +5817,7 @@ log and this option does nothing anymore.", SQL query after it has once got the ticket", (gptr*) &srv_n_free_tickets_to_enter, (gptr*) &srv_n_free_tickets_to_enter, - 0, GET_LONG, REQUIRED_ARG, 500L, 1L, ~0L, 0, 1L, 0}, + 0, GET_ULONG, REQUIRED_ARG, 500L, 1L, ULONG_MAX, 0, 1L, 0}, {"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS, "Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads, (gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0, @@ -5818,7 +5833,7 @@ log and this option does nothing anymore.", {"innodb_log_buffer_size", OPT_INNODB_LOG_BUFFER_SIZE, "The size of the buffer which InnoDB uses to write log to the log files on disk.", (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0, - GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0}, + GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, LONG_MAX, 0, 1024, 0}, {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE, "Size of each log file in a log group.", (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0, @@ -5836,24 +5851,24 @@ log and this option does nothing anymore.", {"innodb_open_files", OPT_INNODB_OPEN_FILES, "How many files at the maximum InnoDB keeps open at the same time.", (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0, - GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0}, + GET_LONG, REQUIRED_ARG, 300L, 10L, LONG_MAX, 0, 1L, 0}, {"innodb_sync_spin_loops", OPT_INNODB_SYNC_SPIN_LOOPS, "Count of spin-loop rounds in InnoDB mutexes", (gptr*) &srv_n_spin_wait_rounds, (gptr*) &srv_n_spin_wait_rounds, - 0, GET_LONG, REQUIRED_ARG, 20L, 0L, ~0L, 0, 1L, 0}, + 0, GET_ULONG, REQUIRED_ARG, 20L, 0L, ULONG_MAX, 0, 1L, 0}, {"innodb_thread_concurrency", OPT_INNODB_THREAD_CONCURRENCY, "Helps in performance tuning in heavily concurrent environments. " "Sets the maximum number of threads allowed inside InnoDB. 
Value 0" " will disable the thread throttling.", (gptr*) &srv_thread_concurrency, (gptr*) &srv_thread_concurrency, - 0, GET_LONG, REQUIRED_ARG, 8, 0, 1000, 0, 1, 0}, + 0, GET_ULONG, REQUIRED_ARG, 8, 0, 1000, 0, 1, 0}, {"innodb_thread_sleep_delay", OPT_INNODB_THREAD_SLEEP_DELAY, "Time of innodb thread sleeping before joining InnoDB queue (usec). Value 0" " disable a sleep", (gptr*) &srv_thread_sleep_delay, (gptr*) &srv_thread_sleep_delay, - 0, GET_LONG, REQUIRED_ARG, 10000L, 0L, ~0L, 0, 1L, 0}, + 0, GET_ULONG, REQUIRED_ARG, 10000L, 0L, ULONG_MAX, 0, 1L, 0}, #endif /* HAVE_INNOBASE_DB */ {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, "The number of seconds the server waits for activity on an interactive connection before closing it.", @@ -5883,7 +5898,7 @@ log and this option does nothing anymore.", (gptr*) &dflt_key_cache_var.param_age_threshold, (gptr*) 0, 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, - 300, 100, ~0L, 0, 100, 0}, + 300, 100, ULONG_MAX, 0, 100, 0}, {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "The default size of key cache blocks", (gptr*) &dflt_key_cache_var.param_block_size, @@ -5919,7 +5934,7 @@ log and this option does nothing anymore.", {"max_binlog_cache_size", OPT_MAX_BINLOG_CACHE_SIZE, "Can be used to restrict the total size used to cache a multi-transaction query.", (gptr*) &max_binlog_cache_size, (gptr*) &max_binlog_cache_size, 0, - GET_ULONG, REQUIRED_ARG, ~0L, IO_SIZE, ~0L, 0, IO_SIZE, 0}, + GET_ULONG, REQUIRED_ARG, ULONG_MAX, IO_SIZE, ULONG_MAX, 0, IO_SIZE, 0}, {"max_binlog_size", OPT_MAX_BINLOG_SIZE, "Binary log will be rotated automatically when the size exceeds this \ value. Will also apply to relay logs if max_relay_log_size is 0. \ @@ -5929,7 +5944,7 @@ The minimum value for this variable is 4096.", {"max_connect_errors", OPT_MAX_CONNECT_ERRORS, "If there is more than this number of interrupted connections from a host this host will be blocked from further connections.", (gptr*) &max_connect_errors, (gptr*) &max_connect_errors, 0, GET_ULONG, - REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, MAX_CONNECT_ERRORS, 1, ULONG_MAX, 0, 1, 0}, {"max_connections", OPT_MAX_CONNECTIONS, "The number of simultaneous clients allowed.", (gptr*) &max_connections, (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 100, 1, 16384, 0, 1, @@ -5972,7 +5987,7 @@ The minimum value for this variable is 4096.", "Limit assumed max number of seeks when looking up rows based on a key", (gptr*) &global_system_variables.max_seeks_for_key, (gptr*) &max_system_variables.max_seeks_for_key, 0, GET_ULONG, - REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0 }, + REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0 }, {"max_sort_length", OPT_MAX_SORT_LENGTH, "The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored).", (gptr*) &global_system_variables.max_sort_length, @@ -5987,20 +6002,20 @@ The minimum value for this variable is 4096.", "Maximum number of temporary tables a client can keep open at a time.", (gptr*) &global_system_variables.max_tmp_tables, (gptr*) &max_system_variables.max_tmp_tables, 0, GET_ULONG, - REQUIRED_ARG, 32, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, 32, 1, ULONG_MAX, 0, 1, 0}, {"max_user_connections", OPT_MAX_USER_CONNECTIONS, "The maximum number of active connections for a single user (0 = no limit).", (gptr*) &max_user_connections, (gptr*) &max_user_connections, 0, GET_UINT, - REQUIRED_ARG, 0, 1, ~0, 0, 1, 0}, + REQUIRED_ARG, 0, 0, (uint) ~0, 0, 1, 0}, {"max_write_lock_count", 
OPT_MAX_WRITE_LOCK_COUNT, "After this many write locks, allow some read locks to run in between.", (gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG, - REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, + REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0}, {"multi_range_count", OPT_MULTI_RANGE_COUNT, "Number of key ranges to request at once.", (gptr*) &global_system_variables.multi_range_count, (gptr*) &max_system_variables.multi_range_count, 0, - GET_ULONG, REQUIRED_ARG, 256, 1, ~0L, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 256, 1, ULONG_MAX, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "Block size to be used for MyISAM index pages.", (gptr*) &opt_myisam_block_size, @@ -6028,7 +6043,7 @@ The minimum value for this variable is 4096.", "Number of threads to use when repairing MyISAM tables. The value of 1 disables parallel repair.", (gptr*) &global_system_variables.myisam_repair_threads, (gptr*) &max_system_variables.myisam_repair_threads, 0, - GET_ULONG, REQUIRED_ARG, 1, 1, ~0L, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 1, 1, ULONG_MAX, 0, 1, 0}, {"myisam_sort_buffer_size", OPT_MYISAM_SORT_BUFFER_SIZE, "The buffer that is allocated when sorting the index when doing a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE.", (gptr*) &global_system_variables.myisam_sort_buff_size, @@ -6054,7 +6069,7 @@ The minimum value for this variable is 4096.", "If a read on a communication port is interrupted, retry this many times before giving up.", (gptr*) &global_system_variables.net_retry_count, (gptr*) &max_system_variables.net_retry_count,0, - GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ~0L, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, MYSQLD_NET_RETRY_COUNT, 1, ULONG_MAX, 0, 1, 0}, {"net_write_timeout", OPT_NET_WRITE_TIMEOUT, "Number of seconds to wait for a block to be written to a connection before aborting the write.", (gptr*) &global_system_variables.net_write_timeout, @@ -6083,7 +6098,7 @@ The minimum value for this variable is 4096.", "Allocation block size for query parsing and execution", (gptr*) &global_system_variables.query_alloc_block_size, (gptr*) &max_system_variables.query_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ULONG_MAX, 0, 1024, 0}, #ifdef HAVE_QUERY_CACHE {"query_cache_limit", OPT_QUERY_CACHE_LIMIT, "Don't cache results that are bigger than this.", @@ -6116,12 +6131,13 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.query_prealloc_size, (gptr*) &max_system_variables.query_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_PREALLOC_SIZE, QUERY_ALLOC_PREALLOC_SIZE, - ~0L, 0, 1024, 0}, + ULONG_MAX, 0, 1024, 0}, {"range_alloc_block_size", OPT_RANGE_ALLOC_BLOCK_SIZE, "Allocation block size for storing ranges during optimization", (gptr*) &global_system_variables.range_alloc_block_size, (gptr*) &max_system_variables.range_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, 4096, ~0L, 0, 1024, 0}, + REQUIRED_ARG, RANGE_ALLOC_BLOCK_SIZE, RANGE_ALLOC_BLOCK_SIZE, ULONG_MAX, + 0, 1024, 0}, {"read_buffer_size", OPT_RECORD_BUFFER, "Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.", (gptr*) &global_system_variables.read_buff_size, @@ -6191,7 +6207,7 @@ The minimum value for this variable is 4096.", "Synchronously flush binary log to disk after every #th event. 
" "Use 0 (default) to disable synchronous flushing.", (gptr*) &sync_binlog_period, (gptr*) &sync_binlog_period, 0, GET_ULONG, - REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0}, + REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1, 0}, {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.", (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, @@ -6215,7 +6231,7 @@ The minimum value for this variable is 4096.", {"thread_stack", OPT_THREAD_STACK, "The stack size for each thread.", (gptr*) &thread_stack, (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, - 1024L*128L, ~0L, 0, 1024, 0}, + 1024L*128L, ULONG_MAX, 0, 1024, 0}, { "time_format", OPT_TIME_FORMAT, "The TIME format (for future).", (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_TIME], @@ -6231,12 +6247,12 @@ The minimum value for this variable is 4096.", "Allocation block size for various transaction-related structures", (gptr*) &global_system_variables.trans_alloc_block_size, (gptr*) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, - REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ULONG_MAX, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, "Persistent buffer for various transaction-related structures", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, - REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ULONG_MAX, 0, 1024, 0}, {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlying table is used in queries with a LIMIT clause for updating. 0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", (gptr*) &global_system_variables.updatable_views_with_limit, diff --git a/sql/opt_range.cc b/sql/opt_range.cc index facc8e09c27..a8cf0f67635 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -250,6 +250,9 @@ public: Field *field; char *min_value,*max_value; // Pointer to range + /* + eq_tree() requires that left == right == 0 if the type is MAYBE_KEY. 
+ */ SEL_ARG *left,*right; /* R-B tree children */ SEL_ARG *next,*prev; /* Links for bi-directional interval list */ SEL_ARG *parent; /* R-B tree parent */ @@ -265,7 +268,7 @@ public: SEL_ARG(Field *field, uint8 part, char *min_value, char *max_value, uint8 min_flag, uint8 max_flag, uint8 maybe_flag); SEL_ARG(enum Type type_arg) - :min_flag(0),elements(1),use_count(1),left(0),next_key_part(0), + :min_flag(0),elements(1),use_count(1),left(0),right(0),next_key_part(0), color(BLACK), type(type_arg) {} inline bool is_same(SEL_ARG *arg) @@ -1978,12 +1981,18 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, keys_to_use.intersect(head->keys_in_use_for_query); if (!keys_to_use.is_clear_all()) { +#ifndef EMBEDDED_LIBRARY // Avoid compiler warning + char buff[STACK_BUFF_ALLOC]; +#endif MEM_ROOT alloc; SEL_TREE *tree= NULL; KEY_PART *key_parts; KEY *key_info; PARAM param; + if (check_stack_overrun(thd, 2*STACK_MIN_SIZE, buff)) + DBUG_RETURN(0); // Fatal error flag is set + /* set up parameter that is passed to all functions */ param.thd= thd; param.baseflag=head->file->table_flags(); @@ -4405,6 +4414,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, { tree= new (alloc) SEL_ARG(field, 0, 0); tree->type= SEL_ARG::IMPOSSIBLE; + goto end; } else { @@ -4413,8 +4423,32 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, for the cases like int_field > 999999999999999999999999 as well. */ tree= 0; + if (err == 3 && field->type() == FIELD_TYPE_DATE && + (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC || + type == Item_func::LT_FUNC || type == Item_func::LE_FUNC) ) + { + /* + We were saving DATETIME into a DATE column, the conversion went ok + but a non-zero time part was cut off. + + In MySQL's SQL dialect, DATE and DATETIME are compared as datetime + values. Index over a DATE column uses DATE comparison. Changing + from one comparison to the other is possible: + + datetime(date_col)< '2007-12-10 12:34:55' -> date_col<='2007-12-10' + datetime(date_col)<='2007-12-10 12:34:55' -> date_col<='2007-12-10' + + datetime(date_col)> '2007-12-10 12:34:55' -> date_col>='2007-12-10' + datetime(date_col)>='2007-12-10 12:34:55' -> date_col>='2007-12-10' + + but we'll need to convert '>' to '>=' and '<' to '<='. 
This will + be done together with other types at the end of this function + (grep for field_is_equal_to_item) + */ + } + else + goto end; } - goto end; } if (err < 0) { diff --git a/sql/protocol.cc b/sql/protocol.cc index 2bdbe83eea1..ff58d96f59b 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -110,13 +110,14 @@ void net_send_error(THD *thd, uint sql_errno, const char *err) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, err); } + /* Abort multi-result sets */ + thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; + net_send_error_packet(thd, sql_errno, err); thd->is_fatal_error=0; // Error message is given thd->net.report_error= 0; - /* Abort multi-result sets */ - thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; DBUG_VOID_RETURN; } @@ -409,7 +410,10 @@ void net_send_error_packet(THD *thd, uint sql_errno, const char *err) { NET *net= &thd->net; uint length; - char buff[MYSQL_ERRMSG_SIZE+2], *pos; + /* + buff[]: sql_errno:2 + ('#':1 + SQLSTATE_LENGTH:5) + MYSQL_ERRMSG_SIZE:512 + */ + char buff[2+1+SQLSTATE_LENGTH+MYSQL_ERRMSG_SIZE], *pos; DBUG_ENTER("send_error_packet"); diff --git a/sql/set_var.cc b/sql/set_var.cc index 1f88509d1b6..d0362edaa29 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -113,6 +113,10 @@ static int check_max_delayed_threads(THD *thd, set_var *var); static void fix_thd_mem_root(THD *thd, enum_var_type type); static void fix_trans_mem_root(THD *thd, enum_var_type type); static void fix_server_id(THD *thd, enum_var_type type); +static ulonglong fix_unsigned(THD *thd, ulonglong num, + const struct my_option *option_limits); +static bool get_unsigned(THD *thd, set_var *var); +static void throw_bounds_warning(THD *thd, const char *name, ulonglong num); static KEY_CACHE *create_key_cache(const char *name, uint length); void fix_sql_mode_var(THD *thd, enum_var_type type); static byte *get_error_count(THD *thd); @@ -1448,6 +1452,39 @@ static void fix_server_id(THD *thd, enum_var_type type) } +static void throw_bounds_warning(THD *thd, const char *name, ulonglong num) +{ + char buf[22]; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), name, + ullstr(num, buf)); +} + +static ulonglong fix_unsigned(THD *thd, ulonglong num, + const struct my_option *option_limits) +{ + bool fixed= FALSE; + ulonglong out= getopt_ull_limit_value(num, option_limits, &fixed); + + if (fixed) + throw_bounds_warning(thd, option_limits->name, num); + return out; +} + +static bool get_unsigned(THD *thd, set_var *var) +{ + if (var->value->unsigned_flag) + var->save_result.ulonglong_value= (ulonglong) var->value->val_int(); + else + { + longlong v= var->value->val_int(); + var->save_result.ulonglong_value= (ulonglong) ((v < 0) ? 0 : v); + } + return 0; +} + + sys_var_long_ptr:: sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg, sys_after_update_func after_update_arg) @@ -1458,9 +1495,7 @@ sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg, bool sys_var_long_ptr_global::check(THD *thd, set_var *var) { - longlong v= var->value->val_int(); - var->save_result.ulonglong_value= v < 0 ? 
0 : v; - return 0; + return get_unsigned(thd, var); } bool sys_var_long_ptr_global::update(THD *thd, set_var *var) @@ -1468,9 +1503,20 @@ bool sys_var_long_ptr_global::update(THD *thd, set_var *var) ulonglong tmp= var->save_result.ulonglong_value; pthread_mutex_lock(guard); if (option_limits) - *value= (ulong) getopt_ull_limit_value(tmp, option_limits); + *value= (ulong) fix_unsigned(thd, tmp, option_limits); else + { +#if SIZEOF_LONG < SIZEOF_LONG_LONG + /* Avoid overflows on 32 bit systems */ + if (tmp > ULONG_MAX) + { + tmp= ULONG_MAX; + throw_bounds_warning(thd, name, var->save_result.ulonglong_value); + } +#endif *value= (ulong) tmp; + } + pthread_mutex_unlock(guard); return 0; } @@ -1489,7 +1535,7 @@ bool sys_var_ulonglong_ptr::update(THD *thd, set_var *var) ulonglong tmp= var->save_result.ulonglong_value; pthread_mutex_lock(&LOCK_global_system_variables); if (option_limits) - *value= (ulonglong) getopt_ull_limit_value(tmp, option_limits); + *value= (ulonglong) fix_unsigned(thd, tmp, option_limits); else *value= (ulonglong) tmp; pthread_mutex_unlock(&LOCK_global_system_variables); @@ -1532,45 +1578,36 @@ byte *sys_var_enum::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) bool sys_var_thd_ulong::check(THD *thd, set_var *var) { - return (sys_var_thd::check(thd, var) || + return (get_unsigned(thd, var) || (check_func && (*check_func)(thd, var))); } bool sys_var_thd_ulong::update(THD *thd, set_var *var) { ulonglong tmp= var->save_result.ulonglong_value; - char buf[22]; - bool truncated= false; /* Don't use bigger value than given with --maximum-variable-name=.. */ if ((ulong) tmp > max_system_variables.*offset) { - truncated= true; - llstr(tmp, buf); + throw_bounds_warning(thd, name, tmp); tmp= max_system_variables.*offset; } -#if SIZEOF_LONG == 4 - /* Avoid overflows on 32 bit systems */ - if (tmp > (ulonglong) ~(ulong) 0) + if (option_limits) + tmp= (ulong) fix_unsigned(thd, tmp, option_limits); +#if SIZEOF_LONG < SIZEOF_LONG_LONG + else if (tmp > ULONG_MAX) { - truncated= true; - llstr(tmp, buf); - tmp= ((ulonglong) ~(ulong) 0); + tmp= ULONG_MAX; + throw_bounds_warning(thd, name, var->save_result.ulonglong_value); } #endif - if (truncated) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_TRUNCATED_WRONG_VALUE, - ER(ER_TRUNCATED_WRONG_VALUE), name, - buf); - if (option_limits) - tmp= (ulong) getopt_ull_limit_value(tmp, option_limits); if (var->type == OPT_GLOBAL) global_system_variables.*offset= (ulong) tmp; else thd->variables.*offset= (ulong) tmp; + return 0; } @@ -1605,7 +1642,7 @@ bool sys_var_thd_ha_rows::update(THD *thd, set_var *var) tmp= max_system_variables.*offset; if (option_limits) - tmp= (ha_rows) getopt_ull_limit_value(tmp, option_limits); + tmp= (ha_rows) fix_unsigned(thd, tmp, option_limits); if (var->type == OPT_GLOBAL) { /* Lock is needed to make things safe on 32 bit systems */ @@ -1641,6 +1678,11 @@ byte *sys_var_thd_ha_rows::value_ptr(THD *thd, enum_var_type type, return (byte*) &(thd->variables.*offset); } +bool sys_var_thd_ulonglong::check(THD *thd, set_var *var) +{ + return get_unsigned(thd, var); +} + bool sys_var_thd_ulonglong::update(THD *thd, set_var *var) { ulonglong tmp= var->save_result.ulonglong_value; @@ -1649,7 +1691,7 @@ bool sys_var_thd_ulonglong::update(THD *thd, set_var *var) tmp= max_system_variables.*offset; if (option_limits) - tmp= getopt_ull_limit_value(tmp, option_limits); + tmp= fix_unsigned(thd, tmp, option_limits); if (var->type == OPT_GLOBAL) { /* Lock is needed to make things safe on 32 bit systems */ @@ -2508,7 
+2550,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
   }
 
   key_cache->param_buff_size=
-    (ulonglong) getopt_ull_limit_value(tmp, option_limits);
+    (ulonglong) fix_unsigned(thd, tmp, option_limits);
 
   /* If key cache didn't existed initialize it, else resize it */
   key_cache->in_init= 1;
@@ -2556,7 +2598,7 @@ bool sys_var_key_cache_long::update(THD *thd, set_var *var)
     goto end;
 
   *((ulong*) (((char*) key_cache) + offset))=
-    (ulong) getopt_ull_limit_value(tmp, option_limits);
+    (ulong) fix_unsigned(thd, tmp, option_limits);
 
   /*
     Don't create a new key cache if it didn't exist
diff --git a/sql/set_var.h b/sql/set_var.h
index 99db38933c2..c37cc400e43 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -365,6 +365,7 @@ public:
   void set_default(THD *thd, enum_var_type type);
   SHOW_TYPE show_type() { return SHOW_LONGLONG; }
   byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+  bool check(THD *thd, set_var *var);
   bool check_default(enum_var_type type)
   {
     return type == OPT_GLOBAL && !option_limits;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ef199b6f883..93f5a34d5c6 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1219,16 +1219,18 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
     }
   }
   field_term_length=exchange->field_term->length();
-  field_term_char= field_term_length ? (*exchange->field_term)[0] : INT_MAX;
+  field_term_char= field_term_length ?
+                   (int) (uchar) (*exchange->field_term)[0] : INT_MAX;
   if (!exchange->line_term->length())
     exchange->line_term=exchange->field_term;  // Use this if it exists
-  field_sep_char= (exchange->enclosed->length() ? (*exchange->enclosed)[0] :
-                   field_term_char);
-  escape_char= (exchange->escaped->length() ? (*exchange->escaped)[0] : -1);
+  field_sep_char= (exchange->enclosed->length() ?
+                  (int) (uchar) (*exchange->enclosed)[0] : field_term_char);
+  escape_char= (exchange->escaped->length() ?
+               (int) (uchar) (*exchange->escaped)[0] : -1);
   is_ambiguous_field_sep= test(strchr(ESCAPE_CHARS, field_sep_char));
   is_unsafe_field_sep= test(strchr(NUMERIC_CHARS, field_sep_char));
   line_sep_char= (exchange->line_term->length() ?
-                  (*exchange->line_term)[0] : INT_MAX);
+                 (int) (uchar) (*exchange->line_term)[0] : INT_MAX);
   if (!field_term_length)
     exchange->opt_enclosed=0;
   if (!exchange->enclosed->length())
@@ -1385,10 +1387,11 @@ bool select_export::send_data(List<Item> &items)
                 Don't escape field_term_char by doubling - doubling is only
                 valid for ENCLOSED BY characters:
               */
-              (enclosed || !is_ambiguous_field_term || *pos != field_term_char))
+              (enclosed || !is_ambiguous_field_term ||
+               (int) (uchar) *pos != field_term_char))
          {
            char tmp_buff[2];
-            tmp_buff[0]= ((int) *pos == field_sep_char &&
+            tmp_buff[0]= ((int) (uchar) *pos == field_sep_char &&
                           is_ambiguous_field_sep) ?
                           field_sep_char : escape_char;
            tmp_buff[1]= *pos ? *pos : '0';
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 9f294d09d5a..daeb16de7e0 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -2054,17 +2054,17 @@ class select_insert :public select_result_interceptor {
   TABLE_LIST *table_list;
   TABLE *table;
   List<Item> *fields;
-  ulonglong last_insert_id;
+  ulonglong autoinc_value_of_last_inserted_row; // not autogenerated
+  ulonglong autoinc_value_of_first_inserted_row; // autogenerated
   COPY_INFO info;
   bool insert_into_view;
-  bool is_bulk_insert_mode;
   select_insert(TABLE_LIST *table_list_par, TABLE *table_par,
                 List<Item> *fields_par, List<Item> *update_fields,
                 List<Item> *update_values, enum_duplicates duplic, bool ignore);
   ~select_insert();
   int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
-  int prepare2(void);
+  virtual int prepare2(void);
   bool send_data(List<Item> &items);
   virtual void store_values(List<Item> &values);
   void send_error(uint errcode,const char *err);
@@ -2099,6 +2099,7 @@ public:
   void send_error(uint errcode,const char *err);
   bool send_eof();
   void abort();
+  int prepare2(void) { return 0; }
 };
 
 #include <myisam.h>
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 7273fbf74e3..cf9e93b8518 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -2644,9 +2644,9 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par,
                              enum_duplicates duplic,
                              bool ignore_check_option_errors)
   :table_list(table_list_par), table(table_par), fields(fields_par),
-   last_insert_id(0),
-   insert_into_view(table_list_par && table_list_par->view != 0),
-   is_bulk_insert_mode(FALSE)
+   autoinc_value_of_last_inserted_row(0),
+   autoinc_value_of_first_inserted_row(0),
+   insert_into_view(table_list_par && table_list_par->view != 0)
 {
   bzero((char*) &info,sizeof(info));
   info.handle_duplicates= duplic;
@@ -2755,14 +2755,14 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
     Is table which we are changing used somewhere in other parts of
     query
   */
-  if (!(lex->current_select->options & OPTION_BUFFER_RESULT) &&
-      unique_table(thd, table_list, table_list->next_global, 0))
+  if (unique_table(thd, table_list, table_list->next_global, 0))
   {
     /* Using same table for INSERT and SELECT */
     lex->current_select->options|= OPTION_BUFFER_RESULT;
     lex->current_select->join->select_options|= OPTION_BUFFER_RESULT;
   }
-  else if (!thd->prelocked_mode)
+  else if (!(lex->current_select->options & OPTION_BUFFER_RESULT) &&
+           !thd->prelocked_mode)
   {
     /*
       We must not yet prepare the result table if it is the same as one of the
@@ -2831,11 +2831,8 @@ int select_insert::prepare2(void)
 {
   DBUG_ENTER("select_insert::prepare2");
   if (thd->lex->current_select->options & OPTION_BUFFER_RESULT &&
-      !thd->prelocked_mode && !is_bulk_insert_mode)
-  {
+      !thd->prelocked_mode)
     table->file->start_bulk_insert((ha_rows) 0);
-    is_bulk_insert_mode= TRUE;
-  }
   DBUG_RETURN(0);
 }
 
@@ -2902,14 +2899,24 @@ bool select_insert::send_data(List<Item> &values)
     if (table->next_number_field)
     {
       /*
+        If no value has been autogenerated so far, we need to remember the
+        value we just saw, we may need to send it to client in the end.
+      */
+      if (!thd->insert_id_used)
+        autoinc_value_of_last_inserted_row= table->next_number_field->val_int();
+      /*
        Clear auto-increment field for the next record, if triggers are used
        we will clear it twice, but this should be cheap.
      */
      table->next_number_field->reset();
-      if (!last_insert_id && thd->insert_id_used)
-        last_insert_id= thd->last_insert_id;
+      if (!autoinc_value_of_last_inserted_row && thd->insert_id_used)
+        autoinc_value_of_last_inserted_row= thd->last_insert_id;
     }
   }
+
+  if (thd->insert_id_used && !autoinc_value_of_first_inserted_row)
+    autoinc_value_of_first_inserted_row= thd->last_insert_id;
+
   DBUG_RETURN(error);
 }
 
@@ -2938,11 +2945,11 @@ bool select_insert::send_eof()
 {
   int error, error2;
   bool changed, transactional_table= table->file->has_transactions();
+  ulonglong id;
   THD::killed_state killed_status= thd->killed;
   DBUG_ENTER("select_insert::send_eof");
 
   error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
-  is_bulk_insert_mode= FALSE;
   table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
   table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
 
@@ -2960,8 +2967,17 @@ bool select_insert::send_eof()
   DBUG_ASSERT(transactional_table || !changed ||
               thd->transaction.stmt.modified_non_trans_table);
 
-  if (last_insert_id)
-    thd->insert_id(info.copied ? last_insert_id : 0);   // For binary log
+  // For binary log
+  if (autoinc_value_of_last_inserted_row)
+  {
+    if (info.copied)
+      thd->insert_id(autoinc_value_of_last_inserted_row);
+    else
+    {
+      autoinc_value_of_first_inserted_row= 0;
+      thd->insert_id(0);
+    }
+  }
   /* Write to binlog before commiting transaction */
   if (mysql_bin_log.is_open())
   {
@@ -2988,7 +3004,9 @@ bool select_insert::send_eof()
   thd->row_count_func= info.copied + info.deleted +
                        ((thd->client_capabilities & CLIENT_FOUND_ROWS) ?
                         info.touched : info.updated);
-  ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff);
+  id= autoinc_value_of_first_inserted_row > 0 ?
+      autoinc_value_of_first_inserted_row : thd->last_insert_id;
+  ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
   DBUG_RETURN(0);
 }
 
@@ -3017,8 +3035,17 @@ void select_insert::abort()
     */
     if (thd->transaction.stmt.modified_non_trans_table)
     {
-      if (last_insert_id)
-        thd->insert_id(last_insert_id);         // For binary log
+      // For binary log
+      if (autoinc_value_of_last_inserted_row)
+      {
+        if (info.copied)
+          thd->insert_id(autoinc_value_of_last_inserted_row);
+        else
+        {
+          autoinc_value_of_first_inserted_row= 0;
+          thd->insert_id(0);
+        }
+      }
       if (mysql_bin_log.is_open())
       {
         Query_log_event qinfo(thd, thd->query, thd->query_length,
@@ -3278,10 +3305,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
   if (info.handle_duplicates == DUP_UPDATE)
     table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
   if (!thd->prelocked_mode)
-  {
     table->file->start_bulk_insert((ha_rows) 0);
-    is_bulk_insert_mode= TRUE;
-  }
   thd->abort_on_warning= (!info.ignore &&
                           (thd->variables.sql_mode &
                            (MODE_STRICT_TRANS_TABLES |
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5699f1d1868..d6cb072ce9b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -217,6 +217,7 @@ static void select_describe(JOIN *join, bool need_tmp_table,bool need_order,
                             bool distinct, const char *message=NullS);
 static Item *remove_additional_cond(Item* conds);
 static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab);
+static bool test_if_ref(Item_field *left_item,Item *right_item);
 
 
 /*
@@ -681,9 +682,6 @@ err:
     without "checking NULL", remove the predicates that were pushed down
     into the subquery.
 
-    We can remove the equalities that will be guaranteed to be true by the
-    fact that subquery engine will be using index lookup.
-
    If the subquery compares scalar values, we can remove the condition that
    was wrapped into trig_cond (it will be checked when needed by the subquery
    engine)
@@ -693,6 +691,12 @@ err:
    and non-NULL values, we'll do a full table scan and will rely on the
    equalities corresponding to non-NULL parts of left tuple to filter out
    non-matching records.
+
+    TODO: We can remove the equalities that will be guaranteed to be true by the
+    fact that subquery engine will be using index lookup. This must be done only
+    for cases where there are no conversion errors of significance, e.g. 257
+    that is searched in a byte. But this requires homogenization of the return
+    codes of all Field*::store() methods.
 */
 
 void JOIN::remove_subq_pushed_predicates(Item **where)
@@ -700,17 +704,13 @@
 {
   if (conds->type() == Item::FUNC_ITEM &&
       ((Item_func *)this->conds)->functype() == Item_func::EQ_FUNC &&
      ((Item_func *)conds)->arguments()[0]->type() == Item::REF_ITEM &&
-      ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM)
+      ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM &&
+      test_if_ref ((Item_field *)((Item_func *)conds)->arguments()[1],
+                   ((Item_func *)conds)->arguments()[0]))
   {
     *where= 0;
     return;
   }
-  if (conds->type() == Item::COND_ITEM &&
-      ((class Item_func *)this->conds)->functype() ==
-      Item_func::COND_AND_FUNC)
-  {
-    *where= remove_additional_cond(conds);
-  }
 }
@@ -1236,7 +1236,7 @@ JOIN::optimize()
   {
     if (!having)
     {
-      Item *where= 0;
+      Item *where= conds;
       if (join_tab[0].type == JT_EQ_REF &&
           join_tab[0].ref.items[0]->name == in_left_expr_name)
       {
@@ -2332,6 +2332,11 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
 {
   int error;
   DBUG_ENTER("get_quick_record_count");
+#ifndef EMBEDDED_LIBRARY                      // Avoid compiler warning
+  char buff[STACK_BUFF_ALLOC];
+#endif
+  if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
+    DBUG_RETURN(0);                           // Fatal error flag is set
   if (select)
   {
     select->head=table;
@@ -3686,7 +3691,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
     found_eq_constant=0;
     for (i=0 ; i < keyuse->elements-1 ; i++,use++)
     {
-      if (!use->used_tables)
+      if (!use->used_tables && use->optimize != KEY_OPTIMIZE_REF_OR_NULL)
         use->table->const_key_parts[use->key]|= use->keypart_map;
       if (use->keypart != FT_KEYPART)
       {
@@ -10302,7 +10307,8 @@ Next_select_func setup_end_select_func(JOIN *join)
   /* Set up select_end */
   if (table)
   {
-    if (table->group && tmp_tbl->sum_func_count)
+    if (table->group && tmp_tbl->sum_func_count &&
+        !tmp_tbl->precomputed_group_by)
     {
       if (table->s->keys)
       {
@@ -11933,8 +11939,12 @@ static bool test_if_ref(Item_field *left_item,Item *right_item)
     Item *ref_item=part_of_refkey(field->table,field);
     if (ref_item && ref_item->eq(right_item,1))
     {
+      right_item= right_item->real_item();
       if (right_item->type() == Item::FIELD_ITEM)
         return (field->eq_def(((Item_field *) right_item)->field));
+      /* remove equalities injected by IN->EXISTS transformation */
+      else if (right_item->type() == Item::CACHE_ITEM)
+        return ((Item_cache *)right_item)->eq_def (field);
       if (right_item->const_item() && !(right_item->is_null()))
       {
         /*
@@ -15373,7 +15383,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
         {
           if (tab->use_quick == 2)
           {
-            char buf[MAX_KEY/8+1];
+            /* 4 bits per 1 hex digit + terminating '\0' */
+            char buf[MAX_KEY / 4 + 1];
             extra.append(STRING_WITH_LEN("; Range checked for each "
                                          "record (index map: 0x"));
             extra.append(tab->keys.print(buf));
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 4fc32e7fdb3..42be8d3ec68 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -548,14 +548,17 @@ public:
   enum store_key_result copy()
   {
     enum store_key_result result;
-    enum_check_fields saved_count_cuted_fields=
-      to_field->table->in_use->count_cuted_fields;
+    THD *thd= to_field->table->in_use;
+    enum_check_fields saved_count_cuted_fields= thd->count_cuted_fields;
+    ulong sql_mode= thd->variables.sql_mode;
+    thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
 
-    to_field->table->in_use->count_cuted_fields= CHECK_FIELD_IGNORE;
+    thd->count_cuted_fields= CHECK_FIELD_IGNORE;
     result= copy_inner();
-    to_field->table->in_use->count_cuted_fields= saved_count_cuted_fields;
+    thd->count_cuted_fields= saved_count_cuted_fields;
+    thd->variables.sql_mode= sql_mode;
     return result;
   }
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 9d7df73cd7a..606a9ddb26d 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -858,7 +858,7 @@ outp:
   with optional left padding (for binary -> UCS2 conversion)
 
   SYNOPSIS
-    well_formed_copy_nhars()
+    well_formed_copy_nchars()
    to                       Store result here
    to_length                Maxinum length of "to" string
    to_cs                    Character set of "to" string
@@ -997,7 +997,10 @@ outp:
         goto outp;
     }
     else
+    {
+      from= from_prev;
       break;
+    }
   }
   *from_end_pos= from;
   res= to - to_start;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 0f4abb48f33..9e1a8151e3a 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2780,8 +2780,11 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST *src_table,
     strxmov(src_path, (*tmp_table)->s->path, reg_ext, NullS);
   else
   {
-    strxmov(src_path, mysql_data_home, "/", src_table->db, "/",
-            src_table->table_name, reg_ext, NullS);
+    char *tablename_pos= strxmov(src_path, mysql_data_home, "/", NullS);
+    strxmov(tablename_pos, src_table->db, "/", src_table->table_name,
+            reg_ext, NullS);
+    if (lower_case_table_names)
+      my_casedn_str(files_charset_info, tablename_pos);
     /* Resolve symlinks (for windows) */
     fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME));
     if (access(src_path, F_OK))
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 6e231ef2447..bd85ce55edb 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1137,7 +1137,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
 
 %type <variable> internal_variable_name
 
-%type <select_lex> subselect subselect_init
+%type <select_lex> subselect take_first_select
        get_select_lex
 
 %type <boolfunc2creator> comp_op
@@ -1160,8 +1160,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
        field_opt_list opt_binary table_lock_list table_lock
        ref_list opt_on_delete opt_on_delete_list opt_on_delete_item use
        opt_delete_options opt_delete_option varchar nchar nvarchar
-       opt_outer table_list table_name table_alias_ref_list table_alias_ref
-       opt_option opt_place
+       opt_outer table_list table_name opt_option opt_place
        opt_attribute opt_attribute_list attribute column_list column_list_id
        opt_column_list grant_privileges grant_ident grant_list grant_option
        object_privilege object_privilege_list user_list rename_list
@@ -6563,20 +6562,6 @@ table_name:
          }
        ;
 
-table_alias_ref_list:
-       table_alias_ref
-       | table_alias_ref_list ',' table_alias_ref;
-
-table_alias_ref:
-       table_ident
-       {
-         if (!Select->add_table_to_list(YYTHD, $1, NULL,
-                                        TL_OPTION_UPDATING | TL_OPTION_ALIAS,
-                                        Lex->lock_option ))
-           MYSQL_YYABORT;
-       }
-       ;
-
 if_exists:
        /* empty */ { $$= 0; }
        | IF EXISTS { $$= 1; }
@@ -6847,7 +6832,7 @@ single_multi:
          if (multi_delete_set_locks_and_link_aux_tables(Lex))
            MYSQL_YYABORT;
        }
-       | FROM table_alias_ref_list
+       | FROM table_wild_list
          { mysql_init_multi_delete(Lex); }
          USING join_table_list where_clause
          {
@@ -9505,35 +9490,22 @@ union_option:
        | ALL { $$=0; }
        ;
 
+take_first_select: /* empty */
+        {
+          $$= Lex->current_select->master_unit()->first_select();
+        };
+
 subselect:
-       SELECT_SYM subselect_start subselect_init subselect_end
+        SELECT_SYM subselect_start select_init2 take_first_select
+        subselect_end
        {
-         $$= $3;
+          $$= $4;
        }
-       | '(' subselect_start subselect ')'
-         {
-           THD *thd= YYTHD;
-           /*
-             note that a local variable can't be used for
-             $3 as it's used in local variable construction
-             and some compilers can't guarnatee the order
-             in which the local variables are initialized.
-           */
-           List_iterator<Item> it($3->item_list);
-           Item *item;
-           /*
-             we must fill the items list for the "derived table".
-           */
-           while ((item= it++))
-             add_item_to_list(thd, item);
-         }
-         union_clause subselect_end { $$= $3; };
-
-subselect_init:
-  select_init2
-  {
-    $$= Lex->current_select->master_unit()->first_select();
-  };
+        | '(' subselect_start select_paren take_first_select
+          subselect_end ')'
+        {
+          $$= $4;
+        };
 
 subselect_start:
        {
diff --git a/sql/udf_example.c b/sql/udf_example.c
index df3a69755ad..4ca6133da03 100644
--- a/sql/udf_example.c
+++ b/sql/udf_example.c
@@ -648,13 +648,11 @@ my_bool sequence_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
     return 1;
   }
   bzero(initid->ptr,sizeof(longlong));
-  /*
-    Fool MySQL to think that this function is a constant
-    This will ensure that MySQL only evalutes the function
-    when the rows are sent to the client and not before any ORDER BY
-    clauses
+  /*
+    sequence() is a non-deterministic function : it has different value
+    even if called with the same arguments.
   */
-  initid->const_item=1;
+  initid->const_item=0;
   return 0;
 }