author    | unknown <konstantin@mysql.com> | 2006-02-02 16:57:34 +0300
committer | unknown <konstantin@mysql.com> | 2006-02-02 16:57:34 +0300
commit    | 97dae00306364c94cdab8fd4a66a51d19bbd96af (patch)
tree      | 8b45ee033b876bd5d51ce4afe7627ae87815b79c /sql
parent    | f0263ac452817f83d28047a20399fdef1e232e9a (diff)
parent    | d9e7af2684205baebe325fc78d6d34b6ffb727e2 (diff)
download  | mariadb-git-97dae00306364c94cdab8fd4a66a51d19bbd96af.tar.gz
Merge mysql.com:/home/kostja/mysql/tmp_merge
into mysql.com:/home/kostja/mysql/mysql-5.1-merge
client/mysqlbinlog.cc:
Auto merged
client/mysqldump.c:
Auto merged
include/my_base.h:
Auto merged
libmysql/libmysql.c:
Auto merged
sql/field.h:
Auto merged
sql/ha_myisam.cc:
Auto merged
sql/log_event.cc:
Auto merged
sql/opt_range.cc:
Auto merged
sql/opt_range.h:
Auto merged
sql/repl_failsafe.cc:
Auto merged
sql/set_var.cc:
Auto merged
sql/slave.cc:
Auto merged
sql/slave.h:
Auto merged
sql/sql_acl.cc:
Auto merged
sql/sql_db.cc:
Auto merged
sql/sql_handler.cc:
Auto merged
sql/sql_help.cc:
Auto merged
sql/sql_parse.cc:
Auto merged
sql/sql_repl.cc:
Auto merged
sql/sql_show.cc:
Auto merged
sql/sql_test.cc:
Auto merged
sql-common/client.c:
Auto merged
sql/table.cc:
Auto merged
storage/myisam/mi_delete.c:
Auto merged
storage/myisam/myisampack.c:
Auto merged
storage/myisam/sort.c:
Auto merged
mysys/my_bitmap.c:
Manual merge
sql/field.cc:
Manual merge
sql/item.cc:
Manual merge (new Field_bit_as_char constructor signature)
sql/item_func.cc:
Manual merge: use local
sql/sql_insert.cc:
Manual merge: add VOID() around bitmap_init
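
The note above about sql/sql_insert.cc refers to the VOID() idiom used around bitmap_init(); the same wrapper also appears around str->append(), packet->reserve(), my_getwd() and open_cached_file() in the diff below. A minimal standalone sketch of the idiom, assuming VOID(X) is the mysys wrapper that simply evaluates X; the bitmap stub is illustrative and not the real my_bitmap API:

```cpp
#include <cstdio>

// Assumption: VOID(X) just evaluates and discards X, as in mysys' my_global.h.
#define VOID(X) (X)

// Stand-in for a mysys call such as bitmap_init() that reports failure
// through its return value (hypothetical signature, for illustration only).
static bool bitmap_init_stub(unsigned *buf, unsigned n_bits)
{
  for (unsigned i = 0; i < (n_bits + 31) / 32; i++)
    buf[i] = 0;
  return false;                      // false == success, mirroring my_bool use
}

int main()
{
  unsigned buff[4];
  // With a caller-supplied buffer the call cannot fail, so the result is
  // discarded explicitly, as done for bitmap_init() in sql_insert.cc.
  VOID(bitmap_init_stub(buff, 96));
  std::puts("bitmap initialised");
  return 0;
}
```

The wrapper is purely documentary: it marks places where a checkable return value is dropped on purpose, so warnings about ignored results stay meaningful elsewhere.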
Diffstat (limited to 'sql')
-rw-r--r-- | sql/field.cc         |  26
-rw-r--r-- | sql/field.h          |   4
-rw-r--r-- | sql/ha_myisam.cc     |  17
-rw-r--r-- | sql/item.cc          |   4
-rw-r--r-- | sql/log_event.cc     |   4
-rw-r--r-- | sql/opt_range.cc     |   7
-rw-r--r-- | sql/opt_range.h      |   2
-rw-r--r-- | sql/repl_failsafe.cc |   3
-rw-r--r-- | sql/set_var.cc       |   1
-rw-r--r-- | sql/slave.cc         |  38
-rw-r--r-- | sql/slave.h          |   2
-rw-r--r-- | sql/sql_acl.cc       |   6
-rw-r--r-- | sql/sql_db.cc        | 112
-rw-r--r-- | sql/sql_help.cc      |   2
-rw-r--r-- | sql/sql_insert.cc    |   2
-rw-r--r-- | sql/sql_parse.cc     |  12
-rw-r--r-- | sql/sql_repl.cc      |  12
-rw-r--r-- | sql/sql_show.cc      |  23
-rw-r--r-- | sql/sql_test.cc      |   2
-rw-r--r-- | sql/table.cc         |   5
-rw-r--r-- | sql/uniques.cc       | 122
21 files changed, 216 insertions, 190 deletions
diff --git a/sql/field.cc b/sql/field.cc index 37cab37f7d2..48f8f7f8f32 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1565,7 +1565,6 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, bool Field::quote_data(String *unquoted_string) { char escaped_string[IO_SIZE]; - char *unquoted_string_buffer= (char *)(unquoted_string->ptr()); DBUG_ENTER("Field::quote_data"); if (!needs_quotes()) @@ -4545,8 +4544,6 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) error= 1; } } - if (error > 1) - error= 2; #ifdef WORDS_BIGENDIAN if (table->s->db_low_byte_first) @@ -7113,7 +7110,7 @@ void Field_blob::get_key_image(char *buff, uint length, imagetype type) } get_ptr(&blob); gobj= Geometry::construct(&buffer, blob, blob_length); - if (gobj->get_mbr(&mbr, &dummy)) + if (!gobj || gobj->get_mbr(&mbr, &dummy)) bzero(buff, SIZEOF_STORED_DOUBLE*4); else { @@ -7442,7 +7439,7 @@ void Field_geom::get_key_image(char *buff, uint length, imagetype type) } get_ptr(&blob); gobj= Geometry::construct(&buffer, blob, blob_length); - if (gobj->get_mbr(&mbr, &dummy)) + if (!gobj || gobj->get_mbr(&mbr, &dummy)) bzero(buff, SIZEOF_STORED_DOUBLE*4); else { @@ -8239,16 +8236,13 @@ const char *Field_bit::unpack(char *to, const char *from) */ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg, - uchar *null_ptr_arg, uchar null_bit_arg, - uchar *bit_ptr_arg, uchar bit_ofs_arg, - enum utype unireg_check_arg, + uchar *null_ptr_arg, uchar null_bit_arg, + enum utype unireg_check_arg, const char *field_name_arg) - :Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, bit_ptr_arg, - bit_ofs_arg, unireg_check_arg, field_name_arg), + :Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, 0, 0, + unireg_check_arg, field_name_arg), create_length(len_arg) { - bit_ptr= 0; - bit_ofs= 0; bit_len= 0; field_length= ((len_arg + 7) & ~7) / 8; } @@ -8950,10 +8944,10 @@ Field *make_field(TABLE_SHARE *share, char *ptr, uint32 field_length, field_charset); case FIELD_TYPE_BIT: return f_bit_as_char(pack_flag) ? - new Field_bit_as_char(ptr, field_length, null_pos, null_bit, - bit_ptr, bit_offset, unireg_check, field_name) : - new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr, - bit_offset, unireg_check, field_name); + new Field_bit_as_char(ptr, field_length, null_pos, null_bit, + unireg_check, field_name) : + new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr, + bit_offset, unireg_check, field_name); default: // Impossible (Wrong version) break; diff --git a/sql/field.h b/sql/field.h index 806d0c3328a..15c54f65ef7 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1373,12 +1373,12 @@ public: } }; - + class Field_bit_as_char: public Field_bit { public: uchar create_length; Field_bit_as_char(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, - uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg); enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; } uint32 max_length() { return (uint32) create_length; } diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 2e785f13ea3..5f0adba6e8c 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -286,7 +286,8 @@ int ha_myisam::dump(THD* thd, int fd) if (fd < 0) { - my_net_write(net, "", 0); + if (my_net_write(net, "", 0)) + error = errno ? 
errno : EPIPE; net_flush(net); } @@ -420,12 +421,14 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) { uint old_testflag=param.testflag; param.testflag|=T_MEDIUM; - init_io_cache(¶m.read_cache, file->dfile, - my_default_record_cache_size, READ_CACHE, - share->pack.header_length, 1, MYF(MY_WME)); - error |= chk_data_link(¶m, file, param.testflag & T_EXTEND); - end_io_cache(&(param.read_cache)); - param.testflag=old_testflag; + if (!(error= init_io_cache(¶m.read_cache, file->dfile, + my_default_record_cache_size, READ_CACHE, + share->pack.header_length, 1, MYF(MY_WME)))) + { + error= chk_data_link(¶m, file, param.testflag & T_EXTEND); + end_io_cache(&(param.read_cache)); + } + param.testflag= old_testflag; } } if (!error) diff --git a/sql/item.cc b/sql/item.cc index fa5c2b5cc3b..6c410a3b8bd 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -985,7 +985,7 @@ Item_case_expr::this_item_addr(THD *thd, Item **) void Item_case_expr::print(String *str) { - str->append(STRING_WITH_LEN("case_expr@")); + VOID(str->append(STRING_WITH_LEN("case_expr@"))); str->qs_append(m_case_expr_id); } @@ -3868,7 +3868,7 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) name); break; case MYSQL_TYPE_BIT: - field= new Field_bit_as_char(NULL, max_length, null_ptr, 0, NULL, 0, + field= new Field_bit_as_char(NULL, max_length, null_ptr, 0, Field::NONE, name); break; default: diff --git a/sql/log_event.cc b/sql/log_event.cc index 15eb4c3aaf0..64a692d394d 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3342,6 +3342,10 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) case INSERT_ID_EVENT: msg="INSERT_ID"; break; + case INVALID_INT_EVENT: + default: // cannot happen + msg="INVALID_INT"; + break; } fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff)); fflush(file); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 9e20fa28239..887690aecc1 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -753,7 +753,6 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables, table_map read_tables, COND *conds, bool allow_null_cond, int *error) - { SQL_SELECT *select; DBUG_ENTER("make_select"); @@ -7059,10 +7058,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, if (!quick) return 0; /* no ranges found */ if (quick->init()) - { - delete quick; goto err; - } quick->records= records; if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error || @@ -8404,7 +8400,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) ha_rows cur_records; SEL_ARG *cur_index_tree= NULL; ha_rows cur_quick_prefix_records= 0; - uint cur_param_idx; + uint cur_param_idx=MAX_KEY; key_map cur_used_key_parts; uint pk= param->table->s->primary_key; @@ -8620,6 +8616,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) */ if (cur_read_cost < best_read_cost - (DBL_EPSILON * cur_read_cost)) { + DBUG_ASSERT(tree != 0 || cur_param_idx == MAX_KEY); index_info= cur_index_info; index= cur_index; best_read_cost= cur_read_cost; diff --git a/sql/opt_range.h b/sql/opt_range.h index a27b3607d73..bc2496b0769 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -706,7 +706,7 @@ class SQL_SELECT :public Sql_alloc { class FT_SELECT: public QUICK_RANGE_SELECT { public: FT_SELECT(THD *thd, TABLE *table, uint key) : - QUICK_RANGE_SELECT (thd, table, key, 1) { init(); } + QUICK_RANGE_SELECT (thd, table, key, 1) { VOID(init()); } ~FT_SELECT() { file->ft_end(); } int init() { return error=file->ft_init(); } int reset() { return 0; } diff --git a/sql/repl_failsafe.cc 
b/sql/repl_failsafe.cc index 960e0ac86cc..34dcd80a236 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -930,7 +930,8 @@ bool load_master_data(THD* thd) host was specified; there could have been a problem when replication started, which led to relay log's IO_CACHE to not be inited. */ - flush_master_info(active_mi, 0); + if (flush_master_info(active_mi, 0)) + sql_print_error("Failed to flush master info file"); } mysql_free_result(master_status_res); } diff --git a/sql/set_var.cc b/sql/set_var.cc index 1a86cd1aef7..17ae2352d95 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2495,7 +2495,6 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var) bool sys_var_sync_binlog_period::update(THD *thd, set_var *var) { - pthread_mutex_t *lock_log= mysql_bin_log.get_log_lock(); sync_binlog_period= (ulong) var->save_result.ulonglong_value; return 0; } diff --git a/sql/slave.cc b/sql/slave.cc index edca614159a..763db31c05d 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1742,7 +1742,8 @@ static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi) " to the relay log, " "SHOW SLAVE STATUS may be inaccurate"); rli->relay_log.harvest_bytes_written(&rli->log_space_total); - flush_master_info(mi, 1); + if (flush_master_info(mi, 1)) + sql_print_error("Failed to flush master info file"); delete ev; } else @@ -2233,7 +2234,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) pthread_mutex_unlock(&mi->rli.data_lock); pthread_mutex_unlock(&mi->data_lock); - + if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length())) DBUG_RETURN(TRUE); } @@ -2241,8 +2242,13 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) DBUG_RETURN(FALSE); } - -bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) +/* + RETURN + 2 - flush relay log failed + 1 - flush master info failed + 0 - all ok +*/ +int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) { IO_CACHE* file = &mi->file; char lbuf[22]; @@ -2261,8 +2267,9 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) When we come to this place in code, relay log may or not be initialized; the caller is responsible for setting 'flush_relay_log_cache' accordingly. */ - if (flush_relay_log_cache) - flush_io_cache(mi->rli.relay_log.get_log_file()); + if (flush_relay_log_cache && + flush_io_cache(mi->rli.relay_log.get_log_file())) + DBUG_RETURN(2); /* We flushed the relay log BEFORE the master.info file, because if we crash @@ -2274,13 +2281,13 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) */ /* - In certain cases this code may create master.info files that seems - corrupted, because of extra lines filled with garbage in the end - file (this happens if new contents take less space than previous - contents of file). But because of number of lines in the first line + In certain cases this code may create master.info files that seems + corrupted, because of extra lines filled with garbage in the end + file (this happens if new contents take less space than previous + contents of file). But because of number of lines in the first line of file we don't care about this garbage. 
*/ - + my_b_seek(file, 0L); my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n", LINES_IN_MASTER_INFO_WITH_SSL, @@ -2289,8 +2296,7 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) mi->password, mi->port, mi->connect_retry, (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert, mi->ssl_cipher, mi->ssl_key); - flush_io_cache(file); - DBUG_RETURN(0); + DBUG_RETURN(-flush_io_cache(file)); } @@ -3355,7 +3361,11 @@ reconnect done to recover from failed read"); sql_print_error("Slave I/O thread could not queue event from master"); goto err; } - flush_master_info(mi, 1); /* sure that we can flush the relay log */ + if (flush_master_info(mi, 1)) + { + sql_print_error("Failed to flush master info file"); + goto err; + } /* See if the relay logs take too much space. We don't lock mi->rli.log_space_lock here; this dirty read saves time diff --git a/sql/slave.h b/sql/slave.h index 6870aaca752..0b77d7f7c4f 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -231,7 +231,7 @@ int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len); int init_slave(); void init_slave_skip_errors(const char* arg); -bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache); +int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache); bool flush_relay_log_info(RELAY_LOG_INFO* rli); int register_slave_on_master(MYSQL* mysql); int terminate_slave_threads(MASTER_INFO* mi, int thread_mask, diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index dc56880a1a0..10599729672 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -951,7 +951,7 @@ bool acl_getroot_no_password(Security_context *sctx, char *user, char *host, DBUG_PRINT("enter", ("Host: '%s', Ip: '%s', User: '%s', db: '%s'", (host ? host : "(NULL)"), (ip ? ip : "(NULL)"), - (user ? user : "(NULL)"), (db ? db : "(NULL)"))); + user, (db ? db : "(NULL)"))); sctx->user= user; sctx->host= host; sctx->ip= ip; @@ -980,7 +980,7 @@ bool acl_getroot_no_password(Security_context *sctx, char *user, char *host, for (i=0 ; i < acl_users.elements ; i++) { acl_user= dynamic_element(&acl_users,i,ACL_USER*); - if ((!acl_user->user && (!user || !user[0])) || + if ((!acl_user->user && !user[0]) || (acl_user->user && strcmp(user, acl_user->user) == 0)) { if (compare_hostname(&acl_user->host, host, ip)) @@ -4980,8 +4980,6 @@ static int handle_grant_struct(uint struct_no, bool drop, } if (! user) user= ""; - if (! 
host) - host= ""; #ifdef EXTRA_DEBUG DBUG_PRINT("loop",("scan struct: %u index: %u user: '%s' host: '%s'", struct_no, idx, user, host)); diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 5ffa4fd76ed..e2e336eb891 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -287,7 +287,7 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) } -/* +/* Load database options file load_db_opt() @@ -313,68 +313,72 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) bzero((char*) create,sizeof(*create)); create->default_table_charset= thd->variables.collation_server; - + /* Check if options for this database are already in the hash */ if (!get_dbopt(path, create)) - DBUG_RETURN(0); - + DBUG_RETURN(0); + /* Otherwise, load options from the .opt file */ - if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) >= 0) - { - IO_CACHE cache; - init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0)); + if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0) + goto err1; - while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0) + IO_CACHE cache; + if (init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0))) + goto err2; + + while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0) + { + char *pos= buf+nbytes-1; + /* Remove end space and control characters */ + while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1])) + pos--; + *pos=0; + if ((pos= strchr(buf, '='))) { - char *pos= buf+nbytes-1; - /* Remove end space and control characters */ - while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1])) - pos--; - *pos=0; - if ((pos= strchr(buf, '='))) + if (!strncmp(buf,"default-character-set", (pos-buf))) { - if (!strncmp(buf,"default-character-set", (pos-buf))) - { - /* - Try character set name, and if it fails - try collation name, probably it's an old - 4.1.0 db.opt file, which didn't have - separate default-character-set and - default-collation commands. - */ - if (!(create->default_table_charset= - get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && - !(create->default_table_charset= - get_charset_by_name(pos+1, MYF(0)))) - { - sql_print_error("Error while loading database options: '%s':",path); - sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1); - create->default_table_charset= default_charset_info; - } - } - else if (!strncmp(buf,"default-collation", (pos-buf))) - { - if (!(create->default_table_charset= get_charset_by_name(pos+1, - MYF(0)))) - { - sql_print_error("Error while loading database options: '%s':",path); - sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1); - create->default_table_charset= default_charset_info; - } - } + /* + Try character set name, and if it fails + try collation name, probably it's an old + 4.1.0 db.opt file, which didn't have + separate default-character-set and + default-collation commands. 
+ */ + if (!(create->default_table_charset= + get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && + !(create->default_table_charset= + get_charset_by_name(pos+1, MYF(0)))) + { + sql_print_error("Error while loading database options: '%s':",path); + sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1); + create->default_table_charset= default_charset_info; + } + } + else if (!strncmp(buf,"default-collation", (pos-buf))) + { + if (!(create->default_table_charset= get_charset_by_name(pos+1, + MYF(0)))) + { + sql_print_error("Error while loading database options: '%s':",path); + sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1); + create->default_table_charset= default_charset_info; + } } } - end_io_cache(&cache); - my_close(file,MYF(0)); - /* - Put the loaded value into the hash. - Note that another thread could've added the same - entry to the hash after we called get_dbopt(), - but it's not an error, as put_dbopt() takes this - possibility into account. - */ - error= put_dbopt(path, create); } + /* + Put the loaded value into the hash. + Note that another thread could've added the same + entry to the hash after we called get_dbopt(), + but it's not an error, as put_dbopt() takes this + possibility into account. + */ + error= put_dbopt(path, create); + + end_io_cache(&cache); +err2: + my_close(file,MYF(0)); +err1: DBUG_RETURN(error); } diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 0715aeeaa5b..ea9bca57cc6 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -567,7 +567,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error); if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) || - (res->quick && res->quick->reset())) + (res && res->quick && res->quick->reset())) { delete res; res=0; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index a265d32e7c2..6655491ca57 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -686,7 +686,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view) DBUG_ASSERT(view->table != 0 && view->field_translation != 0); - bitmap_init(&used_fields, used_fields_buff, table->s->fields, 0); + VOID(bitmap_init(&used_fields, used_fields_buff, table->s->fields, 0)); bitmap_clear_all(&used_fields); view->contain_auto_increment= 0; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ed7e7dfb684..e8fcc18b86e 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1615,6 +1615,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_other, &LOCK_status); thd->enable_slow_log= opt_log_slow_admin_statements; db= thd->alloc(db_len + tbl_len + 2); + if (!db) + { + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + break; + } tbl_name= strmake(db, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); mysql_table_dump(thd, db, tbl_name, -1); @@ -1628,14 +1633,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_other, &LOCK_status); char *user= (char*) packet; char *passwd= strend(user)+1; - /* + /* Old clients send null-terminated string ('\0' for empty string) for password. New clients send the size (1 byte) + string (not null terminated, so also '\0' for empty string). */ - char db_buff[NAME_LEN+1]; // buffer to store db in utf8 + char db_buff[NAME_LEN+1]; // buffer to store db in utf8 char *db= passwd; - uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? 
+ uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? *passwd++ : strlen(passwd); db+= passwd_len + 1; #ifndef EMBEDDED_LIBRARY @@ -6631,6 +6636,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, #ifdef HAVE_REPLICATION if (options & REFRESH_MASTER) { + DBUG_ASSERT(thd); tmp_write_to_binlog= 0; if (reset_master(thd)) { diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index ed056f62fe3..bf9a6078a8f 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1201,7 +1201,12 @@ bool change_master(THD* thd, MASTER_INFO* mi) Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never a slave before). */ - flush_master_info(mi, 0); + if (flush_master_info(mi, 0)) + { + my_error(ER_RELAY_LOG_INIT, MYF(0), "Failed to flush master info file"); + unlock_slave_threads(mi); + DBUG_RETURN(TRUE); + } if (need_relay_log_purge) { relay_log_purge= 1; @@ -1311,14 +1316,15 @@ bool mysql_show_binlog_events(THD* thd) bool ret = TRUE; IO_CACHE log; File file = -1; - Format_description_log_event *description_event= new - Format_description_log_event(3); /* MySQL 4.0 by default */ Log_event::init_show_field_list(&field_list); if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); + Format_description_log_event *description_event= new + Format_description_log_event(3); /* MySQL 4.0 by default */ + /* Wait for handlers to insert any pending information into the binlog. For e.g. ndb which updates the binlog asynchronously diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 1e051ab34c4..b381c6198c0 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -781,7 +781,7 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) it's a keyword */ - packet->reserve(length*2 + 2); + VOID(packet->reserve(length*2 + 2)); quote_char= (char) q; packet->append("e_char, 1, system_charset_info); @@ -1097,13 +1097,13 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (key_part->field) append_identifier(thd,packet,key_part->field->field_name, strlen(key_part->field->field_name)); - if (!key_part->field || + if (key_part->field && (key_part->length != table->field[key_part->fieldnr-1]->key_length() && !(key_info->flags & HA_FULLTEXT))) { buff[0] = '('; - char* end=int10_to_str((long) key_part->length / + char* end=int10_to_str((long) key_part->length / key_part->field->charset()->mbmaxlen, buff + 1,10); *end++ = ')'; @@ -1856,7 +1856,8 @@ LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, { MEM_ROOT *mem= thd->mem_root; if (allocate_lex_string) - lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING)); + if (!(lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING)))) + return 0; lex_str->str= strmake_root(mem, str, length); lex_str->length= length; return lex_str; @@ -3115,7 +3116,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, /* I.e. 
we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS rather than in SHOW KEYS - */ + */ if (!tables->view) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, thd->net.last_errno, thd->net.last_error); @@ -3128,7 +3129,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, { TABLE *show_table= tables->table; KEY *key_info=show_table->key_info; - show_table->file->info(HA_STATUS_VARIABLE | + show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); for (uint i=0 ; i < show_table->s->keys ; i++,key_info++) @@ -3140,7 +3141,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, restore_record(table, s->default_values); table->field[1]->store(base_name, strlen(base_name), cs); table->field[2]->store(file_name, strlen(file_name), cs); - table->field[3]->store((longlong) ((key_info->flags & + table->field[3]->store((longlong) ((key_info->flags & HA_NOSAME) ? 0 : 1), TRUE); table->field[4]->store(base_name, strlen(base_name), cs); table->field[5]->store(key_info->name, strlen(key_info->name), cs); @@ -3163,12 +3164,12 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, table->field[9]->store((longlong) records, TRUE); table->field[9]->set_notnull(); } - if (!(key_info->flags & HA_FULLTEXT) && - (!key_part->field || - key_part->length != + if (!(key_info->flags & HA_FULLTEXT) && + (key_part->field && + key_part->length != show_table->field[key_part->fieldnr-1]->key_length())) { - table->field[10]->store((longlong) key_part->length / + table->field[10]->store((longlong) key_part->length / key_part->field->charset()->mbmaxlen); table->field[10]->set_notnull(); } diff --git a/sql/sql_test.cc b/sql/sql_test.cc index c4448ff8abe..bf86630d28c 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -450,7 +450,7 @@ void mysql_print_status() calc_sum_of_all_status(&tmp); printf("\nStatus information:\n\n"); - my_getwd(current_dir, sizeof(current_dir),MYF(0)); + VOID(my_getwd(current_dir, sizeof(current_dir),MYF(0))); printf("Current dir: %s\n", current_dir); printf("Running threads: %d Stack size: %ld\n", thread_count, (long) thread_stack); diff --git a/sql/table.cc b/sql/table.cc index b912683d371..b7920d0d530 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1640,7 +1640,10 @@ ulong get_form_pos(File file, uchar *head, TYPELIB *save_names) ret_value=uint4korr(pos); } if (! save_names) - my_free((gptr) buf,MYF(0)); + { + if (names) + my_free((gptr) buf,MYF(0)); + } else if (!names) bzero((char*) save_names,sizeof(save_names)); else diff --git a/sql/uniques.cc b/sql/uniques.cc index 367aed2d113..ad074f8b2b0 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -38,8 +38,8 @@ int unique_write_to_file(gptr key, element_count count, Unique *unique) { /* - Use unique->size (size of element stored in the tree) and not - unique->tree.size_of_element. The latter is different from unique->size + Use unique->size (size of element stored in the tree) and not + unique->tree.size_of_element. The latter is different from unique->size when tree implementation chooses to store pointer to key in TREE_ELEMENT (instead of storing the element itself there) */ @@ -63,27 +63,27 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, comp_func_fixed_arg); /* If the following fail's the next add will also fail */ my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16); - /* + /* If you change the following, change it in get_max_elements function, too. 
*/ max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size); - open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, - MYF(MY_WME)); + VOID(open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, + MYF(MY_WME))); } /* Calculate log2(n!) - + NOTES Stirling's approximate formula is used: - - n! ~= sqrt(2*M_PI*n) * (n/M_E)^n - + + n! ~= sqrt(2*M_PI*n) * (n/M_E)^n + Derivation of formula used for calculations is as follows: log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) = - + = (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2). */ @@ -94,7 +94,7 @@ inline double log2_n_fact(double x) /* - Calculate cost of merge_buffers function call for given sequence of + Calculate cost of merge_buffers function call for given sequence of input stream lengths and store the number of rows in result stream in *last. SYNOPSIS @@ -103,21 +103,21 @@ inline double log2_n_fact(double x) elem_size Size of element stored in buffer first Pointer to first merged element size last Pointer to last merged element size - + RETURN Cost of merge_buffers operation in disk seeks. - + NOTES It is assumed that no rows are eliminated during merge. - The cost is calculated as - + The cost is calculated as + cost(read_and_write) + cost(merge_comparisons). - - All bytes in the sequences is read and written back during merge so cost + + All bytes in the sequences is read and written back during merge so cost of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write) - + For comparisons cost calculations we assume that all merged sequences have - the same length, so each of total_buf_size elements will be added to a sort + the same length, so each of total_buf_size elements will be added to a sort heap with (n_buffers-1) elements. This gives the comparison cost: total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID; @@ -125,16 +125,16 @@ inline double log2_n_fact(double x) static double get_merge_buffers_cost(uint *buff_elems, uint elem_size, uint *first, uint *last) -{ +{ uint total_buf_elems= 0; for (uint *pbuf= first; pbuf <= last; pbuf++) total_buf_elems+= *pbuf; *last= total_buf_elems; - + int n_buffers= last - first + 1; /* Using log2(n)=log(n)/log(2) formula */ - return 2*((double)total_buf_elems*elem_size) / IO_SIZE + + return 2*((double)total_buf_elems*elem_size) / IO_SIZE + total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2); } @@ -142,13 +142,13 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size, /* Calculate cost of merging buffers into one in Unique::get, i.e. calculate how long (in terms of disk seeks) the two calls - merge_many_buffs(...); - merge_buffers(...); + merge_many_buffs(...); + merge_buffers(...); will take. SYNOPSIS get_merge_many_buffs_cost() - buffer buffer space for temporary data, at least + buffer buffer space for temporary data, at least Unique::get_cost_calc_buff_size bytes maxbuffer # of full buffers max_n_elems # of elements in first maxbuffer buffers @@ -156,12 +156,12 @@ static double get_merge_buffers_cost(uint *buff_elems, uint elem_size, elem_size size of buffer element NOTES - maxbuffer+1 buffers are merged, where first maxbuffer buffers contain + maxbuffer+1 buffers are merged, where first maxbuffer buffers contain max_n_elems elements each and last buffer contains last_n_elems elements. The current implementation does a dumb simulation of merge_many_buffs function actions. - + RETURN Cost of merge in disk seeks. 
*/ @@ -173,17 +173,17 @@ static double get_merge_many_buffs_cost(uint *buffer, register int i; double total_cost= 0.0; uint *buff_elems= buffer; /* #s of elements in each of merged sequences */ - - /* + + /* Set initial state: first maxbuffer sequences contain max_n_elems elements each, last sequence contains last_n_elems elements. */ for (i = 0; i < (int)maxbuffer; i++) - buff_elems[i]= max_n_elems; + buff_elems[i]= max_n_elems; buff_elems[maxbuffer]= last_n_elems; - /* - Do it exactly as merge_many_buff function does, calling + /* + Do it exactly as merge_many_buff function does, calling get_merge_buffers_cost to get cost of merge_buffers. */ if (maxbuffer >= MERGEBUFF2) @@ -194,17 +194,17 @@ static double get_merge_many_buffs_cost(uint *buffer, for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF) { total_cost+=get_merge_buffers_cost(buff_elems, elem_size, - buff_elems + i, + buff_elems + i, buff_elems + i + MERGEBUFF-1); lastbuff++; } total_cost+=get_merge_buffers_cost(buff_elems, elem_size, - buff_elems + i, + buff_elems + i, buff_elems + maxbuffer); maxbuffer= lastbuff; } } - + /* Simulate final merge_buff call. */ total_cost += get_merge_buffers_cost(buff_elems, elem_size, buff_elems, buff_elems + maxbuffer); @@ -213,7 +213,7 @@ static double get_merge_many_buffs_cost(uint *buffer, /* - Calculate cost of using Unique for processing nkeys elements of size + Calculate cost of using Unique for processing nkeys elements of size key_size using max_in_memory_size memory. SYNOPSIS @@ -223,12 +223,12 @@ static double get_merge_many_buffs_cost(uint *buffer, nkeys #of elements in Unique key_size size of each elements in bytes max_in_memory_size amount of memory Unique will be allowed to use - + RETURN Cost in disk seeks. - + NOTES - cost(using_unqiue) = + cost(using_unqiue) = cost(create_trees) + (see #1) cost(merge) + (see #2) cost(read_result) (see #3) @@ -237,42 +237,42 @@ static double get_merge_many_buffs_cost(uint *buffer, For each Unique::put operation there will be 2*log2(n+1) elements comparisons, where n runs from 1 tree_size (we assume that all added elements are different). Together this gives: - + n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!) - + then cost(tree_creation) = n_compares*ROWID_COMPARE_COST; Total cost of creating trees: (n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost. Approximate value of log2(N!) is calculated by log2_n_fact function. - + 2. Cost of merging. If only one tree is created by Unique no merging will be necessary. Otherwise, we model execution of merge_many_buff function and count - #of merges. (The reason behind this is that number of buffers is small, - while size of buffers is big and we don't want to loose precision with + #of merges. (The reason behind this is that number of buffers is small, + while size of buffers is big and we don't want to loose precision with O(x)-style formula) - + 3. If only one tree is created by Unique no disk io will happen. - Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume + Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume these will be random seeks. 
*/ -double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, +double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, ulong max_in_memory_size) { ulong max_elements_in_tree; ulong last_tree_elems; int n_full_trees; /* number of trees in unique - 1 */ double result; - - max_elements_in_tree= + + max_elements_in_tree= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size); n_full_trees= nkeys / max_elements_in_tree; last_tree_elems= nkeys % max_elements_in_tree; - + /* Calculate cost of creating trees */ result= 2*log2_n_fact(last_tree_elems + 1.0); if (n_full_trees) @@ -285,13 +285,13 @@ double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, if (!n_full_trees) return result; - - /* + + /* There is more then one tree and merging is necessary. First, add cost of writing all trees to disk, assuming that all disk writes are sequential. */ - result += DISK_SEEK_BASE_COST * n_full_trees * + result += DISK_SEEK_BASE_COST * n_full_trees * ceil(((double) key_size)*max_elements_in_tree / IO_SIZE); result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE); @@ -303,8 +303,8 @@ double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, return merge_cost; result += merge_cost; - /* - Add cost of reading the resulting sequence, assuming there were no + /* + Add cost of reading the resulting sequence, assuming there were no duplicate elements. */ result += ceil((double)key_size*nkeys/IO_SIZE); @@ -320,7 +320,7 @@ Unique::~Unique() } - /* Write tree to disk; clear tree */ + /* Write tree to disk; clear tree */ bool Unique::flush() { BUFFPEK file_ptr; @@ -359,7 +359,7 @@ Unique::reset() } elements= 0; } - + /* The comparison function, passed to queue_init() in merge_walk() must use comparison function of Uniques::tree, but compare members of struct @@ -386,7 +386,7 @@ C_MODE_END /* DESCRIPTION - Function is very similar to merge_buffers, but instead of writing sorted + Function is very similar to merge_buffers, but instead of writing sorted unique keys to the output file, it invokes walk_action for each key. This saves I/O if you need to pass through all unique keys only once. SYNOPSIS @@ -601,7 +601,7 @@ bool Unique::get(TABLE *table) bool error=1; /* Open cached file if it isn't open */ - outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), + outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), MYF(MY_ZEROFILL)); if (!outfile || ! my_b_inited(outfile) && @@ -618,7 +618,7 @@ bool Unique::get(TABLE *table) sort_param.keys= max_in_memory_size / sort_param.sort_length; sort_param.not_killable=1; - if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) * + if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) * sort_param.sort_length, MYF(0)))) return 1; @@ -633,7 +633,7 @@ bool Unique::get(TABLE *table) goto err; if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr, file_ptr, file_ptr+maxbuffer,0)) - goto err; + goto err; error=0; err: x_free((gptr) sort_buffer); |
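
The slave.cc hunks above change flush_master_info() from bool to int and document its return contract (0 = ok, 1 = flushing master.info failed, 2 = flushing the relay log failed); the callers in slave.cc, sql_repl.cc and repl_failsafe.cc now test the result instead of ignoring it. A hedged caller-side sketch of that contract — the stub function, enum names and error strings are stand-ins, not the server's MASTER_INFO-based API:

```cpp
#include <cstdio>

// Return contract documented in the merged slave.cc:
//   0 - all ok, 1 - flush master info failed, 2 - flush relay log failed
enum flush_result
{
  FLUSH_OK                 = 0,
  FLUSH_MASTER_INFO_FAILED = 1,
  FLUSH_RELAY_LOG_FAILED   = 2
};

// Stand-in for flush_master_info(mi, flush_relay_log_cache).
static int flush_master_info_stub(bool flush_relay_log_cache)
{
  (void) flush_relay_log_cache;
  return FLUSH_OK;                   // pretend both flushes succeeded
}

int main()
{
  switch (flush_master_info_stub(true))
  {
  case FLUSH_OK:
    break;                           // keep going, as the I/O thread does
  case FLUSH_RELAY_LOG_FAILED:
    std::fputs("Failed to flush relay log\n", stderr);
    return 1;                        // treat as fatal, mirroring the "goto err"
  case FLUSH_MASTER_INFO_FAILED:
  default:
    std::fputs("Failed to flush master info file\n", stderr);
    return 1;
  }
  std::puts("master.info flushed");
  return 0;
}
```

The real callers only test for non-zero and report a generic flush failure; distinguishing the individual codes, as above, is optional.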