Diffstat (limited to 'sql'): 134 files changed, 11704 insertions, 6742 deletions
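The core of this change set, visible in the sql/event_data_objects.cc hunks below, is that event timing fields (starts, ends, execute_at, last_executed) become my_time_t values in UTC, and get_next_time() now schedules the next run by rounding the time elapsed since STARTS up to the next whole interval, retrying while the candidate is not strictly in the future (to survive backward DST shifts). A minimal standalone sketch of that arithmetic for second-based intervals; the helper below is illustrative only and not part of the patch:

// Editorial sketch -- not part of the commit. It mirrors the rounding
// formula used by the new get_next_time() for second-based intervals:
// advance 'start' by whole multiples of the interval until the result
// is strictly after 'now'. All times are UTC seconds since epoch,
// like my_time_t in the patch.
#include <cassert>
#include <cstdio>
#include <ctime>

static time_t next_execution(time_t start, time_t now, long interval_sec)
{
  assert(start <= now && interval_sec > 0);
  time_t diff= now - start;
  /* Same idea as "interval.second= seconds_diff - seconds_diff % seconds + seconds" */
  time_t next= start + (diff - diff % interval_sec) + interval_sec;
  /*
    The real code may loop here when a backward DST shift makes the
    local-time result map onto a UTC value that is not yet in the future.
  */
  while (next <= now)
    next+= interval_sec;
  return next;
}

int main()
{
  time_t start= 1000, now= 1234;
  printf("next: %ld\n", (long) next_execution(start, now, 60)); /* prints 1240 */
  return 0;
}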
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 37707fe3963..002aabb91b0 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -69,6 +69,7 @@ ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc sql_tablespace.cc events.cc ../sql-common/my_user.c partition_info.cc rpl_utility.cc rpl_injector.cc sql_locale.cc rpl_rli.cc rpl_mi.cc sql_servers.cc + sql_connect.cc scheduler.cc ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h ${PROJECT_SOURCE_DIR}/include/mysqld_error.h diff --git a/sql/Makefile.am b/sql/Makefile.am index 43331e3d0c9..a85eb012f1d 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -26,7 +26,7 @@ INCLUDES = @ZLIB_INCLUDES@ \ WRAPLIBS= @WRAPLIBS@ SUBDIRS = share libexec_PROGRAMS = mysqld -noinst_PROGRAMS = gen_lex_hash +EXTRA_PROGRAMS = gen_lex_hash bin_PROGRAMS = mysql_tzinfo_to_sql gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ SUPPORTING_LIBS = $(top_builddir)/vio/libvio.a \ @@ -52,7 +52,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ sql_error.h field.h handler.h mysqld_suffix.h \ ha_partition.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \ - ha_ndbcluster_tables.h \ + ha_ndbcluster_tables.h rpl_constants.h \ opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \ log.h sql_show.h rpl_rli.h rpl_mi.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h \ @@ -64,10 +64,11 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ tztime.h my_decimal.h\ sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \ parse_file.h sql_view.h sql_trigger.h \ - sql_array.h sql_cursor.h events.h \ + sql_array.h sql_cursor.h events.h scheduler.h \ event_db_repository.h event_queue.h \ - sql_plugin.h authors.h sql_partition.h event_data_objects.h \ - partition_info.h partition_element.h event_scheduler.h \ + sql_plugin.h authors.h \ + event_data_objects.h event_scheduler.h \ + sql_partition.h partition_info.h partition_element.h \ contributors.h sql_servers.h mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ @@ -79,7 +80,8 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ lock.cc my_lock.c \ sql_string.cc sql_manager.cc sql_map.cc \ mysqld.cc password.c hash_filo.cc hostname.cc \ - set_var.cc sql_parse.cc sql_yacc.yy \ + sql_connect.cc scheduler.cc sql_parse.cc \ + set_var.cc sql_yacc.yy \ sql_base.cc table.cc sql_select.cc sql_insert.cc \ sql_prepare.cc sql_error.cc sql_locale.cc \ sql_update.cc sql_delete.cc uniques.cc sql_do.cc \ @@ -128,6 +130,7 @@ EXTRA_DIST = udf_example.c udf_example.def $(BUILT_MAINT_SRC) \ nt_servc.cc nt_servc.h message.mc CMakeLists.txt \ udf_example.c udf_example.def CLEANFILES = lex_hash.h sql_yacc.output +DISTCLEANFILES = $(EXTRA_PROGRAMS) MAINTAINERCLEANFILES = $(BUILT_MAINT_SRC) AM_YFLAGS = -d --verbose @@ -150,11 +153,13 @@ link_sources: mysql_tzinfo_to_sql.cc mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES) $(CXXCOMPILE) -c $(INCLUDES) -DTZINFO2SQL $< -# FIXME seems like now "lex_hash.h" differs depending on configure -# flags, so can't pregenerate and include in source TAR. Revert to -# dist pregenerated if this changes, so the file doesn't differ. 
-lex_hash.h: gen_lex_hash$(EXEEXT) - ./gen_lex_hash$(EXEEXT) > $@ +# This generates lex_hash.h +# NOTE Built sources should depend on their sources not the tool +# this avoid the rebuild of the built files in a source dist +lex_hash.h: gen_lex_hash.cc lex.h + $(MAKE) $(AM_MAKEFLAGS) gen_lex_hash$(EXEEXT) + ./gen_lex_hash$(EXEEXT) > $@-t + $(MV) $@-t $@ # the following three should eventually be moved out of this directory ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h diff --git a/sql/authors.h b/sql/authors.h index 48b807c7884..dfe3b143e2f 100644 --- a/sql/authors.h +++ b/sql/authors.h @@ -66,6 +66,7 @@ struct show_table_authors_st show_table_authors[]= { "Parser, port to OS/2, storage engines and some random stuff" }, { "Yuri Dario", "", "OS/2 port" }, { "Andrei Elkin", "Espoo, Finland", "Replication" }, + { "Patrick Galbraith", "Sharon, NH", "Federated Engine, mysqlslap" }, { "Sergei Golubchik", "Kerpen, Germany", "Full-text search, precision math" }, { "Lenz Grimmer", "Hamburg, Germany", diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 07575a6d33a..e0b5ec23418 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -20,6 +20,8 @@ #include "event_db_repository.h" #include "sp_head.h" +/* That's a provisional solution */ +extern Event_db_repository events_event_db_repository; #define EVEX_MAX_INTERVAL_VALUE 1000000000L @@ -30,6 +32,47 @@ event_change_security_context(THD *thd, LEX_STRING user, LEX_STRING host, static void event_restore_security_context(THD *thd, Security_context *backup); + +/* + Initiliazes dbname and name of an Event_queue_element_for_exec + object + + SYNOPSIS + Event_queue_element_for_exec::init() + + RETURN VALUE + FALSE OK + TRUE Error (OOM) +*/ + +bool +Event_queue_element_for_exec::init(LEX_STRING db, LEX_STRING n) +{ + if (!(dbname.str= my_strndup(db.str, dbname.length= db.length, MYF(MY_WME)))) + return TRUE; + if (!(name.str= my_strndup(n.str, name.length= n.length, MYF(MY_WME)))) + { + my_free((gptr) dbname.str, MYF(0)); + return TRUE; + } + return FALSE; +} + + +/* + Destructor + + SYNOPSIS + Event_queue_element_for_exec::~Event_queue_element_for_exec() +*/ + +Event_queue_element_for_exec::~Event_queue_element_for_exec() +{ + my_free((gptr) dbname.str, MYF(0)); + my_free((gptr) name.str, MYF(0)); +} + + /* Returns a new instance @@ -38,7 +81,7 @@ event_restore_security_context(THD *thd, Security_context *backup); RETURN VALUE Address or NULL in case of error - + NOTE Created on THD's mem_root */ @@ -58,7 +101,8 @@ Event_parse_data::new_instance(THD *thd) */ Event_parse_data::Event_parse_data() - :on_completion(ON_COMPLETION_DROP), status(ENABLED), + :on_completion(Event_basic::ON_COMPLETION_DROP), + status(Event_basic::ENABLED), do_not_create(FALSE), item_starts(NULL), item_ends(NULL), item_execute_at(NULL), starts_null(TRUE), ends_null(TRUE), execute_at_null(TRUE), item_expression(NULL), expression(0) @@ -66,9 +110,7 @@ Event_parse_data::Event_parse_data() DBUG_ENTER("Event_parse_data::Event_parse_data"); /* Actually in the parser STARTS is always set */ - set_zero_time(&starts, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&ends, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + starts= ends= execute_at= 0; body.str= comment.str= NULL; body.length= comment.length= 0; @@ -127,13 +169,13 @@ Event_parse_data::init_body(THD *thd) (long) body_begin, (long) thd->lex->ptr)); body.length= thd->lex->ptr - body_begin; - const uchar *body_end= body_begin + body.length - 1; + const char 
*body_end= body_begin + body.length - 1; /* Trim nuls or close-comments ('*'+'/') or spaces at the end */ while (body_begin < body_end) { - if ((*body_end == '\0') || + if ((*body_end == '\0') || (my_isspace(thd->variables.character_set_client, *body_end))) { /* consume NULs and meaningless whitespace */ --body.length; @@ -145,7 +187,7 @@ Event_parse_data::init_body(THD *thd) consume closing comments This is arguably wrong, but it's the best we have until the parser is - changed to be smarter. FIXME PARSER + changed to be smarter. FIXME PARSER See also the sp_head code, where something like this is done also. @@ -181,6 +223,55 @@ Event_parse_data::init_body(THD *thd) /* + This function is called on CREATE EVENT or ALTER EVENT. When either + ENDS or AT is in the past, we are trying to create an event that + will never be executed. If it has ON COMPLETION NOT PRESERVE + (default), then it would normally be dropped already, so on CREATE + EVENT we give a warning, and do not create anyting. On ALTER EVENT + we give a error, and do not change the event. + + If the event has ON COMPLETION PRESERVE, then we see if the event is + created or altered to the ENABLED (default) state. If so, then we + give a warning, and change the state to DISABLED. + + Otherwise it is a valid event in ON COMPLETION PRESERVE DISABLE + state. +*/ + +void +Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc) +{ + if (ltime_utc >= (my_time_t) thd->query_start()) + return; + + if (on_completion == Event_basic::ON_COMPLETION_DROP) + { + switch (thd->lex->sql_command) { + case SQLCOM_CREATE_EVENT: + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_EVENT_CANNOT_CREATE_IN_THE_PAST, + ER(ER_EVENT_CANNOT_CREATE_IN_THE_PAST)); + break; + case SQLCOM_ALTER_EVENT: + my_error(ER_EVENT_CANNOT_ALTER_IN_THE_PAST, MYF(0)); + break; + default: + DBUG_ASSERT(0); + } + + do_not_create= TRUE; + } + else if (status == Event_basic::ENABLED) + { + status= Event_basic::DISABLED; + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_EVENT_EXEC_TIME_IN_THE_PAST, + ER(ER_EVENT_EXEC_TIME_IN_THE_PAST)); + } +} + + +/* Sets time for execution for one-time event. SYNOPSIS @@ -197,8 +288,7 @@ Event_parse_data::init_execute_at(THD *thd) { my_bool not_used; TIME ltime; - my_time_t t; - TIME time_tmp; + my_time_t ltime_utc; DBUG_ENTER("Event_parse_data::init_execute_at"); @@ -207,41 +297,26 @@ Event_parse_data::init_execute_at(THD *thd) if (item_execute_at->fix_fields(thd, &item_execute_at)) goto wrong_value; - + /* no starts and/or ends in case of execute_at */ DBUG_PRINT("info", ("starts_null && ends_null should be 1 is %d", (starts_null && ends_null))); DBUG_ASSERT(starts_null && ends_null); - /* let's check whether time is in the past */ - thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, - (my_time_t) thd->query_start()); - if ((not_used= item_execute_at->get_date(<ime, TIME_NO_ZERO_DATE))) goto wrong_value; - if (TIME_to_ulonglong_datetime(<ime) < - TIME_to_ulonglong_datetime(&time_tmp)) - { - my_error(ER_EVENT_EXEC_TIME_IN_THE_PAST, MYF(0)); - DBUG_RETURN(ER_WRONG_VALUE); - } - - /* - This may result in a 1970-01-01 date if ltime is > 2037-xx-xx. - CONVERT_TZ has similar problem. 
- mysql_priv.h currently lists - #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp()) - */ - my_tz_UTC->gmt_sec_to_TIME(<ime,t=TIME_to_timestamp(thd,<ime,¬_used)); - if (!t) + ltime_utc= TIME_to_timestamp(thd,<ime,¬_used); + if (!ltime_utc) { DBUG_PRINT("error", ("Execute AT after year 2037")); goto wrong_value; } + check_if_in_the_past(thd, ltime_utc); + execute_at_null= FALSE; - execute_at= ltime; + execute_at= ltime_utc; DBUG_RETURN(0); wrong_value: @@ -381,8 +456,8 @@ int Event_parse_data::init_starts(THD *thd) { my_bool not_used; - TIME ltime, time_tmp; - my_time_t t; + TIME ltime; + my_time_t ltime_utc; DBUG_ENTER("Event_parse_data::init_starts"); if (!item_starts) @@ -394,36 +469,15 @@ Event_parse_data::init_starts(THD *thd) if ((not_used= item_starts->get_date(<ime, TIME_NO_ZERO_DATE))) goto wrong_value; - /* - Let's check whether time is in the past. - Note: This call is not post year 2038 safe. That's because - thd->query_start() is of time_t, while gmt_sec_to_TIME() - wants my_time_t. In the case time_t is larger than my_time_t - an overflow might happen and events subsystem will not work as - expected. - */ - thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, - (my_time_t) thd->query_start()); - - DBUG_PRINT("info",("now: %ld starts: %ld", - (long) TIME_to_ulonglong_datetime(&time_tmp), - (long) TIME_to_ulonglong_datetime(<ime))); - if (TIME_to_ulonglong_datetime(<ime) < - TIME_to_ulonglong_datetime(&time_tmp)) + ltime_utc= TIME_to_timestamp(thd, <ime, ¬_used); + if (!ltime_utc) goto wrong_value; - /* - Again, after 2038 this code won't work. As - mysql_priv.h currently lists - #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp()) - */ - my_tz_UTC->gmt_sec_to_TIME(<ime,t=TIME_to_timestamp(thd, <ime, - ¬_used)); - if (!t) - goto wrong_value; + DBUG_PRINT("info",("now: %ld starts: %ld", + (long) thd->query_start(), (long) ltime_utc)); - starts= ltime; starts_null= FALSE; + starts= ltime_utc; DBUG_RETURN(0); wrong_value: @@ -455,9 +509,9 @@ wrong_value: int Event_parse_data::init_ends(THD *thd) { - TIME ltime, ltime_now; my_bool not_used; - my_time_t t; + TIME ltime; + my_time_t ltime_utc; DBUG_ENTER("Event_parse_data::init_ends"); if (!item_ends) @@ -470,34 +524,19 @@ Event_parse_data::init_ends(THD *thd) if ((not_used= item_ends->get_date(<ime, TIME_NO_ZERO_DATE))) goto error_bad_params; - /* - Again, after 2038 this code won't work. As - mysql_priv.h currently lists - #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp()) - */ - DBUG_PRINT("info", ("get the UTC time")); - my_tz_UTC->gmt_sec_to_TIME(<ime,t=TIME_to_timestamp(thd, <ime, - ¬_used)); - if (!t) + ltime_utc= TIME_to_timestamp(thd, <ime, ¬_used); + if (!ltime_utc) goto error_bad_params; /* Check whether ends is after starts */ DBUG_PRINT("info", ("ENDS after STARTS?")); - if (!starts_null && my_time_compare(&starts, <ime) != -1) + if (!starts_null && starts >= ltime_utc) goto error_bad_params; - /* - The parser forces starts to be provided but one day STARTS could be - set before NOW() and in this case the following check should be done. - Check whether ENDS is not in the past. 
- */ - DBUG_PRINT("info", ("ENDS after NOW?")); - my_tz_UTC->gmt_sec_to_TIME(<ime_now, thd->query_start()); - if (my_time_compare(<ime_now, <ime) == 1) - goto error_bad_params; + check_if_in_the_past(thd, ltime_utc); - ends= ltime; ends_null= FALSE; + ends= ltime_utc; DBUG_RETURN(0); error_bad_params: @@ -550,9 +589,9 @@ Event_parse_data::check_parse_data(THD *thd) init_name(thd, identifier); init_definer(thd); - ret= init_execute_at(thd) || init_interval(thd) || init_starts(thd) || init_ends(thd); + check_originator_id(thd); DBUG_RETURN(ret); } @@ -568,16 +607,18 @@ Event_parse_data::check_parse_data(THD *thd) void Event_parse_data::init_definer(THD *thd) { - int definer_user_len; - int definer_host_len; DBUG_ENTER("Event_parse_data::init_definer"); - DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx " - "thd->sec_ctx->priv_user: 0x%lx", (long) thd->mem_root, - (long) thd->security_ctx->priv_user)); + DBUG_ASSERT(thd->lex->definer); + + const char *definer_user= thd->lex->definer->user.str; + const char *definer_host= thd->lex->definer->host.str; + int definer_user_len= thd->lex->definer->user.length; + int definer_host_len= thd->lex->definer->host.length; - definer_user_len= strlen(thd->security_ctx->priv_user); - definer_host_len= strlen(thd->security_ctx->priv_host); + DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx " + "definer_user: 0x%lx", (long) thd->mem_root, + (long) definer_user)); /* + 1 for @ */ DBUG_PRINT("info",("init definer as whole")); @@ -585,12 +626,11 @@ Event_parse_data::init_definer(THD *thd) definer.str= thd->alloc(definer.length + 1); DBUG_PRINT("info",("copy the user")); - memcpy(definer.str, thd->security_ctx->priv_user, definer_user_len); + memcpy(definer.str, definer_user, definer_user_len); definer.str[definer_user_len]= '@'; DBUG_PRINT("info",("copy the host")); - memcpy(definer.str + definer_user_len + 1, thd->security_ctx->priv_host, - definer_host_len); + memcpy(definer.str + definer_user_len + 1, definer_host, definer_host_len); definer.str[definer.length]= '\0'; DBUG_PRINT("info",("definer [%s] initted", definer.str)); @@ -599,6 +639,31 @@ Event_parse_data::init_definer(THD *thd) /* + Set the originator id of the event to the server_id if executing on + the master or set to the server_id of the master if executing on + the slave. If executing on slave, also set status to SLAVESIDE_DISABLED. + + SYNOPSIS + Event_parse_data::check_originator_id() +*/ +void Event_parse_data::check_originator_id(THD *thd) +{ + /* Disable replicated events on slave. */ + if ((thd->system_thread == SYSTEM_THREAD_SLAVE_SQL) || + (thd->system_thread == SYSTEM_THREAD_SLAVE_IO)) + { + DBUG_PRINT("info", ("Invoked object status set to SLAVESIDE_DISABLED.")); + if ((status == Event_basic::ENABLED) || + (status == Event_basic::DISABLED)) + status = Event_basic::SLAVESIDE_DISABLED; + originator = thd->server_id; + } + else + originator = server_id; +} + + +/* Constructor SYNOPSIS @@ -612,6 +677,7 @@ Event_basic::Event_basic() init_alloc_root(&mem_root, 256, 512); dbname.str= name.str= NULL; dbname.length= name.length= 0; + time_zone= NULL; DBUG_VOID_RETURN; } @@ -662,7 +728,7 @@ Event_basic::load_string_fields(Field **fields, ...) ret= TRUE; break; } - field_value->length= strlen(field_value->str); + field_value->length= strlen(field_value->str); field_name= (enum enum_events_table_field) va_arg(args, int); } @@ -672,6 +738,16 @@ Event_basic::load_string_fields(Field **fields, ...) 
} +bool +Event_basic::load_time_zone(THD *thd, const LEX_STRING tz_name) +{ + String str(tz_name.str, &my_charset_latin1); + time_zone= my_tz_find(thd, &str); + + return (time_zone == NULL); +} + + /* Constructor @@ -686,10 +762,7 @@ Event_queue_element::Event_queue_element(): { DBUG_ENTER("Event_queue_element::Event_queue_element"); - set_zero_time(&starts, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&ends, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&last_executed, MYSQL_TIMESTAMP_DATETIME); + starts= ends= execute_at= last_executed= 0; starts_null= ends_null= execute_at_null= TRUE; DBUG_VOID_RETURN; @@ -731,7 +804,7 @@ Event_timed::Event_timed(): */ Event_timed::~Event_timed() -{ +{ } @@ -743,7 +816,7 @@ Event_timed::~Event_timed() */ Event_job_data::Event_job_data() - :thd(NULL), sphead(NULL), sql_mode(0) + :sphead(NULL), sql_mode(0) { } @@ -789,7 +862,7 @@ Event_timed::init() Loads an event's body from a row from mysql.event SYNOPSIS - Event_job_data::load_from_row(MEM_ROOT *mem_root, TABLE *table) + Event_job_data::load_from_row(THD *thd, TABLE *table) RETURN VALUE 0 OK @@ -802,7 +875,7 @@ Event_timed::init() */ int -Event_job_data::load_from_row(TABLE *table) +Event_job_data::load_from_row(THD *thd, TABLE *table) { char *ptr; uint len; @@ -814,9 +887,12 @@ Event_job_data::load_from_row(TABLE *table) if (table->s->fields != ET_FIELD_COUNT) goto error; + LEX_STRING tz_name; load_string_fields(table->field, ET_FIELD_DB, &dbname, ET_FIELD_NAME, &name, ET_FIELD_BODY, &body, ET_FIELD_DEFINER, &definer, - ET_FIELD_COUNT); + ET_FIELD_TIME_ZONE, &tz_name, ET_FIELD_COUNT); + if (load_time_zone(thd, tz_name)) + goto error; ptr= strchr(definer.str, '@'); @@ -843,7 +919,7 @@ error: Loads an event from a row from mysql.event SYNOPSIS - Event_queue_element::load_from_row(MEM_ROOT *mem_root, TABLE *table) + Event_queue_element::load_from_row(THD *thd, TABLE *table) RETURN VALUE 0 OK @@ -856,10 +932,10 @@ error: */ int -Event_queue_element::load_from_row(TABLE *table) +Event_queue_element::load_from_row(THD *thd, TABLE *table) { char *ptr; - bool res1, res2; + TIME time; DBUG_ENTER("Event_queue_element::load_from_row"); @@ -869,29 +945,44 @@ Event_queue_element::load_from_row(TABLE *table) if (table->s->fields != ET_FIELD_COUNT) goto error; + LEX_STRING tz_name; load_string_fields(table->field, ET_FIELD_DB, &dbname, ET_FIELD_NAME, &name, - ET_FIELD_DEFINER, &definer, ET_FIELD_COUNT); + ET_FIELD_DEFINER, &definer, + ET_FIELD_TIME_ZONE, &tz_name, ET_FIELD_COUNT); + if (load_time_zone(thd, tz_name)) + goto error; starts_null= table->field[ET_FIELD_STARTS]->is_null(); - res1= table->field[ET_FIELD_STARTS]->get_date(&starts, TIME_NO_ZERO_DATE); + if (!starts_null) + { + table->field[ET_FIELD_STARTS]->get_date(&time, TIME_NO_ZERO_DATE); + starts= sec_since_epoch_TIME(&time); + } ends_null= table->field[ET_FIELD_ENDS]->is_null(); - res2= table->field[ET_FIELD_ENDS]->get_date(&ends, TIME_NO_ZERO_DATE); + if (!ends_null) + { + table->field[ET_FIELD_ENDS]->get_date(&time, TIME_NO_ZERO_DATE); + ends= sec_since_epoch_TIME(&time); + } if (!table->field[ET_FIELD_INTERVAL_EXPR]->is_null()) expression= table->field[ET_FIELD_INTERVAL_EXPR]->val_int(); else expression= 0; /* - If res1 and res2 are TRUE then both fields are empty. + If neigher STARTS and ENDS is set, then both fields are empty. Hence, if ET_FIELD_EXECUTE_AT is empty there is an error. 
*/ execute_at_null= table->field[ET_FIELD_EXECUTE_AT]->is_null(); DBUG_ASSERT(!(starts_null && ends_null && !expression && execute_at_null)); - if (!expression && - table->field[ET_FIELD_EXECUTE_AT]->get_date(&execute_at, - TIME_NO_ZERO_DATE)) - goto error; + if (!expression && !execute_at_null) + { + if (table->field[ET_FIELD_EXECUTE_AT]->get_date(&time, + TIME_NO_ZERO_DATE)) + goto error; + execute_at= sec_since_epoch_TIME(&time); + } /* We load the interval type from disk as string and then map it to @@ -918,17 +1009,35 @@ Event_queue_element::load_from_row(TABLE *table) interval= (interval_type) i; } - table->field[ET_FIELD_LAST_EXECUTED]->get_date(&last_executed, - TIME_NO_ZERO_DATE); + if (!table->field[ET_FIELD_LAST_EXECUTED]->is_null()) + { + table->field[ET_FIELD_LAST_EXECUTED]->get_date(&time, + TIME_NO_ZERO_DATE); + last_executed= sec_since_epoch_TIME(&time); + } last_executed_changed= FALSE; - if ((ptr= get_field(&mem_root, table->field[ET_FIELD_STATUS])) == NullS) goto error; DBUG_PRINT("load_from_row", ("Event [%s] is [%s]", name.str, ptr)); - status= (ptr[0]=='E'? Event_queue_element::ENABLED: - Event_queue_element::DISABLED); + + /* Set event status (ENABLED | SLAVESIDE_DISABLED | DISABLED) */ + switch (ptr[0]) + { + case 'E' : + status = Event_queue_element::ENABLED; + break; + case 'S' : + status = Event_queue_element::SLAVESIDE_DISABLED; + break; + case 'D' : + status = Event_queue_element::DISABLED; + break; + } + if ((ptr= get_field(&mem_root, table->field[ET_FIELD_ORIGINATOR])) == NullS) + goto error; + originator = table->field[ET_FIELD_ORIGINATOR]->val_int(); /* ToDo : Andrey . Find a way not to allocate ptr on event_mem_root */ if ((ptr= get_field(&mem_root, @@ -948,7 +1057,7 @@ error: Loads an event from a row from mysql.event SYNOPSIS - Event_timed::load_from_row(MEM_ROOT *mem_root, TABLE *table) + Event_timed::load_from_row(THD *thd, TABLE *table) RETURN VALUE 0 OK @@ -961,14 +1070,14 @@ error: */ int -Event_timed::load_from_row(TABLE *table) +Event_timed::load_from_row(THD *thd, TABLE *table) { char *ptr; uint len; DBUG_ENTER("Event_timed::load_from_row"); - if (Event_queue_element::load_from_row(table)) + if (Event_queue_element::load_from_row(thd, table)) goto error; load_string_fields(table->field, ET_FIELD_BODY, &body, ET_FIELD_COUNT); @@ -1004,11 +1113,30 @@ error: /* - Computes the sum of a timestamp plus interval. Presumed is that at least one - previous execution has occured. + add_interval() adds a specified interval to time 'ltime' in time + zone 'time_zone', and returns the result converted to the number of + seconds since epoch (aka Unix time; in UTC time zone). Zero result + means an error. +*/ +static +my_time_t +add_interval(TIME *ltime, const Time_zone *time_zone, + interval_type scale, INTERVAL interval) +{ + if (date_add_interval(ltime, scale, interval)) + return 0; + + my_bool not_used; + return time_zone->TIME_to_gmt_sec(ltime, ¬_used); +} + + +/* + Computes the sum of a timestamp plus interval. SYNOPSIS - get_next_time(TIME *start, int interval_value, interval_type interval) + get_next_time() + time_zone event time zone next the sum start add interval_value to this time time_now current time @@ -1025,26 +1153,19 @@ error: seconds as resolution for computation. 2) In all other cases - MONTH, QUARTER, YEAR we use MONTH as resolution and PERIOD_DIFF()'s implementation - 3) We get the difference between time_now and `start`, then divide it - by the months, respectively seconds and round up. 
Then we multiply - monts/seconds by the rounded value and add it to `start` -> we get - the next execution time. */ static -bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, +bool get_next_time(const Time_zone *time_zone, my_time_t *next, + my_time_t start, my_time_t time_now, int i_value, interval_type i_type) { - bool ret; - INTERVAL interval; - TIME tmp; - longlong months=0, seconds=0; DBUG_ENTER("get_next_time"); - DBUG_PRINT("enter", ("start: %lu now: %lu", - (long) TIME_to_ulonglong_datetime(start), - (long) TIME_to_ulonglong_datetime(time_now))); + DBUG_PRINT("enter", ("start: %lu now: %lu", (long) start, (long) time_now)); - bzero(&interval, sizeof(interval)); + DBUG_ASSERT(start <= time_now); + + longlong months=0, seconds=0; switch (i_type) { case INTERVAL_YEAR: @@ -1091,84 +1212,151 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, DBUG_ASSERT(0); } DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months)); + + TIME local_start; + TIME local_now; + + /* Convert times from UTC to local. */ + { + time_zone->gmt_sec_to_TIME(&local_start, start); + time_zone->gmt_sec_to_TIME(&local_now, time_now); + } + + INTERVAL interval; + bzero(&interval, sizeof(interval)); + my_time_t next_time= 0; + if (seconds) { longlong seconds_diff; long microsec_diff; + bool negative= calc_time_diff(&local_now, &local_start, 1, + &seconds_diff, µsec_diff); + if (!negative) + { + /* + The formula below returns the interval that, when added to + local_start, will always give the time in the future. + */ + interval.second= seconds_diff - seconds_diff % seconds + seconds; + next_time= add_interval(&local_start, time_zone, + INTERVAL_SECOND, interval); + if (next_time == 0) + goto done; + } - if (calc_time_diff(time_now, start, 1, &seconds_diff, µsec_diff)) + if (next_time <= time_now) { - DBUG_PRINT("error", ("negative difference")); - DBUG_ASSERT(0); + /* + If 'negative' is true above, then 'next_time == 0', and + 'next_time <= time_now' is also true. If negative is false, + then next_time was set, but perhaps to the value that is less + then time_now. See below for elaboration. + */ + DBUG_ASSERT(negative || next_time > 0); + + /* + If local_now < local_start, i.e. STARTS time is in the future + according to the local time (it always in the past according + to UTC---this is a prerequisite of this function), then + STARTS is almost always in the past according to the local + time too. However, in the time zone that has backward + Daylight Saving Time shift, the following may happen: suppose + we have a backward DST shift at certain date after 2:59:59, + i.e. local time goes 1:59:59, 2:00:00, ... , 2:59:59, (shift + here) 2:00:00 (again), ... , 2:59:59 (again), 3:00:00, ... . + Now suppose the time has passed the first 2:59:59, has been + shifted backward, and now is (the second) 2:20:00. The user + does CREATE EVENT with STARTS 'current-date 2:40:00'. Local + time 2:40:00 from create statement is treated by time + functions as the first such time, so according to UTC it comes + before the second 2:20:00. But according to local time it is + obviously in the future, so we end up in this branch. + + Since we are in the second pass through 2:00:00--2:59:59, and + any local time form this interval is treated by system + functions as the time from the first pass, we have to find the + time for the next execution that is past the DST-affected + interval (past the second 2:59:59 for our example, + i.e. starting from 3:00:00). 
We do this in the loop until the + local time is mapped onto future UTC time. 'start' time is in + the past, so we may use 'do { } while' here, and add the first + interval right away. + + Alternatively, it could be that local_now >= local_start. Now + for the example above imagine we do CREATE EVENT with STARTS + 'current-date 2:10:00'. Local start 2:10 is in the past (now + is local 2:20), so we add an interval, and get next execution + time, say, 2:40. It is in the future according to local time, + but, again, since we are in the second pass through + 2:00:00--2:59:59, 2:40 will be converted into UTC time in the + past. So we will end up in this branch again, and may add + intervals in a 'do { } while' loop. + + Note that for any given event we may end up here only if event + next execution time will map to the time interval that is + passed twice, and only if the server was started during the + second pass, or the event is being created during the second + pass. After that, we never will get here (unless we again + start the server during the second pass). In other words, + such a condition is extremely rare. + */ + interval.second= seconds; + do + { + next_time= add_interval(&local_start, time_zone, + INTERVAL_SECOND, interval); + if (next_time == 0) + goto done; + } + while (next_time <= time_now); } - uint multiplier= (uint) (seconds_diff / seconds); - /* - Increase the multiplier is the modulus is not zero to make round up. - Or if time_now==start then we should not execute the same - event two times for the same time - get the next exec if the modulus is not - */ - DBUG_PRINT("info", ("multiplier: %d", multiplier)); - if (seconds_diff % seconds || (!seconds_diff && last_exec->year) || - TIME_to_ulonglong_datetime(time_now) == - TIME_to_ulonglong_datetime(last_exec)) - ++multiplier; - interval.second= seconds * multiplier; - DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier, - (ulong) interval.second)); - tmp= *start; - if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval))) - *next= tmp; } else { - /* PRESUMED is that at least one execution took already place */ - int diff_months= (time_now->year - start->year)*12 + - (time_now->month - start->month); + long diff_months= (long) (local_now.year - local_start.year)*12 + + (local_now.month - local_start.month); /* - Note: If diff_months is 0 that means we are in the same month as the - last execution which is also the first execution. + Unlike for seconds above, the formula below returns the interval + that, when added to the local_start, will give the time in the + past, or somewhere in the current month. We are interested in + the latter case, to see if this time has already passed, or is + yet to come this month. + + Note that the time is guaranteed to be in the past unless + (diff_months % months == 0), but no good optimization is + possible here, because (diff_months % months == 0) is what will + happen most of the time, as get_next_time() will be called right + after the execution of the event. We could pass last_executed + time to this function, and see if the execution has already + happened this month, but for that we will have to convert + last_executed from seconds since epoch to local broken-down + time, and this will greatly reduce the effect of the + optimization. So instead we keep the code simple and clean. 
*/ - /* - First we try with the smaller if not then + 1, because if we try with - directly with +1 we will be after the current date but it could be that - we will be 1 month ahead, so 2 steps are necessary. - */ - interval.month= (ulong) ((diff_months / months)*months); - /* - Check if the same month as last_exec (always set - prerequisite) - An event happens at most once per month so there is no way to - schedule it two times for the current month. This saves us from two - calls to date_add_interval() if the event was just executed. But if - the scheduler is started and there was at least 1 scheduled date - skipped this one does not help and two calls to date_add_interval() - will be done, which is a bit more expensive but compared to the - rareness of the case is neglectable. - */ - if (time_now->year == last_exec->year && - time_now->month == last_exec->month) - interval.month+= (ulong) months; - - tmp= *start; - if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval))) + interval.month= (ulong) (diff_months - diff_months % months); + next_time= add_interval(&local_start, time_zone, + INTERVAL_MONTH, interval); + if (next_time == 0) goto done; - /* If `tmp` is still before time_now just add one more time the interval */ - if (my_time_compare(&tmp, time_now) == -1) - { - interval.month+= (ulong) months; - tmp= *start; - if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval))) + if (next_time <= time_now) + { + interval.month= (ulong) months; + next_time= add_interval(&local_start, time_zone, + INTERVAL_MONTH, interval); + if (next_time == 0) goto done; } - *next= tmp; - /* assert on that the next is after now */ - DBUG_ASSERT(1==my_time_compare(next, time_now)); } + DBUG_ASSERT(time_now < next_time); + + *next= next_time; + done: - DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next))); - DBUG_RETURN(ret); + DBUG_PRINT("info", ("next_time: %ld", (long) next_time)); + DBUG_RETURN(next_time == 0); } @@ -1183,23 +1371,20 @@ done: TRUE Error NOTES - The time is set in execute_at, if no more executions the latter is set to - 0000-00-00. + The time is set in execute_at, if no more executions the latter is + set to 0. 
*/ bool Event_queue_element::compute_next_execution_time() { - TIME time_now; - int tmp; + my_time_t time_now; DBUG_ENTER("Event_queue_element::compute_next_execution_time"); DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx", - (long) TIME_to_ulonglong_datetime(&starts), - (long) TIME_to_ulonglong_datetime(&ends), - (long) TIME_to_ulonglong_datetime(&last_executed), + (long) starts, (long) ends, (long) last_executed, (long) this)); - if (status == Event_queue_element::DISABLED) + if (status != Event_queue_element::ENABLED) { DBUG_PRINT("compute_next_execution_time", ("Event %s is DISABLED", name.str)); @@ -1209,7 +1394,7 @@ Event_queue_element::compute_next_execution_time() if (!expression) { /* Let's check whether it was executed */ - if (last_executed.year) + if (last_executed) { DBUG_PRINT("info",("One-time event %s.%s of was already executed", dbname.str, name.str)); @@ -1222,17 +1407,16 @@ Event_queue_element::compute_next_execution_time() goto ret; } - my_tz_UTC->gmt_sec_to_TIME(&time_now, current_thd->query_start()); + time_now= (my_time_t) current_thd->query_start(); - DBUG_PRINT("info",("NOW: [%lu]", - (ulong) TIME_to_ulonglong_datetime(&time_now))); + DBUG_PRINT("info",("NOW: [%lu]", (ulong) time_now)); /* if time_now is after ends don't execute anymore */ - if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1) + if (!ends_null && ends < time_now) { DBUG_PRINT("info", ("NOW after ENDS, don't execute anymore")); /* time_now is after ends. don't execute anymore */ - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + execute_at= 0; execute_at_null= TRUE; if (on_completion == Event_queue_element::ON_COMPLETION_DROP) dropped= TRUE; @@ -1248,12 +1432,11 @@ Event_queue_element::compute_next_execution_time() Let's check whether time_now is before starts. If so schedule for starts. */ - if (!starts_null && (tmp= my_time_compare(&time_now, &starts)) < 1) + if (!starts_null && time_now <= starts) { - if (tmp == 0 && my_time_compare(&starts, &last_executed) == 0) + if (time_now == starts && starts == last_executed) { /* - time_now = starts = last_executed do nothing or we will schedule for second time execution at starts. */ } @@ -1279,26 +1462,25 @@ Event_queue_element::compute_next_execution_time() If not set then schedule for now. */ DBUG_PRINT("info", ("Both STARTS & ENDS are set")); - if (!last_executed.year) + if (!last_executed) { DBUG_PRINT("info", ("Not executed so far.")); } { - TIME next_exec; + my_time_t next_exec; - if (get_next_time(&next_exec, &starts, &time_now, - last_executed.year? &last_executed:&starts, + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; /* There was previous execution */ - if (my_time_compare(&ends, &next_exec) == -1) + if (ends < next_exec) { DBUG_PRINT("info", ("Next execution of %s after ENDS. Stop executing.", name.str)); /* Next execution after ends. 
No more executions */ - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + execute_at= 0; execute_at_null= TRUE; if (on_completion == Event_queue_element::ON_COMPLETION_DROP) dropped= TRUE; @@ -1307,8 +1489,7 @@ Event_queue_element::compute_next_execution_time() } else { - DBUG_PRINT("info",("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec)); execute_at= next_exec; execute_at_null= FALSE; } @@ -1323,15 +1504,14 @@ Event_queue_element::compute_next_execution_time() Both starts and m_ends are not set, so we schedule for the next based on last_executed. */ - if (last_executed.year) + if (last_executed) { - TIME next_exec; - if (get_next_time(&next_exec, &starts, &time_now, &last_executed, + my_time_t next_exec; + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; execute_at= next_exec; - DBUG_PRINT("info",("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec)); } else { @@ -1353,20 +1533,18 @@ Event_queue_element::compute_next_execution_time() Hence schedule for starts + m_expression in case last_executed is not set, otherwise to last_executed + m_expression */ - if (!last_executed.year) + if (!last_executed) { DBUG_PRINT("info", ("Not executed so far.")); } { - TIME next_exec; - if (get_next_time(&next_exec, &starts, &time_now, - last_executed.year? &last_executed:&starts, + my_time_t next_exec; + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; execute_at= next_exec; - DBUG_PRINT("info",("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec)); } execute_at_null= FALSE; } @@ -1381,20 +1559,20 @@ Event_queue_element::compute_next_execution_time() If last_executed is not set then schedule for now */ - if (!last_executed.year) + if (!last_executed) execute_at= time_now; else { - TIME next_exec; + my_time_t next_exec; - if (get_next_time(&next_exec, &starts, &time_now, &last_executed, + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; - if (my_time_compare(&ends, &next_exec) == -1) + if (ends < next_exec) { DBUG_PRINT("info", ("Next execution after ENDS. 
Stop executing.")); - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + execute_at= 0; execute_at_null= TRUE; status= Event_queue_element::DISABLED; status_changed= TRUE; @@ -1403,8 +1581,7 @@ Event_queue_element::compute_next_execution_time() } else { - DBUG_PRINT("info", ("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info", ("Next[%lu]", (ulong) next_exec)); execute_at= next_exec; execute_at_null= FALSE; } @@ -1413,8 +1590,7 @@ Event_queue_element::compute_next_execution_time() goto ret; } ret: - DBUG_PRINT("info", ("ret: 0 execute_at: %lu", - (long) TIME_to_ulonglong_datetime(&execute_at))); + DBUG_PRINT("info", ("ret: 0 execute_at: %lu", (long) execute_at)); DBUG_RETURN(FALSE); err: DBUG_PRINT("info", ("ret=1")); @@ -1434,41 +1610,12 @@ err: void Event_queue_element::mark_last_executed(THD *thd) { - TIME time_now; - thd->end_time(); - my_tz_UTC->gmt_sec_to_TIME(&time_now, (my_time_t) thd->query_start()); - last_executed= time_now; /* was execute_at */ + last_executed= (my_time_t) thd->query_start(); last_executed_changed= TRUE; - - execution_count++; -} - - -/* - Drops the event - - SYNOPSIS - Event_queue_element::drop() - thd thread context - - RETURN VALUE - 0 OK - -1 Cannot open mysql.event - -2 Cannot find the event in mysql.event (already deleted?) - - others return code from SE in case deletion of the event row - failed. -*/ - -int -Event_queue_element::drop(THD *thd) -{ - DBUG_ENTER("Event_queue_element::drop"); - DBUG_RETURN(Events::get_instance()-> - drop_event(thd, dbname, name, FALSE, TRUE)); + execution_count++; } @@ -1503,13 +1650,13 @@ Event_queue_element::update_timing_fields(THD *thd) thd->reset_n_backup_open_tables_state(&backup); - if (Events::get_instance()->open_event_table(thd, TL_WRITE, &table)) + if (events_event_db_repository.open_event_table(thd, TL_WRITE, &table)) { ret= TRUE; goto done; } fields= table->field; - if ((ret= Events::get_instance()->db_repository-> + if ((ret= events_event_db_repository. find_named_event(thd, dbname, name, table))) goto done; @@ -1519,8 +1666,11 @@ Event_queue_element::update_timing_fields(THD *thd) if (last_executed_changed) { + TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, last_executed); + fields[ET_FIELD_LAST_EXECUTED]->set_notnull(); - fields[ET_FIELD_LAST_EXECUTED]->store_time(&last_executed, + fields[ET_FIELD_LAST_EXECUTED]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); last_executed_changed= FALSE; } @@ -1531,6 +1681,13 @@ Event_queue_element::update_timing_fields(THD *thd) status_changed= FALSE; } + /* + Turn off row binlogging of event timing updates. These are not used + for RBR of events replicated to the slave. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + if ((table->file->ha_update_row(table->record[1], table->record[0]))) ret= TRUE; @@ -1542,6 +1699,26 @@ done: } +static +void +append_datetime(String *buf, Time_zone *time_zone, my_time_t secs, + const char *name, uint len) +{ + char dtime_buff[20*2+32];/* +32 to make my_snprintf_{8bit|ucs2} happy */ + buf->append(STRING_WITH_LEN(" ")); + buf->append(name, len); + buf->append(STRING_WITH_LEN(" '")); + /* + Pass the buffer and the second param tells fills the buffer and + returns the number of chars to copy. 
+ */ + TIME time; + time_zone->gmt_sec_to_TIME(&time, secs); + buf->append(dtime_buff, my_datetime_to_str(&time, dtime_buff)); + buf->append(STRING_WITH_LEN("'")); +} + + /* Get SHOW CREATE EVENT as string @@ -1581,17 +1758,17 @@ Event_timed::get_create_event(THD *thd, String *buf) buf->append(' '); LEX_STRING *ival= &interval_type_to_name[interval]; buf->append(ival->str, ival->length); + + if (!starts_null) + append_datetime(buf, time_zone, starts, STRING_WITH_LEN("STARTS")); + + if (!ends_null) + append_datetime(buf, time_zone, ends, STRING_WITH_LEN("ENDS")); } else { - char dtime_buff[20*2+32];/* +32 to make my_snprintf_{8bit|ucs2} happy */ - buf->append(STRING_WITH_LEN(" ON SCHEDULE AT '")); - /* - Pass the buffer and the second param tells fills the buffer and - returns the number of chars to copy. - */ - buf->append(dtime_buff, my_datetime_to_str(&execute_at, dtime_buff)); - buf->append(STRING_WITH_LEN("'")); + append_datetime(buf, time_zone, execute_at, + STRING_WITH_LEN("ON SCHEDULE AT")); } if (on_completion == Event_timed::ON_COMPLETION_DROP) @@ -1601,6 +1778,8 @@ Event_timed::get_create_event(THD *thd, String *buf) if (status == Event_timed::ENABLED) buf->append(STRING_WITH_LEN("ENABLE")); + else if (status == Event_timed::SLAVESIDE_DISABLED) + buf->append(STRING_WITH_LEN("SLAVESIDE_DISABLE")); else buf->append(STRING_WITH_LEN("DISABLE")); @@ -1632,9 +1811,10 @@ Event_timed::get_create_event(THD *thd, String *buf) */ int -Event_job_data::get_fake_create_event(THD *thd, String *buf) +Event_job_data::get_fake_create_event(String *buf) { DBUG_ENTER("Event_job_data::get_create_event"); + /* FIXME: "EVERY 3337 HOUR" is asking for trouble. */ buf->append(STRING_WITH_LEN("CREATE EVENT anonymous ON SCHEDULE " "EVERY 3337 HOUR DO ")); buf->append(body.str, body.length); @@ -1686,6 +1866,9 @@ Event_job_data::execute(THD *thd) sphead->m_flags|= sp_head::LOG_SLOW_STATEMENTS; sphead->m_flags|= sp_head::LOG_GENERAL_LOG; + /* Execute the event in its time zone. */ + thd->variables.time_zone= time_zone; + ret= sphead->execute_procedure(thd, &empty_item_list); } else @@ -1719,7 +1902,7 @@ done: RETURN VALUE 0 success EVEX_COMPILE_ERROR error during compilation - EVEX_MICROSECOND_UNSUP mysql.event was tampered + EVEX_MICROSECOND_UNSUP mysql.event was tampered */ int @@ -1744,7 +1927,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root) show_create.length(0); - switch (get_fake_create_event(thd, &show_create)) { + switch (get_fake_create_event(&show_create)) { case EVEX_MICROSECOND_UNSUP: DBUG_RETURN(EVEX_MICROSECOND_UNSUP); case 0: @@ -1786,22 +1969,18 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root) event_change_security_context(thd, definer_user, definer_host, dbname, &save_ctx); thd->lex= &lex; - mysql_init_query(thd, (uchar*) thd->query, thd->query_length); + mysql_init_query(thd, thd->query, thd->query_length); if (MYSQLparse((void *)thd) || thd->is_fatal_error) { DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d", thd->is_fatal_error)); - /* - Free lex associated resources - QQ: Do we really need all this stuff here? - */ - sql_print_error("SCHEDULER: Error during compilation of %s.%s or " - "thd->is_fatal_error: %d", - dbname.str, name.str, thd->is_fatal_error); - lex.unit.cleanup(); - delete lex.sphead; - sphead= lex.sphead= NULL; + + sql_print_error("Event Scheduler: " + "%serror during compilation of %s.%s", + thd->is_fatal_error ? 
"fatal " : "", + dbname.str, name.str); + ret= EVEX_COMPILE_ERROR; goto done; } @@ -1914,7 +2093,7 @@ event_change_security_context(THD *thd, LEX_STRING user, LEX_STRING host, thd->security_ctx= &thd->main_security_ctx; #endif DBUG_RETURN(FALSE); -} +} /* diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h index e00b0b94eaf..c0ae389f967 100644 --- a/sql/event_data_objects.h +++ b/sql/event_data_objects.h @@ -22,10 +22,29 @@ #define EVEX_BAD_PARAMS -5 #define EVEX_MICROSECOND_UNSUP -6 - class sp_head; class Sql_alloc; +class Event_queue_element_for_exec +{ +public: + Event_queue_element_for_exec(){}; + ~Event_queue_element_for_exec(); + + bool + init(LEX_STRING dbname, LEX_STRING name); + + LEX_STRING dbname; + LEX_STRING name; + bool dropped; + THD *thd; + +private: + /* Prevent use of these */ + Event_queue_element_for_exec(const Event_queue_element_for_exec &); + void operator=(Event_queue_element_for_exec &); +}; + class Event_basic { @@ -33,19 +52,42 @@ protected: MEM_ROOT mem_root; public: + /* + ENABLED = feature can function normally (is turned on) + SLAVESIDE_DISABLED = feature is turned off on slave + DISABLED = feature is turned off + */ + enum enum_status + { + ENABLED = 1, + DISABLED, + SLAVESIDE_DISABLED + }; + + enum enum_on_completion + { + ON_COMPLETION_DROP = 1, + ON_COMPLETION_PRESERVE + }; + LEX_STRING dbname; LEX_STRING name; LEX_STRING definer;// combination of user and host + Time_zone *time_zone; + Event_basic(); virtual ~Event_basic(); virtual int - load_from_row(TABLE *table) = 0; + load_from_row(THD *thd, TABLE *table) = 0; protected: bool load_string_fields(Field **fields, ...); + + bool + load_time_zone(THD *thd, const LEX_STRING tz_name); }; @@ -57,25 +99,14 @@ protected: bool last_executed_changed; public: - enum enum_status - { - ENABLED = 1, - DISABLED - }; - - enum enum_on_completion - { - ON_COMPLETION_DROP = 1, - ON_COMPLETION_PRESERVE - }; - - enum enum_on_completion on_completion; - enum enum_status status; - TIME last_executed; - - TIME execute_at; - TIME starts; - TIME ends; + int on_completion; + int status; + longlong originator; + + my_time_t last_executed; + my_time_t execute_at; + my_time_t starts; + my_time_t ends; my_bool starts_null; my_bool ends_null; my_bool execute_at_null; @@ -91,37 +122,16 @@ public: virtual ~Event_queue_element(); virtual int - load_from_row(TABLE *table); + load_from_row(THD *thd, TABLE *table); bool compute_next_execution_time(); - int - drop(THD *thd); - void mark_last_executed(THD *thd); bool update_timing_fields(THD *thd); - - static void *operator new(size_t size) - { - void *p; - DBUG_ENTER("Event_queue_element::new(size)"); - p= my_malloc(size, MYF(0)); - DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p)); - DBUG_RETURN(p); - } - - static void operator delete(void *ptr, size_t size) - { - DBUG_ENTER("Event_queue_element::delete(ptr,size)"); - DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr)); - TRASH(ptr, size); - my_free((gptr) ptr, MYF(0)); - DBUG_VOID_RETURN; - } }; @@ -150,7 +160,7 @@ public: init(); virtual int - load_from_row(TABLE *table); + load_from_row(THD *thd, TABLE *table); int get_create_event(THD *thd, String *buf); @@ -160,7 +170,6 @@ public: class Event_job_data : public Event_basic { public: - THD *thd; sp_head *sphead; LEX_STRING body; @@ -175,7 +184,7 @@ public: virtual ~Event_job_data(); virtual int - load_from_row(TABLE *table); + load_from_row(THD *thd, TABLE *table); int execute(THD *thd); @@ -184,7 +193,7 @@ public: compile(THD *thd, MEM_ROOT *mem_root); private: int - 
get_fake_create_event(THD *thd, String *buf); + get_fake_create_event(String *buf); Event_job_data(const Event_job_data &); /* Prevent use of these */ void operator=(Event_job_data &); @@ -194,21 +203,17 @@ private: class Event_parse_data : public Sql_alloc { public: - enum enum_status - { - ENABLED = 1, - DISABLED - }; - enum enum_on_completion - { - ON_COMPLETION_DROP = 1, - ON_COMPLETION_PRESERVE - }; - enum enum_on_completion on_completion; - enum enum_status status; + int on_completion; + int status; + longlong originator; + /* + do_not_create will be set if STARTS time is in the past and + on_completion == ON_COMPLETION_DROP. + */ + bool do_not_create; - const uchar *body_begin; + const char *body_begin; LEX_STRING dbname; LEX_STRING name; @@ -220,9 +225,9 @@ public: Item* item_ends; Item* item_execute_at; - TIME starts; - TIME ends; - TIME execute_at; + my_time_t starts; + my_time_t ends; + my_time_t execute_at; my_bool starts_null; my_bool ends_null; my_bool execute_at_null; @@ -267,7 +272,11 @@ private: void report_bad_value(const char *item_name, Item *bad_item); + void + check_if_in_the_past(THD *thd, my_time_t ltime_utc); + Event_parse_data(const Event_parse_data &); /* Prevent use of these */ + void check_originator_id(THD *thd); void operator=(Event_parse_data &); }; diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 940930ec4c6..b6c9e4ea8e3 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -94,7 +94,7 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = }, { { C_STRING_WITH_LEN("status") }, - { C_STRING_WITH_LEN("enum('ENABLED','DISABLED')") }, + { C_STRING_WITH_LEN("enum('ENABLED','DISABLED','SLAVESIDE_DISABLED')") }, {NULL, 0} }, { @@ -118,6 +118,16 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = { C_STRING_WITH_LEN("comment") }, { C_STRING_WITH_LEN("char(64)") }, { C_STRING_WITH_LEN("utf8") } + }, + { + { C_STRING_WITH_LEN("originator") }, + { C_STRING_WITH_LEN("int(10)") }, + {NULL, 0} + }, + { + { C_STRING_WITH_LEN("time_zone") }, + { C_STRING_WITH_LEN("char(64)") }, + { C_STRING_WITH_LEN("latin1") } } }; @@ -137,7 +147,7 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = EVEX_GENERAL_ERROR Bad data EVEX_GET_FIELD_FAILED Field count does not match. table corrupted? - DESCRIPTION + DESCRIPTION Used both when an event is created and when it is altered. */ @@ -170,10 +180,13 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, fields[ET_FIELD_STATUS]->store((longlong)et->status, TRUE); + fields[ET_FIELD_ORIGINATOR]->store((longlong)et->originator, TRUE); + + /* Change the SQL_MODE only if body was present in an ALTER EVENT and of course always during CREATE EVENT. 
- */ + */ if (et->body.str) { fields[ET_FIELD_SQL_MODE]->store((longlong)thd->variables.sql_mode, TRUE); @@ -183,6 +196,14 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, if (et->expression) { + const String *tz_name= thd->variables.time_zone->get_name(); + if (!is_update || !et->starts_null) + { + fields[ET_FIELD_TIME_ZONE]->set_notnull(); + fields[ET_FIELD_TIME_ZONE]->store(tz_name->ptr(), tz_name->length(), + tz_name->charset()); + } + fields[ET_FIELD_INTERVAL_EXPR]->set_notnull(); fields[ET_FIELD_INTERVAL_EXPR]->store((longlong)et->expression, TRUE); @@ -197,26 +218,40 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, if (!et->starts_null) { + TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, et->starts); + fields[ET_FIELD_STARTS]->set_notnull(); - fields[ET_FIELD_STARTS]->store_time(&et->starts, MYSQL_TIMESTAMP_DATETIME); + fields[ET_FIELD_STARTS]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); } if (!et->ends_null) { + TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, et->ends); + fields[ET_FIELD_ENDS]->set_notnull(); - fields[ET_FIELD_ENDS]->store_time(&et->ends, MYSQL_TIMESTAMP_DATETIME); + fields[ET_FIELD_ENDS]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); } } - else if (et->execute_at.year) + else if (et->execute_at) { + const String *tz_name= thd->variables.time_zone->get_name(); + fields[ET_FIELD_TIME_ZONE]->set_notnull(); + fields[ET_FIELD_TIME_ZONE]->store(tz_name->ptr(), tz_name->length(), + tz_name->charset()); + fields[ET_FIELD_INTERVAL_EXPR]->set_null(); fields[ET_FIELD_TRANSIENT_INTERVAL]->set_null(); fields[ET_FIELD_STARTS]->set_null(); fields[ET_FIELD_ENDS]->set_null(); - + + TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, et->execute_at); + fields[ET_FIELD_EXECUTE_AT]->set_notnull(); fields[ET_FIELD_EXECUTE_AT]-> - store_time(&et->execute_at, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } else { @@ -226,7 +261,7 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, this is an error if the action is create. something is borked */ } - + ((Field_timestamp *)fields[ET_FIELD_MODIFIED])->set_time(); if (et->comment.str) @@ -288,7 +323,7 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, { key_copy(key_buf, event_table->record[0], key_info, key_len); if (!(ret= event_table->file->index_read(event_table->record[0], key_buf, - key_len, HA_READ_PREFIX))) + (key_part_map)1, HA_READ_PREFIX))) { DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret)); do @@ -296,12 +331,12 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, ret= copy_event_to_schema_table(thd, schema_table, event_table); if (ret == 0) ret= event_table->file->index_next_same(event_table->record[0], - key_buf, key_len); + key_buf, key_len); } while (ret == 0); } DBUG_PRINT("info", ("Scan finished. ret=%d", ret)); } - event_table->file->ha_index_end(); + event_table->file->ha_index_end(); /* ret is guaranteed to be != 0 */ if (ret == HA_ERR_END_OF_FILE || ret == HA_ERR_KEY_NOT_FOUND) DBUG_RETURN(FALSE); @@ -462,7 +497,7 @@ Event_db_repository::open_event_table(THD *thd, enum thr_lock_type lock_type, check_parse_params() thd Thread context parse_data Event's data - + RETURN VALUE FALSE OK TRUE Error (reported) @@ -508,7 +543,7 @@ check_parse_params(THD *thd, Event_parse_data *parse_data) 0 OK EVEX_GENERAL_ERROR Failure - DESCRIPTION + DESCRIPTION Creates an event. Relies on mysql_event_fill_row which is shared with ::update_event. The name of the event is inside "et". 
*/ @@ -527,6 +562,8 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, if (check_parse_params(thd, parse_data)) goto err; + if (parse_data->do_not_create) + goto ok; DBUG_PRINT("info", ("open mysql.event for update")); if (open_event_table(thd, TL_WRITE, &table)) @@ -535,7 +572,6 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, goto err; } - DBUG_PRINT("info", ("name: %.*s", parse_data->name.length, parse_data->name.str)); @@ -587,7 +623,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, goto err; } - if (!(parse_data->expression) && !(parse_data->execute_at.year)) + if (!(parse_data->expression) && !(parse_data->execute_at)) { DBUG_PRINT("error", ("neither expression nor execute_at are set!")); my_error(ER_EVENT_NEITHER_M_EXPR_NOR_M_AT, MYF(0)); @@ -601,7 +637,9 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, handle it here */ if ((ret= mysql_event_fill_row(thd, table, parse_data, FALSE))) - goto err; + goto err; + + table->field[ET_FIELD_STATUS]->store((longlong)parse_data->status, TRUE); /* Close active transaction only if We are going to modify disk */ if (end_active_trans(thd)) @@ -618,7 +656,7 @@ ok: (void) mysql_change_db(thd, old_db.str, 1); /* This statement may cause a spooky valgrind warning at startup - inside init_key_cache on my system (ahristov, 2006/08/10) + inside init_key_cache on my system (ahristov, 2006/08/10) */ close_thread_tables(thd); DBUG_RETURN(FALSE); @@ -664,7 +702,7 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data, goto err; } - if (check_parse_params(thd, parse_data)) + if (check_parse_params(thd, parse_data) || parse_data->do_not_create) goto err; DBUG_PRINT("info", ("dbname: %s", parse_data->dbname.str)); @@ -843,8 +881,7 @@ Event_db_repository::find_named_event(THD *thd, LEX_STRING db, LEX_STRING name, key_copy(key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, key, - table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { DBUG_PRINT("info", ("Row not found")); @@ -886,14 +923,14 @@ Event_db_repository::drop_schema_events(THD *thd, LEX_STRING schema) */ void -Event_db_repository::drop_events_by_field(THD *thd, +Event_db_repository::drop_events_by_field(THD *thd, enum enum_events_table_field field, LEX_STRING field_value) { int ret= 0; TABLE *table= NULL; READ_RECORD read_record_info; - DBUG_ENTER("Event_db_repository::drop_events_by_field"); + DBUG_ENTER("Event_db_repository::drop_events_by_field"); DBUG_PRINT("enter", ("field=%d field_value=%s", field, field_value.str)); if (open_event_table(thd, TL_WRITE, &table)) @@ -964,7 +1001,7 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname, my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); else if ((ret= find_named_event(thd, dbname, name, table))) my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str); - else if ((ret= etn->load_from_row(table))) + else if ((ret= etn->load_from_row(thd, table))) my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), "event"); if (table) diff --git a/sql/event_db_repository.h b/sql/event_db_repository.h index 1457fb64e2e..10a10d7957a 100644 --- a/sql/event_db_repository.h +++ b/sql/event_db_repository.h @@ -19,7 +19,7 @@ enum enum_events_table_field { - ET_FIELD_DB = 0, + ET_FIELD_DB = 0, ET_FIELD_NAME, ET_FIELD_BODY, ET_FIELD_DEFINER, @@ -35,6 +35,8 @@ enum enum_events_table_field 
ET_FIELD_ON_COMPLETION, ET_FIELD_SQL_MODE, ET_FIELD_COMMENT, + ET_FIELD_ORIGINATOR, + ET_FIELD_TIME_ZONE, ET_FIELD_COUNT /* a cool trick to count the number of fields :) */ }; @@ -61,7 +63,7 @@ public: update_event(THD *thd, Event_parse_data *parse_data, LEX_STRING *new_dbname, LEX_STRING *new_name); - bool + bool drop_event(THD *thd, LEX_STRING db, LEX_STRING name, bool drop_if_exists); void @@ -97,5 +99,5 @@ private: Event_db_repository(const Event_db_repository &); void operator=(Event_db_repository &); }; - + #endif /* _EVENT_DB_REPOSITORY_H_ */ diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 068abbe3408..f958102e269 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -16,7 +16,6 @@ #include "mysql_priv.h" #include "event_queue.h" #include "event_data_objects.h" -#include "event_db_repository.h" #define EVENT_QUEUE_INITIAL_SIZE 30 @@ -33,16 +32,6 @@ #define LOCK_QUEUE_DATA() lock_data(SCHED_FUNC, __LINE__) #define UNLOCK_QUEUE_DATA() unlock_data(SCHED_FUNC, __LINE__) -struct event_queue_param -{ - THD *thd; - Event_queue *queue; - pthread_mutex_t LOCK_loaded; - pthread_cond_t COND_loaded; - bool loading_finished; -}; - - /* Compares the execute_at members of two Event_queue_element instances. Used as callback for the prioritized queue when shifting @@ -63,11 +52,13 @@ struct event_queue_param execute_at.second_part is not considered during comparison */ -static int +static int event_queue_element_compare_q(void *vptr, byte* a, byte *b) { - return my_time_compare(&((Event_queue_element *)a)->execute_at, - &((Event_queue_element *)b)->execute_at); + my_time_t lhs = ((Event_queue_element *)a)->execute_at; + my_time_t rhs = ((Event_queue_element *)b)->execute_at; + + return (lhs < rhs ? -1 : (lhs > rhs ? 1 : 0)); } @@ -85,7 +76,7 @@ Event_queue::Event_queue() { mutex_last_unlocked_in_func= mutex_last_locked_in_func= mutex_last_attempted_lock_in_func= ""; - set_zero_time(&next_activation_at, MYSQL_TIMESTAMP_DATETIME); + next_activation_at= 0; } @@ -136,36 +127,23 @@ Event_queue::deinit_mutexes() */ bool -Event_queue::init_queue(THD *thd, Event_db_repository *db_repo) +Event_queue::init_queue(THD *thd) { - bool res; DBUG_ENTER("Event_queue::init_queue"); DBUG_PRINT("enter", ("this: 0x%lx", (long) this)); LOCK_QUEUE_DATA(); - db_repository= db_repo; if (init_queue_ex(&queue, EVENT_QUEUE_INITIAL_SIZE , 0 /*offset*/, 0 /*max_on_top*/, event_queue_element_compare_q, NULL, EVENT_QUEUE_EXTENT)) { - sql_print_error("SCHEDULER: Can't initialize the execution queue"); - goto err; - } - - if (sizeof(my_time_t) != sizeof(time_t)) - { - sql_print_error("SCHEDULER: sizeof(my_time_t) != sizeof(time_t) ." - "The scheduler may not work correctly. Stopping"); + sql_print_error("Event Scheduler: Can't initialize the execution queue"); goto err; } - res= load_events_from_db(thd); UNLOCK_QUEUE_DATA(); - if (res) - deinit_queue(); - - DBUG_RETURN(res); + DBUG_RETURN(FALSE); err: UNLOCK_QUEUE_DATA(); @@ -195,44 +173,37 @@ Event_queue::deinit_queue() } -/* +/** Adds an event to the queue. 
SYNOPSIS Event_queue::create_event() dbname The schema of the new event name The name of the new event - - RETURN VALUE - OP_OK OK or scheduler not working - OP_LOAD_ERROR Error during loading from disk */ -int -Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) +void +Event_queue::create_event(THD *thd, Event_queue_element *new_element) { - int res; - Event_queue_element *new_element; DBUG_ENTER("Event_queue::create_event"); - DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, dbname.str, name.str)); + DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, + new_element->dbname.str, new_element->name.str)); - new_element= new Event_queue_element(); - res= db_repository->load_named_event(thd, dbname, name, new_element); - if (res || new_element->status == Event_queue_element::DISABLED) + if ((new_element->status == Event_queue_element::DISABLED) + || (new_element->status == Event_queue_element::SLAVESIDE_DISABLED)) delete new_element; else { new_element->compute_next_execution_time(); + DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); LOCK_QUEUE_DATA(); - DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); queue_insert_safe(&queue, (byte *) new_element); dbug_dump_queue(thd->query_start()); - pthread_cond_broadcast(&COND_queue_state); + pthread_cond_broadcast(&COND_queue_state); UNLOCK_QUEUE_DATA(); } - - DBUG_RETURN(res); + DBUG_VOID_RETURN; } @@ -246,32 +217,17 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) name Name of the event new_schema New schema, in case of RENAME TO, otherwise NULL new_name New name, in case of RENAME TO, otherwise NULL - - RETURN VALUE - OP_OK OK or scheduler not working - OP_LOAD_ERROR Error during loading from disk */ -int +void Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, - LEX_STRING *new_schema, LEX_STRING *new_name) + Event_queue_element *new_element) { - int res; - Event_queue_element *new_element; - DBUG_ENTER("Event_queue::update_event"); DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str)); - new_element= new Event_queue_element(); - - res= db_repository->load_named_event(thd, new_schema ? *new_schema:dbname, - new_name ? *new_name:name, new_element); - if (res) - { - delete new_element; - goto end; - } - else if (new_element->status == Event_queue_element::DISABLED) + if ((new_element->status == Event_queue_element::DISABLED) || + (new_element->status == Event_queue_element::SLAVESIDE_DISABLED)) { DBUG_PRINT("info", ("The event is disabled.")); /* @@ -292,15 +248,13 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, { DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); queue_insert_safe(&queue, (byte *) new_element); - pthread_cond_broadcast(&COND_queue_state); + pthread_cond_broadcast(&COND_queue_state); } dbug_dump_queue(thd->query_start()); UNLOCK_QUEUE_DATA(); -end: - DBUG_PRINT("info", ("res=%d", res)); - DBUG_RETURN(res); + DBUG_VOID_RETURN; } @@ -325,7 +279,7 @@ Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name) find_n_remove_event(dbname, name); dbug_dump_queue(thd->query_start()); UNLOCK_QUEUE_DATA(); - + /* We don't signal here because the scheduler will catch the change next time it wakes up. 
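The queue side of the change is visible above: Event_queue now receives a ready Event_queue_element, keys its priority queue on the plain my_time_t execute_at value, and wakes the scheduler after every insertion. A simplified sketch of that pattern follows; QUEUE and queue_insert_safe() are the mysys primitives shown in the hunks, while enqueue_and_signal() is a hypothetical helper, not server code.

/*
  Simplified sketch: min-ordered queue keyed on UTC execution time, plus a
  broadcast so a sleeping scheduler re-evaluates how long it should wait.
*/
#include "mysql_priv.h"
#include "event_data_objects.h"
#include <queues.h>

static int
compare_by_execute_at(void *not_used, byte *a, byte *b)
{
  my_time_t lhs= ((Event_queue_element *) a)->execute_at;
  my_time_t rhs= ((Event_queue_element *) b)->execute_at;
  return (lhs < rhs) ? -1 : ((lhs > rhs) ? 1 : 0);
}

static void
enqueue_and_signal(QUEUE *queue, Event_queue_element *element,
                   pthread_mutex_t *lock, pthread_cond_t *queue_state)
{
  pthread_mutex_lock(lock);
  queue_insert_safe(queue, (byte *) element);
  /* the scheduler may be sleeping until the previous top event; wake it so
     it notices that an earlier activation time may now be on top */
  pthread_cond_broadcast(queue_state);
  pthread_mutex_unlock(lock);
}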
@@ -347,7 +301,7 @@ Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name) RETURN VALUE >=0 Number of dropped events - + NOTE Expected is the caller to acquire lock on LOCK_event_queue */ @@ -379,7 +333,7 @@ Event_queue::drop_matching_events(THD *thd, LEX_STRING pattern, i++; } /* - We don't call pthread_cond_broadcast(&COND_queue_state); + We don't call pthread_cond_broadcast(&COND_queue_state); If we remove the top event: 1. The queue is empty. The scheduler will wake up at some time and realize that the queue is empty. If create_event() comes inbetween @@ -452,133 +406,6 @@ Event_queue::find_n_remove_event(LEX_STRING db, LEX_STRING name) /* - Loads all ENABLED events from mysql.event into the prioritized - queue. Called during scheduler main thread initialization. Compiles - the events. Creates Event_queue_element instances for every ENABLED event - from mysql.event. - - SYNOPSIS - Event_queue::load_events_from_db() - thd - Thread context. Used for memory allocation in some cases. - - RETURN VALUE - 0 OK - !0 Error (EVEX_OPEN_TABLE_FAILED, EVEX_MICROSECOND_UNSUP, - EVEX_COMPILE_ERROR) - in all these cases mysql.event was - tampered. - - NOTES - Reports the error to the console -*/ - -int -Event_queue::load_events_from_db(THD *thd) -{ - TABLE *table; - READ_RECORD read_record_info; - int ret= -1; - uint count= 0; - bool clean_the_queue= TRUE; - - DBUG_ENTER("Event_queue::load_events_from_db"); - DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); - - if ((ret= db_repository->open_event_table(thd, TL_READ, &table))) - { - sql_print_error("SCHEDULER: Table mysql.event is damaged. Can not open"); - DBUG_RETURN(EVEX_OPEN_TABLE_FAILED); - } - - init_read_record(&read_record_info, thd, table ,NULL,1,0); - while (!(read_record_info.read_record(&read_record_info))) - { - Event_queue_element *et; - if (!(et= new Event_queue_element)) - { - DBUG_PRINT("info", ("Out of memory")); - break; - } - DBUG_PRINT("info", ("Loading event from row.")); - - if ((ret= et->load_from_row(table))) - { - sql_print_error("SCHEDULER: Error while loading from mysql.event. " - "Table probably corrupted"); - break; - } - if (et->status != Event_queue_element::ENABLED) - { - DBUG_PRINT("info",("%s is disabled",et->name.str)); - delete et; - continue; - } - - /* let's find when to be executed */ - if (et->compute_next_execution_time()) - { - sql_print_error("SCHEDULER: Error while computing execution time of %s.%s." - " Skipping", et->dbname.str, et->name.str); - continue; - } - - { - Event_job_data temp_job_data; - DBUG_PRINT("info", ("Event %s loaded from row. ", et->name.str)); - - temp_job_data.load_from_row(table); - - /* - We load only on scheduler root just to check whether the body - compiles. - */ - switch (ret= temp_job_data.compile(thd, thd->mem_root)) { - case EVEX_MICROSECOND_UNSUP: - sql_print_error("SCHEDULER: mysql.event is tampered. MICROSECOND is not " - "supported but found in mysql.event"); - break; - case EVEX_COMPILE_ERROR: - sql_print_error("SCHEDULER: Error while compiling %s.%s. 
Aborting load", - et->dbname.str, et->name.str); - break; - default: - break; - } - thd->end_statement(); - thd->cleanup_after_query(); - } - if (ret) - { - delete et; - goto end; - } - - queue_insert_safe(&queue, (byte *) et); - count++; - } - clean_the_queue= FALSE; -end: - end_read_record(&read_record_info); - - if (clean_the_queue) - { - empty_queue(); - ret= -1; - } - else - { - ret= 0; - sql_print_information("SCHEDULER: Loaded %d event%s", count, - (count == 1)?"":"s"); - } - - close_thread_tables(thd); - - DBUG_PRINT("info", ("Status code %d. Loaded %d event(s)", ret, count)); - DBUG_RETURN(ret); -} - - -/* Recalculates activation times in the queue. There is one reason for that. Because the values (execute_at) by which the queue is ordered are changed by calls to compute_next_execution_time() on a request from the @@ -627,8 +454,9 @@ Event_queue::empty_queue() { uint i; DBUG_ENTER("Event_queue::empty_queue"); - DBUG_PRINT("enter", ("Purging the queue. %d element(s)", queue.elements)); - sql_print_information("SCHEDULER: Purging queue. %u events", queue.elements); + DBUG_PRINT("enter", ("Purging the queue. %u element(s)", queue.elements)); + sql_print_information("Event Scheduler: Purging the queue. %u events", + queue.elements); /* empty the queue */ for (i= 0; i < queue.elements; ++i) { @@ -664,15 +492,11 @@ Event_queue::dbug_dump_queue(time_t now) DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u " "expr: %ld et.exec_at: %ld now: %ld " "(et.exec_at - now): %d if: %d", - (long) TIME_to_ulonglong_datetime(&et->execute_at), - (long) TIME_to_ulonglong_datetime(&et->starts), - (long) TIME_to_ulonglong_datetime(&et->ends), - et->execution_count, - (long) et->expression, - (long) (sec_since_epoch_TIME(&et->execute_at)), - (long) now, - (int) (sec_since_epoch_TIME(&et->execute_at) - now), - sec_since_epoch_TIME(&et->execute_at) <= now)); + (long) et->execute_at, (long) et->starts, + (long) et->ends, et->execution_count, + (long) et->expression, (long) et->execute_at, + (long) now, (int) (et->execute_at - now), + et->execute_at <= now)); } DBUG_VOID_RETURN; #endif @@ -688,31 +512,26 @@ static const char *queue_wait_msg= "Waiting for next activation"; SYNOPSIS Event_queue::get_top_for_execution_if_time() - thd [in] Thread - job_data [out] The object to execute + thd [in] Thread + event_name [out] The object to execute RETURN VALUE - FALSE No error. If *job_data==NULL then top not elligible for execution. - Could be that there is no top. - TRUE Error - + FALSE No error. event_name != NULL + TRUE Serious error */ bool -Event_queue::get_top_for_execution_if_time(THD *thd, Event_job_data **job_data) +Event_queue::get_top_for_execution_if_time(THD *thd, + Event_queue_element_for_exec **event_name) { bool ret= FALSE; - struct timespec top_time; - Event_queue_element *top= NULL; - bool to_free= FALSE; - bool to_drop= FALSE; - *job_data= NULL; + *event_name= NULL; DBUG_ENTER("Event_queue::get_top_for_execution_if_time"); LOCK_QUEUE_DATA(); for (;;) { - int res; + Event_queue_element *top= NULL; /* Break loop if thd has been killed */ if (thd->killed) @@ -724,7 +543,7 @@ Event_queue::get_top_for_execution_if_time(THD *thd, Event_job_data **job_data) if (!queue.elements) { /* There are no events in the queue */ - set_zero_time(&next_activation_at, MYSQL_TIMESTAMP_DATETIME); + next_activation_at= 0; /* Wait on condition until signaled. Release LOCK_queue while waiting. 
*/ cond_wait(thd, NULL, queue_empty_msg, SCHED_FUNC, __LINE__); @@ -736,54 +555,44 @@ Event_queue::get_top_for_execution_if_time(THD *thd, Event_job_data **job_data) thd->end_time(); /* Get current time */ - time_t seconds_to_next_event= - sec_since_epoch_TIME(&top->execute_at) - thd->query_start(); next_activation_at= top->execute_at; - if (seconds_to_next_event > 0) + if (next_activation_at > thd->query_start()) { /* Not yet time for top event, wait on condition with time or until signaled. Release LOCK_queue while waiting. */ - set_timespec(top_time, seconds_to_next_event); + struct timespec top_time; + set_timespec(top_time, next_activation_at - thd->query_start()); cond_wait(thd, &top_time, queue_wait_msg, SCHED_FUNC, __LINE__); continue; } - DBUG_PRINT("info", ("Ready for execution")); - if (!(*job_data= new Event_job_data())) - { - ret= TRUE; - break; - } - if ((res= db_repository->load_named_event(thd, top->dbname, top->name, - *job_data))) + if (!(*event_name= new Event_queue_element_for_exec()) || + (*event_name)->init(top->dbname, top->name)) { - DBUG_PRINT("error", ("Got %d from load_named_event", res)); - delete *job_data; - *job_data= NULL; ret= TRUE; break; } + DBUG_PRINT("info", ("Ready for execution")); top->mark_last_executed(thd); if (top->compute_next_execution_time()) top->status= Event_queue_element::DISABLED; DBUG_PRINT("info", ("event %s status is %d", top->name.str, top->status)); - (*job_data)->execution_count= top->execution_count; + top->execution_count++; + (*event_name)->dropped= top->dropped; top->update_timing_fields(thd); - if (((top->execute_at.year && !top->expression) || top->execute_at_null) || - (top->status == Event_queue_element::DISABLED)) + if (top->status == Event_queue_element::DISABLED) { DBUG_PRINT("info", ("removing from the queue")); - sql_print_information("SCHEDULER: Last execution of %s.%s. %s", + sql_print_information("Event Scheduler: Last execution of %s.%s. %s", top->dbname.str, top->name.str, top->dropped? "Dropping.":""); - to_free= TRUE; - to_drop= top->dropped; + delete top; queue_remove(&queue, 0); } else @@ -794,19 +603,13 @@ Event_queue::get_top_for_execution_if_time(THD *thd, Event_job_data **job_data) } end: UNLOCK_QUEUE_DATA(); - if (to_drop) - { - DBUG_PRINT("info", ("Dropping from disk")); - top->drop(thd); - } - if (to_free) - delete top; - DBUG_PRINT("info", ("returning %d et_new: 0x%lx ", ret, (long) *job_data)); + DBUG_PRINT("info", ("returning %d et_new: 0x%lx ", + ret, (long) *event_name)); - if (*job_data) - DBUG_PRINT("info", ("db: %s name: %s definer=%s", (*job_data)->dbname.str, - (*job_data)->name.str, (*job_data)->definer.str)); + if (*event_name) + DBUG_PRINT("info", ("db: %s name: %s", + (*event_name)->dbname.str, (*event_name)->name.str)); DBUG_RETURN(ret); } @@ -938,10 +741,11 @@ Event_queue::dump_internal_status() printf("Last lock attempt at: %s:%u\n", mutex_last_attempted_lock_in_func, mutex_last_attempted_lock_at_line); printf("WOC : %s\n", waiting_on_cond? 
"YES":"NO"); + + TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, next_activation_at); printf("Next activation : %04d-%02d-%02d %02d:%02d:%02d\n", - next_activation_at.year, next_activation_at.month, - next_activation_at.day, next_activation_at.hour, - next_activation_at.minute, next_activation_at.second); + time.year, time.month, time.day, time.hour, time.minute, time.second); DBUG_VOID_RETURN; } diff --git a/sql/event_queue.h b/sql/event_queue.h index 9f48da4914f..95f52b7b588 100644 --- a/sql/event_queue.h +++ b/sql/event_queue.h @@ -16,12 +16,10 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ class Event_basic; -class Event_db_repository; -class Event_job_data; class Event_queue_element; +class Event_queue_element_for_exec; class THD; -class Event_scheduler; class Event_queue { @@ -35,19 +33,19 @@ public: deinit_mutexes(); bool - init_queue(THD *thd, Event_db_repository *db_repo); - + init_queue(THD *thd); + void deinit_queue(); /* Methods for queue management follow */ - int - create_event(THD *thd, LEX_STRING dbname, LEX_STRING name); + void + create_event(THD *thd, Event_queue_element *new_element); - int + void update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, - LEX_STRING *new_schema, LEX_STRING *new_name); + Event_queue_element *new_element); void drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name); @@ -59,14 +57,15 @@ public: recalculate_activation_times(THD *thd); bool - get_top_for_execution_if_time(THD *thd, Event_job_data **job_data); + get_top_for_execution_if_time(THD *thd, + Event_queue_element_for_exec **event_name); + void dump_internal_status(); - int - load_events_from_db(THD *thd); - + void + empty_queue(); protected: void find_n_remove_event(LEX_STRING db, LEX_STRING name); @@ -76,8 +75,6 @@ protected: drop_matching_events(THD *thd, LEX_STRING pattern, bool (*)(LEX_STRING, Event_basic *)); - void - empty_queue(); void dbug_dump_queue(time_t now); @@ -86,14 +83,10 @@ protected: pthread_mutex_t LOCK_event_queue; pthread_cond_t COND_queue_state; - Event_db_repository *db_repository; - - Event_scheduler *scheduler; - - /* The sorted queue with the Event_job_data objects */ + /* The sorted queue with the Event_queue_element objects */ QUEUE queue; - TIME next_activation_at; + my_time_t next_activation_at; uint mutex_last_locked_at_line; uint mutex_last_unlocked_at_line; diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index a47576cf0c0..d50ea932596 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -18,6 +18,7 @@ #include "event_data_objects.h" #include "event_scheduler.h" #include "event_queue.h" +#include "event_db_repository.h" #ifdef __GNUC__ #if __GNUC__ >= 2 @@ -34,6 +35,11 @@ extern pthread_attr_t connection_attrib; + +Event_db_repository *Event_worker_thread::db_repository; +Events *Event_worker_thread::events_facade; + + static const LEX_STRING scheduler_states_names[] = { @@ -60,8 +66,8 @@ struct scheduler_param { et The event itself */ -static void -evex_print_warnings(THD *thd, Event_job_data *et) +void +Event_worker_thread::print_warnings(THD *thd, Event_job_data *et) { MYSQL_ERROR *err; DBUG_ENTER("evex_print_warnings"); @@ -72,7 +78,7 @@ evex_print_warnings(THD *thd, Event_job_data *et) char prefix_buf[5 * STRING_BUFFER_USUAL_SIZE]; String prefix(prefix_buf, sizeof(prefix_buf), system_charset_info); prefix.length(0); - prefix.append("SCHEDULER: ["); + prefix.append("Event Scheduler: ["); append_identifier(thd, &prefix, et->definer.str, et->definer.length); prefix.append("][", 2); @@ 
-104,25 +110,22 @@ evex_print_warnings(THD *thd, Event_job_data *et) SYNOPSIS post_init_event_thread() thd Thread + + NOTES + Before this is called, one should not do any DBUG_XXX() calls. + */ bool post_init_event_thread(THD *thd) { - my_thread_init(); - pthread_detach_this_thread(); - thd->real_id= pthread_self(); + (void) init_new_connection_handler_thread(); if (init_thr_lock() || thd->store_globals()) { thd->cleanup(); return TRUE; } -#if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) - sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use - VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); -#endif pthread_mutex_lock(&LOCK_thread_count); threads.append(thd); thread_count++; @@ -187,7 +190,7 @@ pre_init_event_thread(THD* thd) thd->options|= OPTION_AUTO_IS_NULL; thd->client_capabilities|= CLIENT_MULTI_RESULTS; pthread_mutex_lock(&LOCK_thread_count); - thd->thread_id= thread_id++; + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; pthread_mutex_unlock(&LOCK_thread_count); /* @@ -218,26 +221,26 @@ pthread_handler_t event_scheduler_thread(void *arg) { /* needs to be first for thread_stack */ - THD *thd= (THD *)((struct scheduler_param *) arg)->thd; + THD *thd= (THD *) ((struct scheduler_param *) arg)->thd; Event_scheduler *scheduler= ((struct scheduler_param *) arg)->scheduler; - - my_free((char*)arg, MYF(0)); + bool res; thd->thread_stack= (char *)&thd; // remember where our stack is + res= post_init_event_thread(thd); DBUG_ENTER("event_scheduler_thread"); - - if (!post_init_event_thread(thd)) + my_free((char*)arg, MYF(0)); + if (!res) scheduler->run(thd); deinit_event_thread(thd); - + pthread_exit(0); DBUG_RETURN(0); // Against gcc warnings } /* - Function that executes an event in a child thread. Setups the + Function that executes an event in a child thread. Setups the environment for the event execution and cleans after that. SYNOPSIS @@ -251,51 +254,109 @@ event_scheduler_thread(void *arg) pthread_handler_t event_worker_thread(void *arg) { + THD *thd; + Event_queue_element_for_exec *event= (Event_queue_element_for_exec *)arg; + + thd= event->thd; + + Event_worker_thread worker_thread; + worker_thread.run(thd, event); + + return 0; // Can't return anything here +} + + +/* + Function that executes an event in a child thread. Setups the + environment for the event execution and cleans after that. + + SYNOPSIS + Event_worker_thread::run() + thd Thread context + event The Event_queue_element_for_exec object to be processed +*/ + +void +Event_worker_thread::run(THD *thd, Event_queue_element_for_exec *event) +{ /* needs to be first for thread_stack */ - THD *thd; - Event_job_data *event= (Event_job_data *)arg; + char my_stack; int ret; + Event_job_data *job_data= NULL; + bool res; - thd= event->thd; + thd->thread_stack= &my_stack; // remember where our stack is + res= post_init_event_thread(thd); + + DBUG_ENTER("Event_worker_thread::run"); + DBUG_PRINT("info", ("Time is %ld, THD: 0x%lx", + (long) time(NULL), (long) thd)); + + if (res) + goto end; + + if (!(job_data= new Event_job_data())) + goto end; + else if ((ret= db_repository-> + load_named_event(thd, event->dbname, event->name, job_data))) + { + DBUG_PRINT("error", ("Got %d from load_named_event", ret)); + goto end; + } + + sql_print_information("Event Scheduler: " + "[%s.%s of %s] executing in thread %lu. 
", + job_data->dbname.str, job_data->name.str, + job_data->definer.str, thd->thread_id); + + thd->enable_slow_log= TRUE; + + ret= job_data->execute(thd); + + print_warnings(thd, job_data); - thd->thread_stack= (char *) &thd; // remember where our stack is - DBUG_ENTER("event_worker_thread"); + sql_print_information("Event Scheduler: " + "[%s.%s of %s] executed in thread %lu. " + "RetCode=%d", job_data->dbname.str, job_data->name.str, + job_data->definer.str, thd->thread_id, ret); + if (ret == EVEX_COMPILE_ERROR) + sql_print_information("Event Scheduler: " + "COMPILE ERROR for event %s.%s of %s", + job_data->dbname.str, job_data->name.str, + job_data->definer.str); + else if (ret == EVEX_MICROSECOND_UNSUP) + sql_print_information("Event Scheduler: MICROSECOND is not supported"); - if (!post_init_event_thread(thd)) +end: + delete job_data; + + if (event->dropped) { - DBUG_PRINT("info", ("Baikonur, time is %ld, BURAN reporting and operational." - "THD: 0x%lx", - (long) time(NULL), (long) thd)); - - sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. " - "Execution %u", - event->dbname.str, event->name.str, - event->definer.str, thd->thread_id, - event->execution_count); - - thd->enable_slow_log= TRUE; - - ret= event->execute(thd); - - evex_print_warnings(thd, event); - - sql_print_information("SCHEDULER: [%s.%s of %s] executed in thread %lu. " - "RetCode=%d", event->dbname.str, event->name.str, - event->definer.str, thd->thread_id, ret); - if (ret == EVEX_COMPILE_ERROR) - sql_print_information("SCHEDULER: COMPILE ERROR for event %s.%s of %s", - event->dbname.str, event->name.str, - event->definer.str); - else if (ret == EVEX_MICROSECOND_UNSUP) - sql_print_information("SCHEDULER: MICROSECOND is not supported"); + sql_print_information("Event Scheduler: Dropping %s.%s", + event->dbname.str, event->name.str); + /* + Using db_repository can lead to a race condition because we access + the table without holding LOCK_metadata. + Scenario: + 1. CREATE EVENT xyz AT ... (conn thread) + 2. execute xyz (worker) + 3. CREATE EVENT XYZ EVERY ... (conn thread) + 4. drop xyz (worker) + 5. XYZ was just created on disk but `drop xyz` of the worker dropped it. + A consequent load to create Event_queue_element will fail. + + If all operations are performed under LOCK_metadata there is no such + problem. However, this comes at the price of introduction bi-directional + association between class Events and class Event_worker_thread. + */ + events_facade->drop_event(thd, event->dbname, event->name, FALSE); } - DBUG_PRINT("info", ("BURAN %s.%s is landing!", event->dbname.str, + DBUG_PRINT("info", ("Done with Event %s.%s", event->dbname.str, event->name.str)); - delete event; + delete event; deinit_event_thread(thd); - - DBUG_RETURN(0); // Can't return anything here + pthread_exit(0); } @@ -384,7 +445,7 @@ Event_scheduler::start() if (!(new_thd= new THD)) { - sql_print_error("SCHEDULER: Cannot init manager event thread"); + sql_print_error("Event Scheduler: Cannot initialize the scheduler thread"); ret= TRUE; goto end; } @@ -400,7 +461,7 @@ Event_scheduler::start() scheduler_thd= new_thd; DBUG_PRINT("info", ("Setting state go RUNNING")); state= RUNNING; - DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd)); + DBUG_PRINT("info", ("Forking new thread for scheduler. 
THD: 0x%lx", (long) new_thd)); if (pthread_create(&th, &connection_attrib, event_scheduler_thread, (void*)scheduler_param_value)) { @@ -441,10 +502,9 @@ bool Event_scheduler::run(THD *thd) { int res= FALSE; - Event_job_data *job_data; DBUG_ENTER("Event_scheduler::run"); - sql_print_information("SCHEDULER: Manager thread started with id %lu", + sql_print_information("Event Scheduler: scheduler thread started with id %lu", thd->thread_id); /* Recalculate the values in the queue because there could have been stops @@ -454,18 +514,22 @@ Event_scheduler::run(THD *thd) while (is_running()) { + Event_queue_element_for_exec *event_name; + /* Gets a minimized version */ - if (queue->get_top_for_execution_if_time(thd, &job_data)) + if (queue->get_top_for_execution_if_time(thd, &event_name)) { - sql_print_information("SCHEDULER: Serious error during getting next " + sql_print_information("Event Scheduler: " + "Serious error during getting next " "event to execute. Stopping"); break; } - DBUG_PRINT("info", ("get_top returned job_data: 0x%lx", (long) job_data)); - if (job_data) + DBUG_PRINT("info", ("get_top_for_execution_if_time returned " + "event_name=0x%lx", (long) event_name)); + if (event_name) { - if ((res= execute_top(thd, job_data))) + if ((res= execute_top(event_name))) break; } else @@ -480,7 +544,7 @@ Event_scheduler::run(THD *thd) state= INITIALIZED; pthread_cond_signal(&COND_state); UNLOCK_DATA(); - sql_print_information("SCHEDULER: Stopped"); + sql_print_information("Event Scheduler: Stopped"); DBUG_RETURN(res); } @@ -499,7 +563,7 @@ Event_scheduler::run(THD *thd) */ bool -Event_scheduler::execute_top(THD *thd, Event_job_data *job_data) +Event_scheduler::execute_top(Event_queue_element_for_exec *event_name) { THD *new_thd; pthread_t th; @@ -510,22 +574,30 @@ Event_scheduler::execute_top(THD *thd, Event_job_data *job_data) pre_init_event_thread(new_thd); new_thd->system_thread= SYSTEM_THREAD_EVENT_WORKER; - job_data->thd= new_thd; - DBUG_PRINT("info", ("BURAN %s@%s ready for start t-3..2..1..0..ignition", - job_data->dbname.str, job_data->name.str)); + event_name->thd= new_thd; + DBUG_PRINT("info", ("Event %s@%s ready for start", + event_name->dbname.str, event_name->name.str)); + /* + TODO: should use thread pool here, preferably with an upper limit + on number of threads: if too many events are scheduled for the + same time, starting all of them at once won't help them run truly + in parallel (because of the great amount of synchronization), so + we may as well execute them in sequence, keeping concurrency at a + reasonable level. + */ /* Major failure */ if ((res= pthread_create(&th, &connection_attrib, event_worker_thread, - job_data))) + event_name))) goto error; ++started_events; - DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD: 0x%lx", (long) new_thd)); + DBUG_PRINT("info", ("Event is in THD: 0x%lx", (long) new_thd)); DBUG_RETURN(FALSE); error: - DBUG_PRINT("error", ("Baikonur, we have a problem! res: %d", res)); + DBUG_PRINT("error", ("Event_scheduler::execute_top() res: %d", res)); if (new_thd) { new_thd->proc_info= "Clearing"; @@ -537,7 +609,7 @@ error: delete new_thd; pthread_mutex_unlock(&LOCK_thread_count); } - delete job_data; + delete event_name; DBUG_RETURN(TRUE); } @@ -563,7 +635,7 @@ Event_scheduler::is_running() } -/* +/** Stops the scheduler (again). Waits for acknowledgement from the scheduler that it has stopped - synchronous stopping. 
@@ -589,8 +661,8 @@ Event_scheduler::stop() /* Guarantee we don't catch spurious signals */ do { - DBUG_PRINT("info", ("Waiting for COND_started_or_stopped from the manager " - "thread. Current value of state is %s . " + DBUG_PRINT("info", ("Waiting for COND_started_or_stopped from " + "the scheduler thread. Current value of state is %s . " "workers count=%d", scheduler_states_names[state].str, workers_count())); /* @@ -604,20 +676,23 @@ Event_scheduler::stop() */ state= STOPPING; - DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id)); + DBUG_PRINT("info", ("Scheduler thread has id %lu", + scheduler_thd->thread_id)); /* Lock from delete */ pthread_mutex_lock(&scheduler_thd->LOCK_delete); /* This will wake up the thread if it waits on Queue's conditional */ - sql_print_information("SCHEDULER: Killing manager thread %lu", + sql_print_information("Event Scheduler: Killing the scheduler thread, " + "thread id %lu", scheduler_thd->thread_id); scheduler_thd->awake(THD::KILL_CONNECTION); pthread_mutex_unlock(&scheduler_thd->LOCK_delete); /* thd could be 0x0, when shutting down */ - sql_print_information("SCHEDULER: Waiting the manager thread to reply"); + sql_print_information("Event Scheduler: " + "Waiting for the scheduler thread to reply"); COND_STATE_WAIT(thd, NULL, "Waiting scheduler to stop"); } while (state == STOPPING); - DBUG_PRINT("info", ("Manager thread has cleaned up. Set state to INIT")); + DBUG_PRINT("info", ("Scheduler thread has cleaned up. Set state to INIT")); /* The rationale behind setting it to NULL here but not destructing it beforehand is because the THD will be deinited in event_scheduler_thread(). @@ -647,7 +722,7 @@ Event_scheduler::workers_count() { THD *tmp; uint count= 0; - + DBUG_ENTER("Event_scheduler::workers_count"); pthread_mutex_lock(&LOCK_thread_count); // For unlink from list I_List_iterator<THD> it(threads); diff --git a/sql/event_scheduler.h b/sql/event_scheduler.h index 18625ef35f3..74d53c4f63d 100644 --- a/sql/event_scheduler.h +++ b/sql/event_scheduler.h @@ -15,8 +15,17 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + This file is internal to Events module. Please do not include it directly. + All public declarations of Events module are in events.h and + event_data_objects.h. +*/ + + class Event_queue; class Event_job_data; +class Event_db_repository; +class Events; void pre_init_event_thread(THD* thd); @@ -27,6 +36,29 @@ post_init_event_thread(THD* thd); void deinit_event_thread(THD *thd); + +class Event_worker_thread +{ +public: + static void + init(Events *events, Event_db_repository *db_repo) + { + db_repository= db_repo; + events_facade= events; + } + + void + run(THD *thd, Event_queue_element_for_exec *event); + +private: + void + print_warnings(THD *thd, Event_job_data *et); + + static Event_db_repository *db_repository; + static Events *events_facade; +}; + + class Event_scheduler { public: @@ -48,7 +80,7 @@ public: bool run(THD *thd); - void + void init_scheduler(Event_queue *queue); void @@ -71,10 +103,9 @@ private: uint workers_count(); - /* helper functions */ bool - execute_top(THD *thd, Event_job_data *job_data); + execute_top(Event_queue_element_for_exec *event_name); /* helper functions for working with mutexes & conditionals */ void diff --git a/sql/events.cc b/sql/events.cc index e6224915d6b..46111bcaa91 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -49,7 +49,7 @@ counterpart. 1. 
CREATE EVENT the_name ON SCHEDULE EVERY 1 SECOND DISABLE DO SELECT 1; 2. DROP EVENT the_name - + In other words, the first one will create a row in mysql.event . In the second step because there will be a line, disk based drop will pass and the scheduler will remove the memory counterpart. The reason is that @@ -97,7 +97,7 @@ Event_queue events_event_queue; static Event_scheduler events_event_scheduler; -static + Event_db_repository events_event_db_repository; Events Events::singleton; @@ -296,29 +296,6 @@ Events::Events() /* - Opens mysql.event table with specified lock - - SYNOPSIS - Events::open_event_table() - thd Thread context - lock_type How to lock the table - table We will store the open table here - - RETURN VALUE - 1 Cannot lock table - 2 The table is corrupted - different number of fields - 0 OK -*/ - -int -Events::open_event_table(THD *thd, enum thr_lock_type lock_type, - TABLE **table) -{ - return db_repository->open_event_table(thd, lock_type, table); -} - - -/* The function exported to the world for creating of events. SYNOPSIS @@ -332,7 +309,7 @@ Events::open_event_table(THD *thd, enum thr_lock_type lock_type, TRUE Error (Reported) NOTES - In case there is an event with the same name (db) and + In case there is an event with the same name (db) and IF NOT EXISTS is specified, an warning is put into the stack. */ @@ -347,15 +324,39 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists) DBUG_RETURN(TRUE); } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for CREATE EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); + /* On error conditions my_error() is called so no need to handle here */ - if (!(ret= db_repository->create_event(thd, parse_data, if_not_exists))) + if (!(ret= db_repository->create_event(thd, parse_data, if_not_exists)) && + !parse_data->do_not_create) { - if ((ret= event_queue->create_event(thd, parse_data->dbname, - parse_data->name))) + Event_queue_element *new_element; + + if (!(new_element= new Event_queue_element())) + ret= TRUE; // OOM + else if ((ret= db_repository->load_named_event(thd, parse_data->dbname, + parse_data->name, + new_element))) { DBUG_ASSERT(ret == OP_LOAD_ERROR); - my_error(ER_EVENT_MODIFY_QUEUE_ERROR, MYF(0)); + delete new_element; + } + else /* Binlog the create event. */ + { + event_queue->create_event(thd, new_element); + if (mysql_bin_log.is_open() && (thd->query_length > 0)) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } } } pthread_mutex_unlock(&LOCK_event_metadata); @@ -378,7 +379,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists) TRUE Error NOTES - et contains data about dbname and event name. + et contains data about dbname and event name. new_name is the new name of the event, if not null this means that RENAME TO was specified in the query */ @@ -387,6 +388,7 @@ bool Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) { int ret; + Event_queue_element *new_element; DBUG_ENTER("Events::update_event"); LEX_STRING *new_dbname= rename_to ? &rename_to->m_db : NULL; LEX_STRING *new_name= rename_to ? 
&rename_to->m_name : NULL; @@ -396,15 +398,39 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) DBUG_RETURN(TRUE); } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for UPDATE EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); + /* On error conditions my_error() is called so no need to handle here */ if (!(ret= db_repository->update_event(thd, parse_data, new_dbname, new_name))) { - if ((ret= event_queue->update_event(thd, parse_data->dbname, - parse_data->name, new_dbname, new_name))) + LEX_STRING dbname= new_dbname ? *new_dbname : parse_data->dbname; + LEX_STRING name= new_name ? *new_name : parse_data->name; + + if (!(new_element= new Event_queue_element())) + ret= TRUE; // OOM + else if ((ret= db_repository->load_named_event(thd, dbname, name, + new_element))) { DBUG_ASSERT(ret == OP_LOAD_ERROR); - my_error(ER_EVENT_MODIFY_QUEUE_ERROR, MYF(0)); + delete new_element; + } + else /* Binlog the alter event. */ + { + event_queue->update_event(thd, parse_data->dbname, parse_data->name, + new_element); + if (mysql_bin_log.is_open() && (thd->query_length > 0)) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } } } pthread_mutex_unlock(&LOCK_event_metadata); @@ -423,10 +449,6 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) name [in] Event's name if_exists [in] When set and the event does not exist => warning onto the stack - only_from_disk [in] Whether to remove the event from the queue too. - In case of Event_job_data::drop() it's needed to - do only disk drop because Event_queue will handle - removal from memory queue. RETURN VALUE FALSE OK @@ -434,8 +456,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) */ bool -Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists, - bool only_from_disk) +Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists) { int ret; DBUG_ENTER("Events::drop_event"); @@ -445,19 +466,32 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists, DBUG_RETURN(TRUE); } + /* + Turn off row binlogging of this statement and use statement-based so + that all supporting tables are updated for DROP EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); /* On error conditions my_error() is called so no need to handle here */ if (!(ret= db_repository->drop_event(thd, dbname, name, if_exists))) { - if (!only_from_disk) - event_queue->drop_event(thd, dbname, name); + event_queue->drop_event(thd, dbname, name); + /* Binlog the drop event. 
*/ + if (mysql_bin_log.is_open() && (thd->query_length > 0)) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } } pthread_mutex_unlock(&LOCK_event_metadata); DBUG_RETURN(ret); } -/* +/** Drops all events from a schema SYNOPSIS @@ -470,8 +504,8 @@ void Events::drop_schema_events(THD *thd, char *db) { LEX_STRING const db_lex= { db, strlen(db) }; - - DBUG_ENTER("Events::drop_schema_events"); + + DBUG_ENTER("Events::drop_schema_events"); DBUG_PRINT("enter", ("dropping events from %s", db)); if (unlikely(check_system_tables_error)) { @@ -541,6 +575,10 @@ Events::show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + const String *tz_name= et->time_zone->get_name(); + field_list.push_back(new Item_empty_string("time_zone", + tz_name->length())); + field_list.push_back(new Item_empty_string("Create Event", show_str.length())); @@ -553,6 +591,8 @@ Events::show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) protocol->store((char*) sql_mode_str, sql_mode_len, scs); + protocol->store((char*) tz_name->ptr(), tz_name->length(), scs); + protocol->store(show_str.c_ptr(), show_str.length(), scs); ret= protocol->write(); send_eof(thd); @@ -649,17 +689,18 @@ Events::init() if (check_system_tables(thd)) { check_system_tables_error= TRUE; - sql_print_error("SCHEDULER: The system tables are damaged. " + sql_print_error("Event Scheduler: The system tables are damaged. " "The scheduler subsystem will be unusable during this run."); goto end; } check_system_tables_error= FALSE; - if (event_queue->init_queue(thd, db_repository)) + if (event_queue->init_queue(thd) || load_events_from_db(thd)) { - sql_print_error("SCHEDULER: Error while loading from disk."); + sql_print_error("Event Scheduler: Error while loading from disk."); goto end; } + scheduler->init_scheduler(event_queue); DBUG_ASSERT(opt_event_scheduler == Events::EVENTS_ON || @@ -667,6 +708,7 @@ Events::init() if (opt_event_scheduler == Events::EVENTS_ON) res= scheduler->start(); + Event_worker_thread::init(this, db_repository); end: delete thd; /* Remember that we don't have a THD */ @@ -702,7 +744,7 @@ Events::deinit() } -/* +/** Inits Events mutexes SYNOPSIS @@ -760,7 +802,7 @@ Events::dump_internal_status() } -/* +/** Starts execution of events by the scheduler SYNOPSIS @@ -868,7 +910,7 @@ Events::check_system_tables(THD *thd) if ((ret= simple_open_n_lock_tables(thd, &tables))) { - sql_print_error("SCHEDULER: Cannot open mysql.db"); + sql_print_error("Event Scheduler: Cannot open mysql.db"); ret= TRUE; } ret= table_check_intact(tables.table, MYSQL_DB_FIELD_COUNT, @@ -883,7 +925,7 @@ Events::check_system_tables(THD *thd) if (simple_open_n_lock_tables(thd, &tables)) { - sql_print_error("SCHEDULER: Cannot open mysql.user"); + sql_print_error("Event Scheduler: Cannot open mysql.user"); ret= TRUE; } else @@ -903,3 +945,133 @@ Events::check_system_tables(THD *thd) DBUG_RETURN(ret); } + + +/* + Loads all ENABLED events from mysql.event into the prioritized + queue. Called during scheduler main thread initialization. Compiles + the events. Creates Event_queue_element instances for every ENABLED event + from mysql.event. + + SYNOPSIS + Events::load_events_from_db() + thd Thread context. Used for memory allocation in some cases. + + RETURN VALUE + 0 OK + !0 Error (EVEX_OPEN_TABLE_FAILED, EVEX_MICROSECOND_UNSUP, + EVEX_COMPILE_ERROR) - in all these cases mysql.event was + tampered. 
+ + NOTES + Reports the error to the console +*/ + +int +Events::load_events_from_db(THD *thd) +{ + TABLE *table; + READ_RECORD read_record_info; + int ret= -1; + uint count= 0; + bool clean_the_queue= TRUE; + + DBUG_ENTER("Events::load_events_from_db"); + DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); + + if ((ret= db_repository->open_event_table(thd, TL_READ, &table))) + { + sql_print_error("Event Scheduler: Table mysql.event is damaged. Can not open"); + DBUG_RETURN(EVEX_OPEN_TABLE_FAILED); + } + + init_read_record(&read_record_info, thd, table ,NULL,1,0); + while (!(read_record_info.read_record(&read_record_info))) + { + Event_queue_element *et; + if (!(et= new Event_queue_element)) + { + DBUG_PRINT("info", ("Out of memory")); + break; + } + DBUG_PRINT("info", ("Loading event from row.")); + + if ((ret= et->load_from_row(thd, table))) + { + sql_print_error("Event Scheduler: " + "Error while reading from mysql.event. " + "The table is probably corrupted"); + break; + } + if (et->status != Event_queue_element::ENABLED) + { + DBUG_PRINT("info",("%s is disabled",et->name.str)); + delete et; + continue; + } + + /* let's find when to be executed */ + if (et->compute_next_execution_time()) + { + sql_print_error("Event Scheduler: Error while computing execution time of %s.%s." + " Skipping", et->dbname.str, et->name.str); + continue; + } + + { + Event_job_data temp_job_data; + DBUG_PRINT("info", ("Event %s loaded from row. ", et->name.str)); + + temp_job_data.load_from_row(thd, table); + + /* + We load only on scheduler root just to check whether the body + compiles. + */ + switch (ret= temp_job_data.compile(thd, thd->mem_root)) { + case EVEX_MICROSECOND_UNSUP: + sql_print_error("Event Scheduler: mysql.event is tampered. MICROSECOND is not " + "supported but found in mysql.event"); + break; + case EVEX_COMPILE_ERROR: + sql_print_error("Event Scheduler: Error while compiling %s.%s. Aborting load", + et->dbname.str, et->name.str); + break; + default: + break; + } + thd->end_statement(); + thd->cleanup_after_query(); + } + if (ret) + { + delete et; + goto end; + } + + DBUG_PRINT("load_events_from_db", ("Adding 0x%lx to the exec list.", + (long) et)); + event_queue->create_event(thd, et); + count++; + } + clean_the_queue= FALSE; +end: + end_read_record(&read_record_info); + + if (clean_the_queue) + { + event_queue->empty_queue(); + ret= -1; + } + else + { + ret= 0; + sql_print_information("Event Scheduler: Loaded %d event%s", + count, (count == 1)?"":"s"); + } + + close_thread_tables(thd); + + DBUG_PRINT("info", ("Status code %d. Loaded %d event(s)", ret, count)); + DBUG_RETURN(ret); +} diff --git a/sql/events.h b/sql/events.h index 621ab0ffca5..f97a0c5f57e 100644 --- a/sql/events.h +++ b/sql/events.h @@ -19,7 +19,6 @@ class sp_name; class Event_parse_data; class Event_db_repository; class Event_queue; -class Event_queue_element; class Event_scheduler; /* Return codes */ @@ -38,17 +37,14 @@ enum enum_events_error_code int sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs); +/** + @class Events -- a facade to the functionality of the Event Scheduler. + +*/ class Events { public: - /* - Quite NOT the best practice and will be removed once - Event_timed::drop() and Event_timed is fixed not do drop directly - or other scheme will be found. 
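Loading at startup is now a plain scan of mysql.event performed by Events itself: open the table read-only, walk it through the read-record interface, and enqueue only rows that load and are ENABLED. A stripped-down sketch of that scan skeleton, with the compile check and most error handling omitted; scan_event_table() is a hypothetical name, while the repository and queue calls are the ones used above.

/*
  Stripped-down sketch of the mysql.event scan performed by
  Events::load_events_from_db() above; not the full error handling.
*/
static int
scan_event_table(THD *thd, Event_db_repository *repo, Event_queue *queue)
{
  TABLE *table;
  READ_RECORD read_record_info;

  if (repo->open_event_table(thd, TL_READ, &table))
    return EVEX_OPEN_TABLE_FAILED;              /* mysql.event unusable */

  init_read_record(&read_record_info, thd, table, NULL, 1, 0);
  while (!read_record_info.read_record(&read_record_info))
  {
    Event_queue_element *et= new Event_queue_element;
    if (!et || et->load_from_row(thd, table) ||
        et->status != Event_queue_element::ENABLED ||
        et->compute_next_execution_time())
    {
      delete et;                                /* sketch: just skip the row */
      continue;
    }
    queue->create_event(thd, et);               /* queue takes ownership */
  }
  end_read_record(&read_record_info);
  close_thread_tables(thd);
  return 0;
}

The real function additionally compiles each body once (to reject MICROSECOND intervals and broken bodies) and empties the whole queue if any row fails to load; this sketch does not reproduce that behaviour.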
- */ - friend class Event_queue_element; - /* The order should match the order in opt_typelib */ enum enum_opt_event_scheduler { @@ -92,15 +88,11 @@ public: update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to); bool - drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists, - bool only_from_disk); + drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists); void drop_schema_events(THD *thd, char *db); - int - open_event_table(THD *thd, enum thr_lock_type lock_type, TABLE **table); - bool show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name); @@ -119,6 +111,9 @@ private: bool check_system_tables(THD *thd); + int + load_events_from_db(THD *thd); + /* Singleton DP is used */ Events(); ~Events(){} diff --git a/sql/field.cc b/sql/field.cc index 867edc6f9dd..250a9e3c1b9 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1015,6 +1015,7 @@ bool Field::type_can_have_key_part(enum enum_field_types type) case MYSQL_TYPE_BLOB: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: + case MYSQL_TYPE_GEOMETRY: return TRUE; default: return FALSE; @@ -1207,13 +1208,13 @@ String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val) { ASSERT_COLUMN_MARKED_FOR_READ; CHARSET_INFO *cs= &my_charset_bin; - uint length= 21; + uint length; longlong value= val_int(); - if (val_buffer->alloc(length)) + if (val_buffer->alloc(MY_INT64_NUM_DECIMAL_DIGITS)) return 0; length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(), - length, + MY_INT64_NUM_DECIMAL_DIGITS, unsigned_val ? 10 : -10, value); val_buffer->length(length); @@ -5436,27 +5437,30 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; TIME l_time; - long tmp; int error; THD *thd= table ? 
table->in_use : current_thd; - if (str_to_datetime(from, len, &l_time, - (TIME_FUZZY_DATE | - (thd->variables.sql_mode & - (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | - MODE_INVALID_DATES))), - &error) <= MYSQL_TIMESTAMP_ERROR) + enum enum_mysql_timestamp_type ret; + if ((ret= str_to_datetime(from, len, &l_time, + (TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error)) <= MYSQL_TIMESTAMP_ERROR) { - tmp= 0L; + int3store(ptr,0L); error= 2; } else - tmp= l_time.day + l_time.month*32 + l_time.year*16*32; + { + int3store(ptr, l_time.day + l_time.month*32 + l_time.year*16*32); + if(!error && (ret != MYSQL_TIMESTAMP_DATE)) + return 2; + } if (error) set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATE, 1); - int3store(ptr,tmp); return error; } @@ -6297,9 +6301,9 @@ int Field_string::cmp(const char *a_ptr, const char *b_ptr) void Field_string::sort_string(char *to,uint length) { - uint tmp= my_strnxfrm(field_charset, - (uchar*) to, length, - (uchar*) ptr, field_length); + IF_DBUG(uint tmp=) my_strnxfrm(field_charset, + (uchar*) to, length, + (uchar*) ptr, field_length); DBUG_ASSERT(tmp == length); } @@ -7203,7 +7207,7 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) cannot_convert_error_pos, from + length)) return 2; - if (copy_length < length) + if (from_end_pos < from + length) { report_data_too_long(this); return 2; @@ -8502,9 +8506,28 @@ char *Field_bit::pack(char *to, const char *from, uint max_length) { DBUG_ASSERT(max_length); uint length; - if (bit_len) + if (bit_len > 0) { - uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + /* + We have the following: + + ptr Points into a field in record R1 + from Points to a field in a record R2 + bit_ptr Points to the byte (in the null bytes) that holds the + odd bits of R1 + from_bitp Points to the byte that holds the odd bits of R2 + + We have the following: + + ptr - bit_ptr = from - from_bitp + + We want to isolate 'from_bitp', so this gives: + + ptr - bit_ptr - from = - from_bitp + - ptr + bit_ptr + from = from_bitp + bit_ptr + from - ptr = from_bitp + */ + uchar bits= get_rec_bits(bit_ptr + (from - ptr), bit_ofs, bit_len); *to++= bits; } length= min(bytes_in_rec, max_length - (bit_len > 0)); @@ -8515,9 +8538,16 @@ char *Field_bit::pack(char *to, const char *from, uint max_length) const char *Field_bit::unpack(char *to, const char *from) { - if (bit_len) + if (bit_len > 0) { - set_rec_bits(*from, bit_ptr, bit_ofs, bit_len); + /* + set_rec_bits is a macro, don't put the post-increment in the + argument since that might cause strange side-effects. + + For the choice of the second argument, see the explanation for + Field_bit::pack(). + */ + set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len); from++; } memcpy(to, from, bytes_in_rec); diff --git a/sql/field.h b/sql/field.h index 581e99bd1bc..b2169dac5b6 100644 --- a/sql/field.h +++ b/sql/field.h @@ -30,7 +30,7 @@ class Send_field; class Protocol; class create_field; struct st_cache_field; -void field_conv(Field *to,Field *from); +int field_conv(Field *to,Field *from); inline uint get_enum_pack_length(int elements) { @@ -193,9 +193,9 @@ public: */ virtual void sql_type(String &str) const =0; virtual uint size_of() const =0; // For new field - inline bool is_null(uint row_offset=0) + inline bool is_null(my_ptrdiff_t row_offset= 0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 
1 : 0) : table->null_row; } - inline bool is_real_null(uint row_offset=0) + inline bool is_real_null(my_ptrdiff_t row_offset= 0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : 0; } inline bool is_null_in_record(const uchar *record) { @@ -210,9 +210,9 @@ public: return 0; return test(null_ptr[offset] & null_bit); } - inline void set_null(int row_offset=0) + inline void set_null(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]|= null_bit; } - inline void set_notnull(int row_offset=0) + inline void set_notnull(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } @@ -504,6 +504,7 @@ public: {} int store_decimal(const my_decimal *); my_decimal *val_decimal(my_decimal *); + uint32 max_display_length() { return field_length; } }; @@ -532,7 +533,6 @@ public: void overflow(bool negative); bool zero_pack() const { return 0; } void sql_type(String &str) const; - uint32 max_display_length() { return field_length; } }; @@ -783,7 +783,6 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(float); } void sql_type(String &str) const; - uint32 max_display_length() { return 24; } }; @@ -826,7 +825,6 @@ public: uint32 pack_length() const { return sizeof(double); } void sql_type(String &str) const; uint size_of() const { return sizeof(*this); } - uint32 max_display_length() { return 53; } }; @@ -1322,7 +1320,7 @@ public: uint max_packed_col_length(uint max_length); void free() { value.free(); } inline void clear_temporary() { bzero((char*) &value,sizeof(value)); } - friend void field_conv(Field *to,Field *from); + friend int field_conv(Field *to,Field *from); uint size_of() const { return sizeof(*this); } bool has_charset(void) const { return charset() == &my_charset_bin ? 
FALSE : TRUE; } @@ -1355,7 +1353,7 @@ public: int store_decimal(const my_decimal *); void get_key_image(char *buff,uint length,imagetype type); uint size_of() const { return sizeof(*this); } - int reset(void) { return !maybe_null(); } + int reset(void) { return !maybe_null() || Field_blob::reset(); } }; #endif /*HAVE_SPATIAL*/ diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 2670de0387b..a718a402897 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -173,7 +173,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) if (field == field->table->next_number_field) { field->table->auto_increment_field_not_null= FALSE; - return 0; // field is set in handler.cc + return 0; // field is set in fill_record() } if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN) { @@ -336,6 +336,13 @@ static void do_field_real(Copy_field *copy) } +static void do_field_decimal(Copy_field *copy) +{ + my_decimal value; + copy->to_field->store_decimal(copy->from_field->val_decimal(&value)); +} + + /* string copy for single byte characters set when to string is shorter than from string @@ -580,6 +587,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) if (to->real_type() == MYSQL_TYPE_BIT || from->real_type() == MYSQL_TYPE_BIT) return do_field_int; + if (to->result_type() == DECIMAL_RESULT) + return do_field_decimal; // Check if identical fields if (from->result_type() == STRING_RESULT) { @@ -678,7 +687,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) /* Simple quick field convert that is called on insert */ -void field_conv(Field *to,Field *from) +int field_conv(Field *to,Field *from) { if (to->real_type() == from->real_type() && !(to->type() == MYSQL_TYPE_BLOB && to->table->copy_blobs)) @@ -706,7 +715,7 @@ void field_conv(Field *to,Field *from) if (to->ptr != from->ptr) #endif memcpy(to->ptr,from->ptr,to->pack_length()); - return; + return 0; } } if (to->type() == MYSQL_TYPE_BLOB) @@ -722,8 +731,7 @@ void field_conv(Field *to,Field *from) from->real_type() != MYSQL_TYPE_STRING && from->real_type() != MYSQL_TYPE_VARCHAR)) blob->value.copy(); - blob->store(blob->value.ptr(),blob->value.length(),from->charset()); - return; + return blob->store(blob->value.ptr(),blob->value.length(),from->charset()); } if ((from->result_type() == STRING_RESULT && (to->result_type() == STRING_RESULT || @@ -740,15 +748,15 @@ void field_conv(Field *to,Field *from) end with \0. Can be replaced with .ptr() when we have our own string->double conversion. 
*/ - to->store(result.c_ptr_quick(),result.length(),from->charset()); + return to->store(result.c_ptr_quick(),result.length(),from->charset()); } else if (from->result_type() == REAL_RESULT) - to->store(from->val_real()); + return to->store(from->val_real()); else if (from->result_type() == DECIMAL_RESULT) { my_decimal buff; - to->store_decimal(from->val_decimal(&buff)); + return to->store_decimal(from->val_decimal(&buff)); } else - to->store(from->val_int(), test(from->flags & UNSIGNED_FLAG)); + return to->store(from->val_int(), test(from->flags & UNSIGNED_FLAG)); } diff --git a/sql/filesort.cc b/sql/filesort.cc index 2f9a96472ca..a80e4a0fa54 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1374,7 +1374,10 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, } else { - switch ((sortorder->result_type=sortorder->item->result_type())) { + sortorder->result_type= sortorder->item->result_type(); + if (sortorder->item->result_as_longlong()) + sortorder->result_type= INT_RESULT; + switch (sortorder->result_type) { case STRING_RESULT: sortorder->length=sortorder->item->max_length; set_if_smaller(sortorder->length, thd->variables.max_sort_length); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a65ca1c6736..c989ddc338d 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -259,17 +259,16 @@ static int ndb_to_mysql_error(const NdbError *ndberr) int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans) { - int res= trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - h->m_force_send); - if (res == -1) + if (trans->execute(NdbTransaction::NoCommit, + NdbOperation::AO_IgnoreError, + h->m_force_send) == -1) return -1; const NdbError &err= trans->getNdbError(); if (err.classification != NdbError::NoError && err.classification != NdbError::ConstraintViolation && err.classification != NdbError::NoDataFound) - return res; + return -1; return 0; } @@ -734,10 +733,9 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_PRINT("info", ("bit field")); DBUG_DUMP("value", (char*)&bits, pack_len); #ifdef WORDS_BIGENDIAN - if (pack_len < 5) - { - DBUG_RETURN(ndb_op->setValue(fieldnr, ((char*)&bits)+4) != 0); - } + /* store lsw first */ + bits = ((bits >> 32) & 0x00000000FFFFFFFF) + | ((bits << 32) & 0xFFFFFFFF00000000); #endif DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits) != 0); } @@ -1009,7 +1007,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_ASSERT(m_table == NULL); DBUG_ASSERT(m_table_info == NULL); - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; /* @@ -1105,7 +1103,7 @@ int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab) KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; DBUG_ENTER("ha_ndbcluster::create_indexes"); - + for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) { index_name= *key_name; @@ -2765,10 +2763,12 @@ int ha_ndbcluster::write_row(byte *record) { Ndb *ndb= get_ndb(); Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; +#ifndef DBUG_OFF char buff[22]; DBUG_PRINT("info", ("Trying to set next auto increment value to %s", llstr(next_val, buff))); +#endif Ndb_tuple_id_range_guard g(m_share); if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) == -1) @@ -2821,7 +2821,7 @@ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) { - THD *thd= current_thd; + THD *thd= table->in_use; 
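On big-endian hosts the handler now stores the two 32-bit halves of a bit column LSW-first (see the set_ndb_value() hunk above and the matching ndb_unpack_record() hunk below), so the stored layout matches little-endian machines. A self-contained sketch of that word swap follows; the helper names are illustrative and not part of ha_ndbcluster.

#include <stdint.h>

/* swap the 32-bit words of a 64-bit bit-field value so the least
   significant word comes first, as done before NdbOperation::setValue() */
static inline uint64_t bits_to_lsw_first(uint64_t bits)
{
  return ((bits >> 32) & 0x00000000FFFFFFFFULL) |
         ((bits << 32) & 0xFFFFFFFF00000000ULL);
}

/* reassemble the value from the two fetched words, mirroring the unpack
   side that feeds Field_bit::store() */
static inline int64_t bits_from_lsw_first(const uint32_t *buf)
{
  return (int64_t) (((uint64_t) buf[0]) |
                    (((uint64_t) buf[1]) << 32));
}

The handler applies the swap only under WORDS_BIGENDIAN; on little-endian hosts the in-memory value already has its least significant word first.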
NdbTransaction *trans= m_active_trans; NdbScanOperation* cursor= m_active_cursor; NdbOperation *op; @@ -3008,7 +3008,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) int ha_ndbcluster::delete_row(const byte *record) { - THD *thd= current_thd; + THD *thd= table->in_use; NdbTransaction *trans= m_active_trans; NdbScanOperation* cursor= m_active_cursor; NdbOperation *op; @@ -3154,10 +3154,21 @@ void ndb_unpack_record(TABLE *table, NdbValue *value, else { DBUG_PRINT("info", ("bit field H'%.8X%.8X", - *(Uint32*) (*value).rec->aRef(), - *((Uint32*) (*value).rec->aRef()+1))); - field_bit->Field_bit::store((longlong) (*value).rec->u_64_value(), + *(Uint32 *)(*value).rec->aRef(), + *((Uint32 *)(*value).rec->aRef()+1))); +#ifdef WORDS_BIGENDIAN + /* lsw is stored first */ + Uint32 *buf= (Uint32 *)(*value).rec->aRef(); + field_bit->Field_bit::store((((longlong)*buf) + & 0x000000000FFFFFFFF) + | + ((((longlong)*(buf+1)) << 32) + & 0xFFFFFFFF00000000), TRUE); +#else + field_bit->Field_bit::store((longlong) + (*value).rec->u_64_value(), TRUE); +#endif } /* Move back internal field pointer to point to original @@ -3369,19 +3380,6 @@ int ha_ndbcluster::index_read(byte *buf, } -int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, - const byte *key, uint key_len, - enum ha_rkey_function find_flag) -{ - statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); - DBUG_ENTER("ha_ndbcluster::index_read_idx"); - DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); - close_scan(); - index_init(index_no, 0); - DBUG_RETURN(index_read(buf, key, key_len, find_flag)); -} - - int ha_ndbcluster::index_next(byte *buf) { DBUG_ENTER("ha_ndbcluster::index_next"); @@ -3548,10 +3546,10 @@ int ha_ndbcluster::close_scan() m_multi_cursor= 0; if (!m_active_cursor && !m_multi_cursor) - DBUG_RETURN(1); + DBUG_RETURN(0); NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor; - + if (m_lock_tuple) { /* @@ -3811,7 +3809,7 @@ int ha_ndbcluster::info(uint flag) if (flag & HA_STATUS_AUTO) { DBUG_PRINT("info", ("HA_STATUS_AUTO")); - if (m_table) + if (m_table && table->found_next_number_field) { Ndb *ndb= get_ndb(); Ndb_tuple_id_range_guard g(m_share); @@ -3875,7 +3873,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_WRITE_CAN_REPLACE: DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); - if (!m_has_unique_index) + if (!m_has_unique_index || + current_thd->slave_thread) /* always set if slave, quick fix for bug 27378 */ { DBUG_PRINT("info", ("Turning ON use of write instead of insert")); m_use_write= TRUE; @@ -3905,6 +3904,12 @@ int ha_ndbcluster::reset() */ if (m_part_info) bitmap_set_all(&m_part_info->used_partitions); + + /* reset flags set by extra calls */ + m_ignore_dup_key= FALSE; + m_use_write= FALSE; + m_ignore_no_key= FALSE; + DBUG_RETURN(0); } @@ -4000,7 +4005,7 @@ int ha_ndbcluster::end_bulk_insert() } else { - int res= trans->restart(); + IF_DBUG(int res=) trans->restart(); DBUG_ASSERT(res == 0); } } @@ -4718,7 +4723,9 @@ static int create_ndb_column(NDBCOL &col, // Set autoincrement if (field->flags & AUTO_INCREMENT_FLAG) { +#ifndef DBUG_OFF char buff[22]; +#endif col.setAutoIncrement(TRUE); ulonglong value= info->auto_increment_value ? 
info->auto_increment_value : (ulonglong) 1; @@ -4742,10 +4749,11 @@ int ha_ndbcluster::create(const char *name, NDBTAB tab; NDBCOL col; uint pack_length, length, i, pk_length= 0; - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE); bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE); char tablespace[FN_LEN]; + NdbDictionary::Table::SingleUserMode single_user_mode= NdbDictionary::Table::SingleUserModeLocked; DBUG_ENTER("ha_ndbcluster::create"); DBUG_PRINT("enter", ("name: %s", name)); @@ -4797,19 +4805,23 @@ int ha_ndbcluster::create(const char *name, schema distribution table is setup ( unless it is a creation of the schema dist table itself ) */ - if (!ndb_schema_share && - !(strcmp(m_dbname, NDB_REP_DB) == 0 && - strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) + if (!ndb_schema_share) { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (!(strcmp(m_dbname, NDB_REP_DB) == 0 && + strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) + { + DBUG_PRINT("info", ("Schema distribution table not setup")); + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + single_user_mode = NdbDictionary::Table::SingleUserModeReadWrite; } #endif /* HAVE_NDB_BINLOG */ DBUG_PRINT("table", ("name: %s", m_tabname)); tab.setName(m_tabname); tab.setLogging(!(create_info->options & HA_LEX_CREATE_TMP_TABLE)); - + tab.setSingleUserMode(single_user_mode); + // Save frm data for this table if (readfrm(name, &data, &length)) DBUG_RETURN(1); @@ -4832,7 +4844,8 @@ int ha_ndbcluster::create(const char *name, if ((my_errno= create_ndb_column(col, field, create_info))) DBUG_RETURN(my_errno); - if (create_info->storage_media == HA_SM_DISK) + if (create_info->storage_media == HA_SM_DISK || + create_info->tablespace) col.setStorageType(NdbDictionary::Column::StorageTypeDisk); else col.setStorageType(NdbDictionary::Column::StorageTypeMemory); @@ -5074,7 +5087,7 @@ int ha_ndbcluster::create_handler_files(const char *file, { Ndb* ndb; const NDBTAB *tab; - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; int error= 0; @@ -5389,7 +5402,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) { DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", share->key, share->use_count)); - int r= rename_share(share, to); + IF_DBUG(int r=) rename_share(share, to); DBUG_ASSERT(r == 0); } #endif @@ -5410,8 +5423,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) #ifdef HAVE_NDB_BINLOG if (share) { - int r= rename_share(share, from); - DBUG_ASSERT(r == 0); + IF_DBUG(int ret=) rename_share(share, from); + DBUG_ASSERT(ret == 0); /* ndb_share reference temporary free */ DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", share->key, share->use_count)); @@ -5572,6 +5585,7 @@ retry_temporary_error1: { ndb_table_id= h->m_table->getObjectId(); ndb_table_version= h->m_table->getObjectVersion(); + DBUG_PRINT("info", ("success 1")); } else { @@ -5585,6 +5599,7 @@ retry_temporary_error1: break; } res= ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", ("error(1) %u", res)); } h->release_metadata(thd, ndb); } @@ -5601,6 +5616,8 @@ retry_temporary_error1: { ndb_table_id= ndbtab_g.get_table()->getObjectId(); ndb_table_version= ndbtab_g.get_table()->getObjectVersion(); + DBUG_PRINT("info", ("success 2")); + break; } else { @@ -5620,8 +5637,8 @@ retry_temporary_error1: } } } - else - res= 
ndb_to_mysql_error(&dict->getNdbError()); + res= ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", ("error(2) %u", res)); break; } } @@ -6017,7 +6034,7 @@ void ha_ndbcluster::set_part_info(partition_info *part_info) int ha_ndbcluster::close(void) { DBUG_ENTER("close"); - THD *thd= current_thd; + THD *thd= table->in_use; Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb; /* ndb_share reference handler free */ DBUG_PRINT("NDB_SHARE", ("%s handler free use_count: %u", @@ -6117,7 +6134,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, int error= 0; NdbError ndb_error; uint len; - const void* data; + const void* data= NULL; Ndb* ndb; char key[FN_REFLEN]; DBUG_ENTER("ndbcluster_discover"); @@ -6153,9 +6170,16 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, { const NdbError err= dict->getNdbError(); if (err.code == 709 || err.code == 723) + { error= -1; + DBUG_PRINT("info", ("ndb_error.code: %u", ndb_error.code)); + } else + { + error= -1; ndb_error= err; + DBUG_PRINT("info", ("ndb_error.code: %u", ndb_error.code)); + } goto err; } DBUG_PRINT("info", ("Found table %s", tab->getName())); @@ -6189,6 +6213,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, DBUG_RETURN(0); err: + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); if (share) { /* ndb_share reference temporary free */ @@ -6342,7 +6367,7 @@ int ndb_create_table_from_engine(THD *thd, const char *db, LEX *old_lex= thd->lex, newlex; thd->lex= &newlex; newlex.current_select= NULL; - lex_start(thd, (const uchar*) "", 0); + lex_start(thd, "", 0); int res= ha_create_table_from_engine(thd, db, table_name); thd->lex= old_lex; return res; @@ -7269,7 +7294,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, if (share->commit_count != 0) { *commit_count= share->commit_count; +#ifndef DBUG_OFF char buff[22]; +#endif DBUG_PRINT("info", ("Getting commit_count: %s from share", llstr(share->commit_count, buff))); pthread_mutex_unlock(&share->mutex); @@ -7305,7 +7332,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, pthread_mutex_lock(&share->mutex); if (share->commit_count_lock == lock) { +#ifndef DBUG_OFF char buff[22]; +#endif DBUG_PRINT("info", ("Setting commit_count to %s", llstr(stat.commit_count, buff))); share->commit_count= stat.commit_count; @@ -7364,7 +7393,9 @@ ndbcluster_cache_retrieval_allowed(THD *thd, bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); char *dbname= full_name; char *tabname= dbname+strlen(dbname)+1; +#ifndef DBUG_OFF char buff[22], buff2[22]; +#endif DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d", dbname, tabname, is_autocommit)); @@ -7431,7 +7462,9 @@ ha_ndbcluster::register_query_cache_table(THD *thd, ulonglong *engine_data) { Uint64 commit_count; +#ifndef DBUG_OFF char buff[22]; +#endif bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d", @@ -7602,7 +7635,9 @@ int handle_trailing_share(NDB_SHARE *share) /* Ndb share has not been released as it should */ +#ifdef NOT_YET DBUG_ASSERT(FALSE); +#endif /* This is probably an error. 
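Several hunks in this file now initialize data and pack_data to NULL and free them unconditionally on the shared error path (my_free() with MY_ALLOW_ZERO_PTR). A standalone sketch of why the NULL initialization matters for a single cleanup label follows; the function and names are invented, and plain free() stands in for my_free().

#include <stdlib.h>
#include <string.h>

static int build_packed_copy(const char *src, char **out)
{
  char *data= NULL;        // NULL so the cleanup path can free blindly
  char *pack_data= NULL;

  if (!(data= (char*) malloc(strlen(src) + 1)))
    goto cleanup;
  strcpy(data, src);

  if (!(pack_data= (char*) malloc(strlen(src) + 1)))
    goto cleanup;
  strcpy(pack_data, data);

  free(data);
  *out= pack_data;
  return 0;

cleanup:
  free(data);              // free(NULL) is a no-op, like MY_ALLOW_ZERO_PTR
  free(pack_data);
  return 1;
}

int main(void)
{
  char *copy= NULL;
  if (build_packed_copy("frm data", &copy) == 0)
    free(copy);
  return 0;
}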
We can however save the situation @@ -7876,7 +7911,9 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const int retries= 10; int reterr= 0; int retry_sleep= 30 * 1000; /* 30 milliseconds */ +#ifndef DBUG_OFF char buff[22], buff2[22], buff3[22], buff4[22]; +#endif DBUG_ENTER("ndb_get_table_statistics"); DBUG_PRINT("enter", ("table: %s", ndbtab->getName())); @@ -8534,7 +8571,6 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) goto ndb_util_thread_fail; thd->init_for_queries(); thd->version=refresh_version; - thd->set_time(); thd->main_security_ctx.host_or_ip= ""; thd->client_capabilities = 0; my_net_init(&thd->net, 0); @@ -8694,7 +8730,9 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat) == 0) { +#ifndef DBUG_OFF char buff[22], buff2[22]; +#endif DBUG_PRINT("info", ("Table: %s commit_count: %s rows: %s", share->key, @@ -9052,7 +9090,7 @@ void ndb_serialize_cond(const Item *item, void *arg) Check that the field is part of the table of the handler instance and that we expect a field with of this result type. */ - if (context->table == field->table) + if (context->table->s == field->table->s) { const NDBTAB *tab= (const NDBTAB *) context->ndb_table; DBUG_PRINT("info", ("FIELD_ITEM")); @@ -9546,8 +9584,8 @@ void ndb_serialize_cond(const Item *item, void *arg) DBUG_PRINT("info", ("INT_ITEM")); if (context->expecting(Item::INT_ITEM)) { - Item_int *int_item= (Item_int *) item; - DBUG_PRINT("info", ("value %ld", (long) int_item->value)); + DBUG_PRINT("info", ("value %ld", + (long) ((Item_int*) item)->value)); NDB_ITEM_QUALIFICATION q; q.value_type= Item::INT_ITEM; curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); @@ -9573,8 +9611,7 @@ void ndb_serialize_cond(const Item *item, void *arg) DBUG_PRINT("info", ("REAL_ITEM")); if (context->expecting(Item::REAL_ITEM)) { - Item_float *float_item= (Item_float *) item; - DBUG_PRINT("info", ("value %f", float_item->value)); + DBUG_PRINT("info", ("value %f", ((Item_float*) item)->value)); NDB_ITEM_QUALIFICATION q; q.value_type= Item::REAL_ITEM; curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); @@ -9621,8 +9658,8 @@ void ndb_serialize_cond(const Item *item, void *arg) DBUG_PRINT("info", ("DECIMAL_ITEM")); if (context->expecting(Item::DECIMAL_ITEM)) { - Item_decimal *decimal_item= (Item_decimal *) item; - DBUG_PRINT("info", ("value %f", decimal_item->val_real())); + DBUG_PRINT("info", ("value %f", + ((Item_decimal*) item)->val_real())); NDB_ITEM_QUALIFICATION q; q.value_type= Item::DECIMAL_ITEM; curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); @@ -10607,10 +10644,23 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, int pk= 0; int ai= 0; + + if (create_info->tablespace) + create_info->storage_media = HA_SM_DISK; + else + create_info->storage_media = HA_SM_MEMORY; + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; const NDBCOL *col= tab->getColumn(i); + if (col->getStorageType() == NDB_STORAGETYPE_MEMORY && create_info->storage_media != HA_SM_MEMORY || + col->getStorageType() == NDB_STORAGETYPE_DISK && create_info->storage_media != HA_SM_DISK) + { + DBUG_PRINT("info", ("Column storage media is changed")); + DBUG_RETURN(COMPATIBLE_DATA_NO); + } + if (field->flags & FIELD_IS_RENAMED) { DBUG_PRINT("info", ("Field has been renamed, copy table")); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 63665fde0f8..6cc0e423f2f 100644 --- 
a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -642,8 +642,6 @@ class ha_ndbcluster: public handler int index_end(); int index_read(byte *buf, const byte *key, uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte *buf, uint index, const byte *key, uint key_len, - enum ha_rkey_function find_flag); int index_next(byte *buf); int index_prev(byte *buf); int index_first(byte *buf); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index ea5a2deaeb3..4dc75be79e9 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -500,7 +500,7 @@ static int ndbcluster_reset_logs(THD *thd) static int ndbcluster_binlog_index_purge_file(THD *thd, const char *file) { - if (!ndb_binlog_running) + if (!ndb_binlog_running || thd->slave_thread) return 0; DBUG_ENTER("ndbcluster_binlog_index_purge_file"); @@ -729,6 +729,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd) NDB_REP_DB "." NDB_APPLY_TABLE " ( server_id INT UNSIGNED NOT NULL," " epoch BIGINT UNSIGNED NOT NULL, " + " log_name VARCHAR(255) BINARY NOT NULL, " + " start_pos BIGINT UNSIGNED NOT NULL, " + " end_pos BIGINT UNSIGNED NOT NULL, " " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB"); run_query(thd, buf, end, TRUE, TRUE); @@ -1829,15 +1832,15 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, // fall through case SOT_CREATE_TABLE: pthread_mutex_lock(&LOCK_open); - if (ndbcluster_check_if_local_table(schema->db, schema->name)) - { - DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'", - schema->db, schema->name)); + if (ndbcluster_check_if_local_table(schema->db, schema->name)) + { + DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'", + schema->db, schema->name)); sql_print_error("NDB binlog: Skipping locally defined table '%s.%s' from " "binlog schema event '%s' from node %d. ", schema->db, schema->name, schema->query, schema->node_id); - } + } else if (ndb_create_table_from_engine(thd, schema->db, schema->name)) { sql_print_error("NDB binlog: Could not discover table '%s.%s' from " @@ -1854,27 +1857,27 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, log_query= 1; break; case SOT_DROP_DB: - /* Drop the database locally if it only contains ndb tables */ - if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db)) - { - run_query(thd, schema->query, - schema->query + schema->query_length, - TRUE, /* print error */ - TRUE); /* don't binlog the query */ - /* binlog dropping database after any table operations */ - post_epoch_log_list->push_back(schema, mem_root); - /* acknowledge this query _after_ epoch completion */ - post_epoch_unlock= 1; - } - else - { - /* Database contained local tables, leave it */ - sql_print_error("NDB binlog: Skipping drop database '%s' since it contained local tables " + /* Drop the database locally if it only contains ndb tables */ + if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db)) + { + run_query(thd, schema->query, + schema->query + schema->query_length, + TRUE, /* print error */ + TRUE); /* don't binlog the query */ + /* binlog dropping database after any table operations */ + post_epoch_log_list->push_back(schema, mem_root); + /* acknowledge this query _after_ epoch completion */ + post_epoch_unlock= 1; + } + else + { + /* Database contained local tables, leave it */ + sql_print_error("NDB binlog: Skipping drop database '%s' since it contained local tables " "binlog schema event '%s' from node %d. 
", schema->db, schema->query, schema->node_id); - log_query= 1; - } + log_query= 1; + } break; case SOT_CREATE_DB: /* fall through */ @@ -2121,18 +2124,18 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd, share= 0; } pthread_mutex_lock(&LOCK_open); - if (ndbcluster_check_if_local_table(schema->db, schema->name)) - { - DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'", - schema->db, schema->name)); + if (ndbcluster_check_if_local_table(schema->db, schema->name)) + { + DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'", + schema->db, schema->name)); sql_print_error("NDB binlog: Skipping locally defined table '%s.%s' from " "binlog schema event '%s' from node %d. ", schema->db, schema->name, schema->query, schema->node_id); - } + } else if (ndb_create_table_from_engine(thd, schema->db, schema->name)) - { - sql_print_error("NDB binlog: Could not discover table '%s.%s' from " + { + sql_print_error("NDB binlog: Could not discover table '%s.%s' from " "binlog schema event '%s' from node %d. my_errno: %d", schema->db, schema->name, schema->query, schema->node_id, my_errno); @@ -2260,7 +2263,7 @@ int ndb_add_ndb_binlog_index(THD *thd, void *_row) { TABLE_LIST *p_binlog_tables= &binlog_tables; close_tables_for_reopen(thd, &p_binlog_tables); - ndb_binlog_index= 0; + ndb_binlog_index= 0; continue; } sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index"); @@ -3225,15 +3228,17 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, if (share->flags & NSF_BLOB_FLAG) { my_ptrdiff_t ptrdiff= 0; - int ret= get_ndb_blobs_value(table, share->ndb_value[0], - blobs_buffer[0], blobs_buffer_size[0], - ptrdiff); + IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[0], + blobs_buffer[0], + blobs_buffer_size[0], + ptrdiff); DBUG_ASSERT(ret == 0); } ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]); - int ret= trans.write_row(::server_id, - injector::transaction::table(table, TRUE), - &b, n_fields, table->record[0]); + IF_DBUG(int ret=) trans.write_row(::server_id, + injector::transaction::table(table, + TRUE), + &b, n_fields, table->record[0]); DBUG_ASSERT(ret == 0); } break; @@ -3251,27 +3256,29 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, n= 0; /* use the primary key only as it save time and space and it is the only thing needed to log the delete - */ + */ else n= 1; /* we use the before values since we don't have a primary key since the mysql server does not handle the hidden primary key - */ + */ if (share->flags & NSF_BLOB_FLAG) { my_ptrdiff_t ptrdiff= table->record[n] - table->record[0]; - int ret= get_ndb_blobs_value(table, share->ndb_value[n], - blobs_buffer[n], blobs_buffer_size[n], - ptrdiff); + IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[n], + blobs_buffer[n], + blobs_buffer_size[n], + ptrdiff); DBUG_ASSERT(ret == 0); } ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]); DBUG_EXECUTE("info", print_records(table, table->record[n]);); - int ret= trans.delete_row(::server_id, - injector::transaction::table(table, TRUE), - &b, n_fields, table->record[n]); + IF_DBUG(int ret =) trans.delete_row(::server_id, + injector::transaction::table(table, + TRUE), + &b, n_fields, table->record[n]); DBUG_ASSERT(ret == 0); } break; @@ -3283,9 +3290,10 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, if (share->flags & NSF_BLOB_FLAG) { my_ptrdiff_t ptrdiff= 0; - int ret= get_ndb_blobs_value(table, share->ndb_value[0], - 
blobs_buffer[0], blobs_buffer_size[0], - ptrdiff); + IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[0], + blobs_buffer[0], + blobs_buffer_size[0], + ptrdiff); DBUG_ASSERT(ret == 0); } ndb_unpack_record(table, share->ndb_value[0], @@ -3296,7 +3304,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, /* since table has a primary key, we can do a write using only after values - */ + */ trans.write_row(::server_id, injector::transaction::table(table, TRUE), &b, n_fields, table->record[0]);// after values } @@ -3305,22 +3313,24 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, /* mysql server cannot handle the ndb hidden key and therefore needs the before image as well - */ + */ if (share->flags & NSF_BLOB_FLAG) { my_ptrdiff_t ptrdiff= table->record[1] - table->record[0]; - int ret= get_ndb_blobs_value(table, share->ndb_value[1], - blobs_buffer[1], blobs_buffer_size[1], - ptrdiff); + IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[1], + blobs_buffer[1], + blobs_buffer_size[1], + ptrdiff); DBUG_ASSERT(ret == 0); } ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]); DBUG_EXECUTE("info", print_records(table, table->record[1]);); - int ret= trans.update_row(::server_id, - injector::transaction::table(table, TRUE), - &b, n_fields, - table->record[1], // before values - table->record[0]);// after values + IF_DBUG(int ret =) trans.update_row(::server_id, + injector::transaction::table(table, + TRUE), + &b, n_fields, + table->record[1], // before values + table->record[0]);// after values DBUG_ASSERT(ret == 0); } } @@ -3496,7 +3506,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) thd->command= COM_DAEMON; thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG; thd->version= refresh_version; - thd->set_time(); thd->main_security_ctx.host_or_ip= ""; thd->client_capabilities= 0; my_net_init(&thd->net, 0); @@ -3850,7 +3859,9 @@ restart: continue; } TABLE *table= share->table; +#ifndef DBUG_OFF const LEX_STRING &name= table->s->table_name; +#endif if ((event_types & (NdbDictionary::Event::TE_INSERT | NdbDictionary::Event::TE_UPDATE | NdbDictionary::Event::TE_DELETE)) == 0) @@ -3867,7 +3878,7 @@ restart: } DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str)); injector::transaction::table tbl(table, TRUE); - int ret= trans.use_table(::server_id, tbl); + IF_DBUG(int ret=) trans.use_table(::server_id, tbl); DBUG_ASSERT(ret == 0); } } @@ -3877,10 +3888,12 @@ restart: { TABLE *table= ndb_apply_status_share->table; - const LEX_STRING& name=table->s->table_name; +#ifndef DBUG_OFF + const LEX_STRING& name= table->s->table_name; DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str)); +#endif injector::transaction::table tbl(table, TRUE); - int ret= trans.use_table(::server_id, tbl); + IF_DBUG(int ret=) trans.use_table(::server_id, tbl); DBUG_ASSERT(ret == 0); // Set all fields non-null. 
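The IF_DBUG(int ret=) pattern used throughout these binlog hunks keeps the assignment only in debug builds, where DBUG_ASSERT can check the result, and leaves no unused variable behind in release builds. The macros below are an assumed, simplified stand-in for the server's definitions, just to show how the expansion works.

#include <assert.h>

#ifndef NDEBUG
#define DEMO_IF_DBUG(A) A
#define DEMO_ASSERT(A)  assert(A)
#else
#define DEMO_IF_DBUG(A)
#define DEMO_ASSERT(A)  ((void) 0)
#endif

static int write_row_stub(void) { return 0; }

int main(void)
{
  // debug build:   int ret= write_row_stub();  assert(ret == 0);
  // release build: write_row_stub();           no unused variable
  DEMO_IF_DBUG(int ret=) write_row_stub();
  DEMO_ASSERT(ret == 0);
  return 0;
}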
@@ -3888,6 +3901,9 @@ restart: bzero(table->record[0], table->s->null_bytes); table->field[0]->store((longlong)::server_id); table->field[1]->store((longlong)gci); + table->field[2]->store("", 0, &my_charset_bin); + table->field[3]->store((longlong)0); + table->field[4]->store((longlong)0); trans.write_row(::server_id, injector::transaction::table(table, TRUE), &table->s->all_set, table->s->fields, @@ -3945,7 +3961,7 @@ restart: else { // set injector_ndb database/schema from table internal name - int ret= + IF_DBUG(int ret=) i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable()); DBUG_ASSERT(ret == 0); ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row); @@ -3979,7 +3995,7 @@ restart: /* note! pOp is not referring to an event in the next epoch or is == 0 - */ + */ #ifdef RUN_NDB_BINLOG_TIMER write_timer.stop(); #endif diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h index 9f7b9146d91..c6bc8f577f8 100644 --- a/sql/ha_ndbcluster_tables.h +++ b/sql/ha_ndbcluster_tables.h @@ -15,7 +15,9 @@ */ #define NDB_REP_DB "mysql" +#define OLD_NDB_REP_DB "cluster" #define NDB_REP_TABLE "ndb_binlog_index" #define NDB_APPLY_TABLE "ndb_apply_status" #define OLD_NDB_APPLY_TABLE "apply_status" #define NDB_SCHEMA_TABLE "ndb_schema" +#define OLD_NDB_SCHEMA_TABLE "schema" diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 87d24207dcd..d3979fa0718 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -158,7 +158,7 @@ static uint alter_table_flags(uint flags __attribute__((unused))) ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + m_is_sub_partitioned(0), is_clone(FALSE) { DBUG_ENTER("ha_partition::ha_partition(table)"); init_handler_variables(); @@ -180,8 +180,7 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) ha_partition::ha_partition(handlerton *hton, partition_info *part_info) :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) - + m_is_sub_partitioned(m_part_info->is_sub_partitioned()), is_clone(FALSE) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); init_handler_variables(); @@ -2262,9 +2261,12 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) } /* Initialise the bitmap we use to determine what partitions are used */ - if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) - DBUG_RETURN(1); - bitmap_set_all(&(m_part_info->used_partitions)); + if (!is_clone) + { + if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) + DBUG_RETURN(1); + bitmap_set_all(&(m_part_info->used_partitions)); + } /* Recalculate table flags as they may change after open */ m_table_flags= m_file[0]->table_flags(); @@ -2320,6 +2322,19 @@ err_handler: DBUG_RETURN(error); } +handler *ha_partition::clone(MEM_ROOT *mem_root) +{ + handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type); + ((ha_partition*)new_handler)->m_part_info= m_part_info; + ((ha_partition*)new_handler)->is_clone= TRUE; + if (new_handler && !new_handler->ha_open(table, + table->s->normalized_path.str, + table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + return new_handler; + return NULL; +} + /* Close handler object @@ -2346,7 +2361,8 @@ int ha_partition::close(void) DBUG_ENTER("ha_partition::close"); delete_queue(&m_queue); - bitmap_free(&(m_part_info->used_partitions)); + if (!is_clone) + 
bitmap_free(&(m_part_info->used_partitions)); file= m_file; repeat: @@ -3320,13 +3336,14 @@ int ha_partition::index_end() */ int ha_partition::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) + key_part_map keypart_map, + enum ha_rkey_function find_flag) { DBUG_ENTER("ha_partition::index_read"); end_range= 0; m_index_scan_type= partition_index_read; - DBUG_RETURN(common_index_read(buf, key, key_len, find_flag)); + DBUG_RETURN(common_index_read(buf, key, keypart_map, find_flag)); } @@ -3339,14 +3356,17 @@ int ha_partition::index_read(byte * buf, const byte * key, see index_read for rest */ -int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len, +int ha_partition::common_index_read(byte *buf, const byte *key, + key_part_map keypart_map, enum ha_rkey_function find_flag) { int error; bool reverse_order= FALSE; + uint key_len= calculate_key_len(table, active_index, key, keypart_map); DBUG_ENTER("ha_partition::common_index_read"); memcpy((void*)m_start_key.key, key, key_len); + m_start_key.keypart_map= keypart_map; m_start_key.length= key_len; m_start_key.flag= find_flag; @@ -3475,33 +3495,6 @@ int ha_partition::common_first_last(byte *buf) /* - Perform index read using index where always only one row is returned - - SYNOPSIS - index_read_idx() - see index_read for rest of parameters and return values - - DESCRIPTION - Positions an index cursor to the index specified in key. Fetches the - row if any. This is only used to read whole keys. - TODO: Optimise this code to avoid index_init and index_end -*/ - -int ha_partition::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, - enum ha_rkey_function find_flag) -{ - int res; - DBUG_ENTER("ha_partition::index_read_idx"); - - index_init(index, 0); - res= index_read(buf, key, key_len, find_flag); - index_end(); - DBUG_RETURN(res); -} - - -/* Read last using key SYNOPSIS @@ -3519,14 +3512,15 @@ int ha_partition::index_read_idx(byte * buf, uint index, const byte * key, Can only be used on indexes supporting HA_READ_ORDER */ -int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen) +int ha_partition::index_read_last(byte *buf, const byte *key, + key_part_map keypart_map) { DBUG_ENTER("ha_partition::index_read_last"); m_ordered= TRUE; // Safety measure end_range= 0; m_index_scan_type= partition_index_read_last; - DBUG_RETURN(common_index_read(buf, key, keylen, HA_READ_PREFIX_LAST)); + DBUG_RETURN(common_index_read(buf, key, keypart_map, HA_READ_PREFIX_LAST)); } @@ -3672,7 +3666,7 @@ int ha_partition::read_range_first(const key_range *start_key, m_index_scan_type= partition_index_read; error= common_index_read(m_rec0, start_key->key, - start_key->length, start_key->flag); + start_key->keypart_map, start_key->flag); } DBUG_RETURN(error); } @@ -3871,7 +3865,7 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf) case partition_index_read: DBUG_PRINT("info", ("index_read on partition %d", i)); error= file->index_read(buf, m_start_key.key, - m_start_key.length, + m_start_key.keypart_map, m_start_key.flag); break; case partition_index_first: @@ -3963,7 +3957,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order) case partition_index_read: error= file->index_read(rec_buf_ptr, m_start_key.key, - m_start_key.length, + m_start_key.keypart_map, m_start_key.flag); break; case partition_index_first: @@ -3977,7 +3971,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order) case partition_index_read_last: 
error= file->index_read_last(rec_buf_ptr, m_start_key.key, - m_start_key.length); + m_start_key.keypart_map); reverse_order= TRUE; break; default: @@ -4466,7 +4460,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info, 2) It is called from close_thread_table which in turn is called from close_thread_tables except in the case where the tables are locked in which case ha_commit_stmt is called instead. - It is only called from here if flush_version hasn't changed and the + It is only called from here if refresh_version hasn't changed and the table is not an old table when calling close_thread_table. close_thread_tables is called from many places as a general clean up function after completing a query. @@ -4487,8 +4481,9 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info, The handler will set HA_KEYREAD_ONLY in its table flags to indicate this feature is supported. HA_EXTRA_FLUSH: - Indication to flush tables to disk, called at close_thread_table to + Indication to flush tables to disk, is supposed to be used to ensure disk based tables are flushed at end of query execution. + Currently is never used. 2) Parameters used by some non-MyISAM handlers ---------------------------------------------- diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 4fdf325fa06..a081e4bb472 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -132,7 +132,13 @@ private: THR_LOCK_DATA lock; /* MySQL lock */ PARTITION_SHARE *share; /* Shared lock info */ + /* + TRUE <=> this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. + */ + bool is_clone; public: + handler *clone(MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -378,9 +384,8 @@ public: any end processing needed. */ virtual int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - virtual int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + key_part_map keypart_map, + enum ha_rkey_function find_flag); virtual int index_init(uint idx, bool sorted); virtual int index_end(); @@ -393,7 +398,8 @@ public: virtual int index_first(byte * buf); virtual int index_last(byte * buf); virtual int index_next_same(byte * buf, const byte * key, uint keylen); - virtual int index_read_last(byte * buf, const byte * key, uint keylen); + virtual int index_read_last(byte * buf, const byte * key, + key_part_map keypart_map); /* read_first_row is virtual method but is only implemented by @@ -419,7 +425,8 @@ public: private: int common_index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + key_part_map keypart_map, + enum ha_rkey_function find_flag); int common_first_last(byte * buf); int partition_scan_set_up(byte * buf, bool idx_read_flag); int handle_unordered_next(byte * buf, bool next_same); diff --git a/sql/handler.cc b/sql/handler.cc index 8e7206aade9..617bf9ee378 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -845,7 +845,7 @@ int ha_rollback_trans(THD *thd, bool all) message in the error log, so we don't send it. 
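The ha_partition::clone()/is_clone change a few hunks up lets a cloned handler share the partition info and its used_partitions bitmap with the original, so only the owning object may allocate and free that state. A small standalone model of such an ownership flag follows; the class and member names are illustrative only.

#include <stdlib.h>

class part_handler
{
  unsigned char *used_parts;   // shared bitmap, owned only by the original
  bool is_clone;               // true: the bitmap is borrowed, never freed here
public:
  part_handler() : used_parts(NULL), is_clone(false) {}

  bool open(size_t nparts)
  {
    if (is_clone)              // a clone reuses the owner's bitmap
      return true;
    used_parts= (unsigned char*) calloc((nparts + 7) / 8, 1);
    return used_parts != NULL;
  }

  part_handler *clone() const
  {
    part_handler *h= new part_handler();
    h->used_parts= used_parts;  // share, do not copy
    h->is_clone= true;
    return h;
  }

  ~part_handler()
  {
    if (!is_clone)              // only the owner releases the bitmap
      free(used_parts);
  }
};

int main(void)
{
  part_handler owner;
  if (owner.open(16))
  {
    part_handler *c= owner.clone();
    c->open(16);                // no-op for the clone
    delete c;                   // bitmap stays alive for the owner
  }
  return 0;
}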
*/ if (is_real_trans && (thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && - !thd->slave_thread) + !thd->slave_thread && thd->killed != THD::KILL_CONNECTION) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARNING_NOT_COMPLETE_ROLLBACK, ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); @@ -1143,9 +1143,9 @@ bool mysql_xa_recover(THD *thd) XID_STATE *xs; DBUG_ENTER("mysql_xa_recover"); - field_list.push_back(new Item_int("formatID",0,11)); - field_list.push_back(new Item_int("gtrid_length",0,11)); - field_list.push_back(new Item_int("bqual_length",0,11)); + field_list.push_back(new Item_int("formatID", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_int("gtrid_length", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_int("bqual_length", 0, MY_INT32_NUM_DECIMAL_DIGITS)); field_list.push_back(new Item_empty_string("data",XIDDATASIZE)); if (protocol->send_fields(&field_list, @@ -1753,7 +1753,6 @@ int handler::update_auto_increment() bool append= FALSE; THD *thd= table->in_use; struct system_variables *variables= &thd->variables; - bool auto_increment_field_not_null; DBUG_ENTER("handler::update_auto_increment"); /* @@ -1761,11 +1760,9 @@ int handler::update_auto_increment() than the interval, but not smaller. */ DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum()); - auto_increment_field_not_null= table->auto_increment_field_not_null; - table->auto_increment_field_not_null= FALSE; // to reset for next row if ((nr= table->next_number_field->val_int()) != 0 || - auto_increment_field_not_null && + table->auto_increment_field_not_null && thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { /* @@ -1843,7 +1840,7 @@ int handler::update_auto_increment() nr= compute_next_insert_id(nr-1, variables); } - if (table->s->next_number_key_offset == 0) + if (table->s->next_number_keypart == 0) { /* We must defer the appending until "nr" has been possibly truncated */ append= TRUE; @@ -1963,7 +1960,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, table->read_set); column_bitmaps_signal(); index_init(table->s->next_number_index, 1); - if (!table->s->next_number_key_offset) + if (table->s->next_number_keypart == 0) { // Autoincrement at key-start error=index_last(table->record[1]); /* @@ -1979,7 +1976,8 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, key_copy(key, table->record[0], table->key_info + table->s->next_number_index, table->s->next_number_key_offset); - error= index_read(table->record[1], key, table->s->next_number_key_offset, + error= index_read(table->record[1], key, + make_prev_keypart_map(table->s->next_number_keypart), HA_READ_PREFIX_LAST); /* MySQL needs to call us for next row: assume we are inserting ("a",null) @@ -2311,7 +2309,7 @@ int handler::check_old_types() } -static bool update_frm_version(TABLE *table, bool needs_lock) +static bool update_frm_version(TABLE *table) { char path[FN_REFLEN]; File file; @@ -2323,9 +2321,6 @@ static bool update_frm_version(TABLE *table, bool needs_lock) strxmov(path, table->s->normalized_path.str, reg_ext, NullS); - if (needs_lock) - pthread_mutex_lock(&LOCK_open); - if ((file= my_open(path, O_RDWR|O_BINARY, MYF(MY_WME))) >= 0) { uchar version[4]; @@ -2347,8 +2342,6 @@ static bool update_frm_version(TABLE *table, bool needs_lock) err: if (file >= 0) VOID(my_close(file,MYF(MY_WME))); - if (needs_lock) - pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(result); } @@ -2465,7 +2458,7 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt) } if ((error= 
check(thd, check_opt))) return error; - return update_frm_version(table, 0); + return update_frm_version(table); } @@ -2474,7 +2467,7 @@ int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt) int result; if ((result= repair(thd, check_opt))) return result; - return update_frm_version(table, 0); + return update_frm_version(table); } @@ -2855,8 +2848,8 @@ ha_find_files(THD *thd,const char *db,const char *path, { int error= 0; DBUG_ENTER("ha_find_files"); - DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d", - db, path, wild, dir)); + DBUG_PRINT("enter", ("db: '%s' path: '%s' wild: '%s' dir: %d", + db, path, wild ? wild : "NULL", dir)); st_find_files_args args= {db, path, wild, dir, files}; plugin_foreach(thd, find_files_handlerton, @@ -3084,9 +3077,9 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, multi_range_curr < multi_range_end; multi_range_curr++) { - result= read_range_first(multi_range_curr->start_key.length ? + result= read_range_first(multi_range_curr->start_key.keypart_map ? &multi_range_curr->start_key : 0, - multi_range_curr->end_key.length ? + multi_range_curr->end_key.keypart_map ? &multi_range_curr->end_key : 0, test(multi_range_curr->range_flag & EQ_RANGE), multi_range_sorted); @@ -3151,9 +3144,9 @@ int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p) multi_range_curr < multi_range_end; multi_range_curr++) { - result= read_range_first(multi_range_curr->start_key.length ? + result= read_range_first(multi_range_curr->start_key.keypart_map ? &multi_range_curr->start_key : 0, - multi_range_curr->end_key.length ? + multi_range_curr->end_key.keypart_map ? &multi_range_curr->end_key : 0, test(multi_range_curr->range_flag & EQ_RANGE), multi_range_sorted); @@ -3212,7 +3205,7 @@ int handler::read_range_first(const key_range *start_key, else result= index_read(table->record[0], start_key->key, - start_key->length, + start_key->keypart_map, start_key->flag); if (result) DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) @@ -3284,15 +3277,19 @@ int handler::compare_key(key_range *range) return cmp; } + int handler::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) + key_part_map keypart_map, + enum ha_rkey_function find_flag) { - int error= ha_index_init(index, 0); - if (!error) - error= index_read(buf, key, key_len, find_flag); + int error, error1; + error= index_init(index, 0); if (!error) - error= ha_index_end(); - return error; + { + error= index_read(buf, key, keypart_map, find_flag); + error1= index_end(); + } + return error ? error : error1; } @@ -3456,7 +3453,7 @@ namespace { { int const check(table->s->tmp_table == NO_TMP_TABLE && binlog_filter->db_ok(table->s->db.str) && - strcmp("mysql", table->s->db.str) != 0); + !table->no_replicate); table->s->cached_row_logging_check= check; } diff --git a/sql/handler.h b/sql/handler.h index 82970cc1ac6..5c42dc670ee 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -867,6 +867,18 @@ public: {} }; +uint calculate_key_len(TABLE *, uint, const byte *, key_part_map); +/* + bitmap with first N+1 bits set + (keypart_map for a key prefix of [0..N] keyparts) +*/ +#define make_keypart_map(N) (((key_part_map)2 << (N)) - 1) +/* + bitmap with first N bits set + (keypart_map for a key prefix of [0..N-1] keyparts) +*/ +#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1) + /* The handler class is the interface for dynamically loadable storage engines. 
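The make_keypart_map()/make_prev_keypart_map() macros added to handler.h just above build prefix bitmaps over key parts. The standalone check below reproduces the bit arithmetic with demo_ copies of the macros; the unsigned long map type is an assumption made for the example.

#include <stdio.h>

typedef unsigned long demo_key_part_map;

// bits 0..N set: a prefix of N+1 key parts
#define demo_make_keypart_map(N)      (((demo_key_part_map) 2 << (N)) - 1)
// bits 0..N-1 set: a prefix of N key parts
#define demo_make_prev_keypart_map(N) (((demo_key_part_map) 1 << (N)) - 1)

int main(void)
{
  // 0x7: key parts 0, 1 and 2
  printf("make_keypart_map(2)      = 0x%lx\n",
         (unsigned long) demo_make_keypart_map(2));
  // 0x3: key parts 0 and 1
  printf("make_prev_keypart_map(2) = 0x%lx\n",
         (unsigned long) demo_make_prev_keypart_map(2));
  return 0;
}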
Do not add ifdefs and take care when adding or @@ -974,7 +986,11 @@ public: check_if_locking_is_allowed() thd Handler of the thread, trying to lock the table table Table handler to check - count Number of locks already granted to the table + count Total number of tables to be locked + current Index of the current table in the list of the tables + to be locked. + system_count Pointer to the counter of system tables seen thus + far. called_by_privileged_thread TRUE if called from a logger THD (general_log_thd or slow_log_thd) or by a privileged thread, which @@ -993,7 +1009,8 @@ public: */ virtual bool check_if_locking_is_allowed(uint sql_command, ulong type, TABLE *table, - uint count, + uint count, uint current, + uint *system_count, bool called_by_privileged_thread) { return TRUE; @@ -1202,11 +1219,32 @@ public: DBUG_ASSERT(FALSE); return HA_ERR_WRONG_COMMAND; } - virtual int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) + private: + virtual int index_read(byte * buf, const byte * key, uint key_len, + enum ha_rkey_function find_flag) { return HA_ERR_WRONG_COMMAND; } + public: +/** + @brief + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. +*/ + virtual int index_read(byte * buf, const byte * key, key_part_map keypart_map, + enum ha_rkey_function find_flag) + { + uint key_len= calculate_key_len(table, active_index, key, keypart_map); + return index_read(buf, key, key_len, find_flag); + } +/** + @brief + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. +*/ virtual int index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + key_part_map keypart_map, + enum ha_rkey_function find_flag); virtual int index_next(byte * buf) { return HA_ERR_WRONG_COMMAND; } virtual int index_prev(byte * buf) @@ -1216,8 +1254,21 @@ public: virtual int index_last(byte * buf) { return HA_ERR_WRONG_COMMAND; } virtual int index_next_same(byte *buf, const byte *key, uint keylen); + private: virtual int index_read_last(byte * buf, const byte * key, uint key_len) { return (my_errno=HA_ERR_WRONG_COMMAND); } + public: +/** + @brief + The following functions works like index_read, but it find the last + row with the current key value or prefix. 
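The handler.h hunk above moves the key_len based index_read()/index_read_last() into a private section and exposes public keypart_map overloads that compute the length and delegate. The sketch below shows just that delegation shape; demo_handler, engine_read() and the fixed 4-bytes-per-part length rule are invented for the example and are not the server's calculate_key_len() logic.

typedef unsigned long key_part_map_t;

class demo_handler
{
private:
  // legacy entry point: the engine still thinks in byte lengths
  virtual int engine_read(const unsigned char *key, unsigned key_len)
  {
    (void) key; (void) key_len;
    return 0;
  }

  // stand-in for calculate_key_len(): assume 4 bytes per key part
  unsigned prefix_length(key_part_map_t map) const
  {
    unsigned len= 0;
    for (; map; map>>= 1)
      len+= 4;
    return len;
  }

public:
  virtual ~demo_handler() {}

  // new entry point: callers pass a key-part prefix bitmap
  int read(const unsigned char *key, key_part_map_t keypart_map)
  {
    return engine_read(key, prefix_length(keypart_map));
  }
};

int main(void)
{
  demo_handler h;
  unsigned char key[8]= { 0 };
  return h.read(key, (key_part_map_t) 0x3);   // two key parts -> key_len 8
}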
+*/ + virtual int index_read_last(byte * buf, const byte * key, + key_part_map keypart_map) + { + uint key_len= calculate_key_len(table, active_index, key, keypart_map); + return index_read_last(buf, key, key_len); + } virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, KEY_MULTI_RANGE *ranges, uint range_count, bool sorted, HANDLER_BUFFER *buffer); @@ -1243,8 +1294,7 @@ public: { return HA_ERR_WRONG_COMMAND; } virtual int rnd_same(byte *buf, uint inx) { return HA_ERR_WRONG_COMMAND; } - virtual ha_rows records_in_range(uint inx, key_range *min_key, - key_range *max_key) + virtual ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key) { return (ha_rows) 10; } virtual void position(const byte *record)=0; virtual int info(uint)=0; // see my_base.h for full description diff --git a/sql/item.cc b/sql/item.cc index b087ddb3abb..613b72ad05e 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -148,7 +148,7 @@ void Hybrid_type_traits_integer::fix_length_and_dec(Item *item, Item *arg) const { item->decimals= 0; - item->max_length= 21; + item->max_length= MY_INT64_NUM_DECIMAL_DIGITS; item->unsigned_flag= 0; } @@ -1088,7 +1088,7 @@ bool Item_splocal::set_value(THD *thd, sp_rcontext *ctx, Item **it) Item_case_expr methods *****************************************************************************/ -Item_case_expr::Item_case_expr(int case_expr_id) +Item_case_expr::Item_case_expr(uint case_expr_id) :Item_sp_variable( C_STRING_WITH_LEN("case_expr")), m_case_expr_id(case_expr_id) { @@ -1125,6 +1125,8 @@ Item_case_expr::this_item_addr(THD *thd, Item **) void Item_case_expr::print(String *str) { + if (str->reserve(MAX_INT_WIDTH + sizeof("case_expr@"))) + return; /* purecov: inspected */ VOID(str->append(STRING_WITH_LEN("case_expr@"))); str->qs_append(m_case_expr_id); } @@ -1289,15 +1291,18 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Exception is Item_direct_view_ref which we need to convert to Item_ref to allow fields from view being stored in tmp table. */ + Item_aggregate_ref *item_ref; uint el= fields.elements; - Item *new_item, *real_itm= real_item(); + Item *real_itm= real_item(); ref_pointer_array[el]= real_itm; - if (!(new_item= new Item_aggregate_ref(&thd->lex->current_select->context, + if (!(item_ref= new Item_aggregate_ref(&thd->lex->current_select->context, ref_pointer_array + el, 0, name))) return; // fatal_error is set + if (type() == SUM_FUNC_ITEM) + item_ref->depended_from= ((Item_sum *) this)->depended_from(); fields.push_front(real_itm); - thd->change_item_tree(ref, new_item); + thd->change_item_tree(ref, item_ref); } } @@ -1553,6 +1558,8 @@ bool agg_item_charsets(DTCollation &coll, const char *fname, doesn't display each argument's characteristics. - if nargs is 1, then this error cannot happen. 
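Several item.cc/item.h hunks in this patch replace the hard-coded display widths 11 and 21 with MY_INT32_NUM_DECIMAL_DIGITS and MY_INT64_NUM_DECIMAL_DIGITS. The standalone check below only shows where those widths come from; the constants themselves are not redefined here.

#include <stdio.h>
#include <string.h>
#include <limits.h>

int main(void)
{
  char buf[32];

  // "-2147483648" is 11 characters: the old literal 11 for 32-bit ints
  sprintf(buf, "%d", INT_MIN);
  printf("32-bit needs %d chars\n", (int) strlen(buf));

  // "-9223372036854775808" is 20 characters; the old literal 21 also
  // leaves room for a sign in front of the 20-digit unsigned maximum
  sprintf(buf, "%lld", -9223372036854775807LL - 1);
  printf("64-bit needs %d chars\n", (int) strlen(buf));
  return 0;
}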
*/ + LINT_INIT(safe_args[0]); + LINT_INIT(safe_args[1]); if (nargs >=2 && nargs <= 3) { safe_args[0]= args[0]; @@ -1637,7 +1644,7 @@ void Item_ident_for_show::make_field(Send_field *tmp_field) Item_field::Item_field(Field *f) :Item_ident(0, NullS, *f->table_name, f->field_name), item_equal(0), no_const_subst(0), - have_privileges(0), any_privileges(0) + have_privileges(0), any_privileges(0), fixed_as_field(0) { set_field(f); /* @@ -1652,7 +1659,7 @@ Item_field::Item_field(THD *thd, Name_resolution_context *context_arg, Field *f) :Item_ident(context_arg, f->table->s->db.str, *f->table_name, f->field_name), item_equal(0), no_const_subst(0), - have_privileges(0), any_privileges(0) + have_privileges(0), any_privileges(0), fixed_as_field(0) { /* We always need to provide Item_field with a fully qualified field @@ -1691,9 +1698,12 @@ Item_field::Item_field(Name_resolution_context *context_arg, const char *field_name_arg) :Item_ident(context_arg, db_arg,table_name_arg,field_name_arg), field(0), result_field(0), item_equal(0), no_const_subst(0), - have_privileges(0), any_privileges(0) + have_privileges(0), any_privileges(0), fixed_as_field(0) { + SELECT_LEX *select= current_thd->lex->current_select; collation.set(DERIVATION_IMPLICIT); + if (select && select->parsing_place != IN_HAVING) + select->select_n_where_fields++; } // Constructor need to process subselect with temporary tables (see Item) @@ -1704,7 +1714,8 @@ Item_field::Item_field(THD *thd, Item_field *item) item_equal(item->item_equal), no_const_subst(item->no_const_subst), have_privileges(item->have_privileges), - any_privileges(item->any_privileges) + any_privileges(item->any_privileges), + fixed_as_field(item->fixed_as_field) { collation.set(DERIVATION_IMPLICIT); } @@ -1785,9 +1796,10 @@ void Item_ident::print(String *str) } } - if (!table_name || !field_name) + if (!table_name || !field_name || !field_name[0]) { - const char *nm= field_name ? field_name : name ? name : "tmp_field"; + const char *nm= (field_name && field_name[0]) ? + field_name : name ? name : "tmp_field"; append_identifier(thd, str, nm, (uint) strlen(nm)); return; } @@ -2518,7 +2530,7 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) item_result_type= REAL_RESULT; break; case INT_RESULT: - set_int(*(longlong*)entry->value, 21); + set_int(*(longlong*)entry->value, MY_INT64_NUM_DECIMAL_DIGITS); item_type= Item::INT_ITEM; item_result_type= INT_RESULT; break; @@ -3348,7 +3360,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) ORDER *group_list= (ORDER*) select->group_list.first; bool ambiguous_fields= FALSE; uint counter; - bool not_used; + enum_resolution_type resolution; /* Search for a column or derived column named as 'ref' in the SELECT @@ -3356,8 +3368,10 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) */ if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used))) + &resolution))) return NULL; /* Some error occurred. */ + if (resolution == RESOLVED_AGAINST_ALIAS) + ref->alias_name_used= TRUE; /* If this is a non-aggregated field inside HAVING, search in GROUP BY. 
*/ if (select->having_fix_field && !ref->with_sum_func && group_list) @@ -3467,7 +3481,12 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) */ Name_resolution_context *last_checked_context= context; Item **ref= (Item **) not_found_item; - Name_resolution_context *outer_context= context->outer_context; + SELECT_LEX *current_sel= (SELECT_LEX *) thd->lex->current_select; + Name_resolution_context *outer_context= 0; + /* Currently derived tables cannot be correlated */ + if (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE) + outer_context= context->outer_context; for (; outer_context; outer_context= outer_context->outer_context) @@ -3515,8 +3534,46 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) } if (*from_field != view_ref_found) { + prev_subselect_item->used_tables_cache|= (*from_field)->table->map; prev_subselect_item->const_item_cache= 0; + if (!last_checked_context->select_lex->having_fix_field && + !fixed_as_field) + { + Item_outer_ref *rf; + Query_arena *arena= 0, backup; + /* + Each outer field is replaced for an Item_outer_ref object. + This is done in order to get correct results when the outer + select employs a temporary table. + The original fields are saved in the inner_fields_list of the + outer select. This list is created by the following reasons: + 1. We can't add field items to the outer select list directly + because the outer select hasn't been fully fixed yet. + 2. We need a location to refer to in the Item_ref object + so the inner_fields_list is used as such temporary + reference storage. + The new Item_outer_ref object replaces the original field and is + also saved in the inner_refs_list of the outer select. Here + it is only created. It can be fixed only after the original + field has been fixed and this is done in the fix_inner_refs() + function. + */ + set_field(*from_field); + arena= thd->activate_stmt_arena_if_needed(&backup); + rf= new Item_outer_ref(context, this); + if (!rf) + { + if (arena) + thd->restore_active_arena(arena, &backup); + return -1; + } + *reference= rf; + select->inner_refs_list.push_back(rf); + if (arena) + thd->restore_active_arena(arena, &backup); + fixed_as_field= 1; + } if (thd->lex->in_sum_func && thd->lex->in_sum_func->nest_level == thd->lex->current_select->nest_level) @@ -3620,9 +3677,9 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) *ref= NULL; // Don't call set_properties() rf= (place == IN_HAVING ? 
new Item_ref(context, ref, (char*) table_name, - (char*) field_name) : + (char*) field_name, alias_name_used) : new Item_direct_ref(context, ref, (char*) table_name, - (char*) field_name)); + (char*) field_name, alias_name_used)); *ref= save; if (!rf) return -1; @@ -3644,7 +3701,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) { mark_as_dependent(thd, last_checked_context->select_lex, context->select_lex, - this, this); + this, (Item_ident*)*reference); if (last_checked_context->select_lex->having_fix_field) { Item_ref *rf; @@ -3740,12 +3797,14 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if (thd->lex->current_select->is_item_list_lookup) { uint counter; - bool not_used; + enum_resolution_type resolution; Item** res= find_item_in_list(this, thd->lex->current_select->item_list, &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used); + &resolution); if (!res) return 1; + if (resolution == RESOLVED_AGAINST_ALIAS) + alias_name_used= TRUE; if (res != (Item **)not_found_item) { if ((*res)->type() == Item::FIELD_ITEM) @@ -3756,7 +3815,18 @@ bool Item_field::fix_fields(THD *thd, Item **reference) use the field from the Item_field in the select list and leave the Item_field instance in place. */ - set_field((*((Item_field**)res))->field); + + Field *field= (*((Item_field**)res))->field; + + if (field == NULL) + { + /* The column to which we link isn't valid. */ + my_error(ER_BAD_FIELD_ERROR, MYF(0), (*res)->name, + current_thd->where); + return(1); + } + + set_field(field); return 0; } else @@ -3842,7 +3912,9 @@ bool Item_field::fix_fields(THD *thd, Item **reference) { /* First usage of column */ table->used_fields++; // Used to optimize loops - table->used_keys.intersect(field->part_of_key); + /* purecov: begin inspected */ + table->covering_keys.intersect(field->part_of_key); + /* purecov: end */ } } } @@ -4060,7 +4132,9 @@ bool Item_field::set_no_const_sub(byte *arg) DESCRIPTION The function returns a pointer to an item that is taken from the very beginning of the item_equal list which the Item_field - object refers to (belongs to). + object refers to (belongs to) unless item_equal contains a constant + item. In this case the function returns this constant item, + (if the substitution does not require conversion). If the Item_field object does not refer any Item_equal object 'this' is returned @@ -4069,7 +4143,8 @@ bool Item_field::set_no_const_sub(byte *arg) of the thransformer method. RETURN VALUES - pointer to a replacement Item_field if there is a better equal item; + pointer to a replacement Item_field if there is a better equal item or + a pointer to a constant equal item; this - otherwise. 
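The replace_equal_field() behaviour described here prefers a constant from the item's multiple-equality class, but only when the comparison context allows the substitution. A small standalone model of that lookup follows; equal_class, its members and the context check are simplified assumptions, not the Item_equal machinery.

#include <cstddef>
#include <vector>

enum cmp_ctx { CMP_ANY= -1, CMP_INT= 0, CMP_STRING= 1 };

struct node
{
  bool    is_const;   // a literal that could replace the field
  cmp_ctx ctx;        // context the constant compares in
};

struct equal_class
{
  std::vector<node*> members;

  // return a usable constant member if the contexts agree, otherwise
  // fall back to the first member of the class (the field itself)
  node *pick(cmp_ctx wanted) const
  {
    for (std::size_t i= 0; i < members.size(); i++)
    {
      node *n= members[i];
      if (n->is_const && (wanted == CMP_ANY || n->ctx == wanted))
        return n;
    }
    return members.empty() ? NULL : members[0];
  }
};

int main()
{
  node field=    { false, CMP_INT };
  node constant= { true,  CMP_INT };
  equal_class ec;
  ec.members.push_back(&field);
  ec.members.push_back(&constant);
  return ec.pick(CMP_STRING) == &field ? 0 : 1;   // context mismatch: keep field
}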
*/ @@ -4077,6 +4152,14 @@ Item *Item_field::replace_equal_field(byte *arg) { if (item_equal) { + Item *const_item= item_equal->get_const(); + if (const_item) + { + if (cmp_context != (Item_result)-1 && + const_item->cmp_context != cmp_context) + return this; + return const_item; + } Item_field *subst= item_equal->get_first(); if (subst && !field->eq(subst->field)) return subst; @@ -4280,13 +4363,16 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_GEOMETRY: if (this->type() == Item::TYPE_HOLDER) field= new Field_blob(max_length, maybe_null, name, collation.collation, 1); else field= new Field_blob(max_length, maybe_null, name, collation.collation); break; // Blob handled outside of case + case MYSQL_TYPE_GEOMETRY: + return new Field_geom(max_length, maybe_null, name, table->s, + (Field::geometry_type) + ((Item_geometry_func *)this)->get_geometry_type()); } if (field) field->init(table); @@ -4329,18 +4415,19 @@ void Item_field::save_org_in_field(Field *to) int Item_field::save_in_field(Field *to, bool no_conversions) { + int res; if (result_field->is_null()) { null_value=1; - return set_field_to_null_with_conversions(to, no_conversions); + res= set_field_to_null_with_conversions(to, no_conversions); } else { to->set_notnull(); - field_conv(to,result_field); + res= field_conv(to,result_field); null_value=0; } - return 0; + return res; } @@ -4892,17 +4979,79 @@ void Item_field::update_null_value() } +/* + Add the field to the select list and substitute it for the reference to + the field. + + SYNOPSIS + Item_field::update_value_transformer() + select_arg current select + + DESCRIPTION + If the field doesn't belong to the table being inserted into then it is + added to the select list, pointer to it is stored in the ref_pointer_array + of the select and the field itself is substituted for the Item_ref object. + This is done in order to get correct values from update fields that + belongs to the SELECT part in the INSERT .. SELECT .. ON DUPLICATE KEY + UPDATE statement. 
+ + RETURN + 0 if error occured + ref if all conditions are met + this field otherwise +*/ + +Item *Item_field::update_value_transformer(byte *select_arg) +{ + SELECT_LEX *select= (SELECT_LEX*)select_arg; + DBUG_ASSERT(fixed); + + if (field->table != select->context.table_list->table && + type() != Item::TRIGGER_FIELD_ITEM) + { + List<Item> *all_fields= &select->join->all_fields; + Item **ref_pointer_array= select->ref_pointer_array; + int el= all_fields->elements; + Item_ref *ref; + + ref_pointer_array[el]= (Item*)this; + all_fields->push_front((Item*)this); + ref= new Item_ref(&select->context, ref_pointer_array + el, + table_name, field_name); + return ref; + } + return this; +} + + +void Item_field::print(String *str) +{ + if (field && field->table->const_table) + { + char buff[MAX_FIELD_WIDTH]; + String tmp(buff,sizeof(buff),str->charset()); + field->val_str(&tmp); + str->append('\''); + str->append(tmp); + str->append('\''); + return; + } + Item_ident::print(str); +} + + Item_ref::Item_ref(Name_resolution_context *context_arg, Item **item, const char *table_name_arg, - const char *field_name_arg) + const char *field_name_arg, + bool alias_name_used_arg) :Item_ident(context_arg, NullS, table_name_arg, field_name_arg), result_field(0), ref(item) { + alias_name_used= alias_name_used_arg; /* This constructor used to create some internals references over fixed items */ - DBUG_ASSERT(ref != 0); - if (*ref && (*ref)->fixed) + if (ref && *ref && (*ref)->fixed) set_properties(); } @@ -5181,11 +5330,13 @@ void Item_ref::set_properties() */ with_sum_func= (*ref)->with_sum_func; unsigned_flag= (*ref)->unsigned_flag; + fixed= 1; + if (alias_name_used) + return; if ((*ref)->type() == FIELD_ITEM) alias_name_used= ((Item_ident *) (*ref))->alias_name_used; else alias_name_used= TRUE; // it is not field, so it is was resolved by alias - fixed= 1; } @@ -5203,7 +5354,7 @@ void Item_ref::print(String *str) if (ref) { if ((*ref)->type() != Item::CACHE_ITEM && ref_type() != VIEW_REF && - name && alias_name_used) + !table_name && name && alias_name_used) { THD *thd= current_thd; append_identifier(thd, str, name, (uint) strlen(name)); @@ -5361,18 +5512,7 @@ my_decimal *Item_ref::val_decimal(my_decimal *decimal_value) int Item_ref::save_in_field(Field *to, bool no_conversions) { int res; - if (result_field) - { - if (result_field->is_null()) - { - null_value= 1; - return set_field_to_null_with_conversions(to, no_conversions); - } - to->set_notnull(); - field_conv(to, result_field); - null_value= 0; - return 0; - } + DBUG_ASSERT(!result_field); res= (*ref)->save_in_field(to, no_conversions); null_value= (*ref)->null_value; return res; @@ -5462,7 +5602,7 @@ bool Item_direct_ref::get_date(TIME *ltime,uint fuzzydate) /* - Prepare referenced view viewld then call usual Item_direct_ref::fix_fields + Prepare referenced field then call usual Item_direct_ref::fix_fields SYNOPSIS Item_direct_view_ref::fix_fields() @@ -5486,6 +5626,31 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference) } /* + Prepare referenced outer field then call usual Item_direct_ref::fix_fields + + SYNOPSIS + Item_outer_ref::fix_fields() + thd thread handler + reference reference on reference where this item stored + + RETURN + FALSE OK + TRUE Error +*/ + +bool Item_outer_ref::fix_fields(THD *thd, Item **reference) +{ + DBUG_ASSERT(*ref); + /* outer_field->check_cols() will be made in Item_direct_ref::fix_fields */ + outer_field->fixed_as_field= 1; + if (!outer_field->fixed && + (outer_field->fix_fields(thd, reference))) + return 
TRUE; + table_name= outer_field->table_name; + return Item_direct_ref::fix_fields(thd, reference); +} + +/* Compare two view column references for equality. SYNOPSIS @@ -5591,6 +5756,13 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions) { if (field_arg->flags & NO_DEFAULT_VALUE_FLAG) { + if (field_arg->reset()) + { + my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, + ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); + return -1; + } + if (context->error_processor == &view_error_processor) { TABLE_LIST *view= cached_table->top_table(); @@ -5609,7 +5781,6 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions) ER(ER_NO_DEFAULT_FOR_FIELD), field_arg->field_name); } - field_arg->set_default(); return 1; } field_arg->set_default(); @@ -6274,8 +6445,6 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) :Item(thd, item), enum_set_typelib(0), fld_type(get_real_type(item)) { DBUG_ASSERT(item->fixed); - - max_length= display_length(item); maybe_null= item->maybe_null; collation.set(item->collation); get_full_info(item); @@ -6447,11 +6616,17 @@ bool Item_type_holder::join_types(THD *thd, Item *item) { int delta1= max_length_orig - decimals_orig; int delta2= item->max_length - item->decimals; - if (fld_type == MYSQL_TYPE_DECIMAL) - max_length= max(delta1, delta2) + decimals; - else - max_length= min(max(delta1, delta2) + decimals, - (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7); + max_length= max(delta1, delta2) + decimals; + if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2) + { + max_length= FLT_DIG + 6; + decimals= NOT_FIXED_DEC; + } + if (fld_type == MYSQL_TYPE_DOUBLE && max_length > DBL_DIG + 2) + { + max_length= DBL_DIG + 7; + decimals= NOT_FIXED_DEC; + } } else max_length= (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7; @@ -6513,7 +6688,7 @@ uint32 Item_type_holder::display_length(Item *item) case MYSQL_TYPE_SHORT: return 6; case MYSQL_TYPE_LONG: - return 11; + return MY_INT32_NUM_DECIMAL_DIGITS; case MYSQL_TYPE_FLOAT: return 25; case MYSQL_TYPE_DOUBLE: diff --git a/sql/item.h b/sql/item.h index 80aa72e746b..d03f3eededb 100644 --- a/sql/item.h +++ b/sql/item.h @@ -325,10 +325,10 @@ private: TABLE_LIST *save_first_name_resolution_table; TABLE_LIST *save_next_name_resolution_table; bool save_resolve_in_select_list; + TABLE_LIST *save_next_local; public: Name_resolution_context_state() {} /* Remove gcc warning */ - TABLE_LIST *save_next_local; public: /* Save the state of a name resolution context. 
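/*
  A standalone sketch of the width aggregation done by the join_types() hunk
  above, under simplifying assumptions (decimals never exceed max_length, and
  NOT_FIXED_SCALE stands in for the server's NOT_FIXED_DEC): the combined REAL
  column keeps the widest integer part plus the larger scale, and once that
  exceeds what FLOAT/DOUBLE can hold exactly the length is capped and the
  scale is marked as not fixed.
*/
#include <algorithm>
#include <cfloat>

struct Metadata { unsigned max_length; unsigned decimals; };

static const unsigned NOT_FIXED_SCALE = 31;   // stand-in for NOT_FIXED_DEC

static Metadata join_real_types(Metadata a, Metadata b, bool is_double)
{
  Metadata r;
  r.decimals   = std::max(a.decimals, b.decimals);
  unsigned d1  = a.max_length - a.decimals;            // integer digits of a
  unsigned d2  = b.max_length - b.decimals;            // integer digits of b
  r.max_length = std::max(d1, d2) + r.decimals;
  unsigned dig = is_double ? DBL_DIG : FLT_DIG;        // exactly representable digits
  if (r.max_length > dig + 2)
  {
    r.max_length = dig + (is_double ? 7u : 6u);        // DBL_DIG+7 / FLT_DIG+6
    r.decimals   = NOT_FIXED_SCALE;
  }
  return r;
}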
*/ @@ -350,6 +350,11 @@ public: context->first_name_resolution_table= save_first_name_resolution_table; context->resolve_in_select_list= save_resolve_in_select_list; } + + TABLE_LIST *get_first_name_resolution_table() + { + return save_first_name_resolution_table; + } }; @@ -911,6 +916,7 @@ public: virtual Item_field *filed_for_view_update() { return 0; } virtual Item *neg_transformer(THD *thd) { return NULL; } + virtual Item *update_value_transformer(byte *select_arg) { return this; } virtual Item *safe_charset_converter(CHARSET_INFO *tocs); void delete_self() { @@ -1109,7 +1115,7 @@ inline Item_result Item_splocal::result_type() const class Item_case_expr :public Item_sp_variable { public: - Item_case_expr(int case_expr_id); + Item_case_expr(uint case_expr_id); public: Item *this_item(); @@ -1128,7 +1134,7 @@ public: void print(String *str); private: - int m_case_expr_id; + uint m_case_expr_id; }; /***************************************************************************** @@ -1310,7 +1316,7 @@ public: uint have_privileges; /* field need any privileges (for VIEW creation) */ bool any_privileges; - + bool fixed_as_field; Item_field(Name_resolution_context *context_arg, const char *db_arg,const char *table_name_arg, const char *field_name_arg); @@ -1390,6 +1396,8 @@ public: Item_field *filed_for_view_update() { return this; } Item *safe_charset_converter(CHARSET_INFO *tocs); int fix_outer_field(THD *thd, Field **field, Item **reference); + virtual Item *update_value_transformer(byte *select_arg); + void print(String *str); friend class Item_default_value; friend class Item_insert_value; friend class st_select_lex_unit; @@ -1583,11 +1591,14 @@ class Item_int :public Item_num { public: longlong value; - Item_int(int32 i,uint length=11) :value((longlong) i) + Item_int(int32 i,uint length= MY_INT32_NUM_DECIMAL_DIGITS) + :value((longlong) i) { max_length=length; fixed= 1; } - Item_int(longlong i,uint length=21) :value(i) + Item_int(longlong i,uint length= MY_INT64_NUM_DECIMAL_DIGITS) + :value(i) { max_length=length; fixed= 1; } - Item_int(ulonglong i, uint length= 21) :value((longlong)i) + Item_int(ulonglong i, uint length= MY_INT64_NUM_DECIMAL_DIGITS) + :value((longlong)i) { max_length=length; fixed= 1; unsigned_flag= 1; } Item_int(const char *str_arg,longlong i,uint length) :value(i) { max_length=length; name=(char*) str_arg; fixed= 1; } @@ -1804,7 +1815,11 @@ public: str_value.length(), collation.collation); } Item *safe_charset_converter(CHARSET_INFO *tocs); - inline void append(char *str, uint length) { str_value.append(str, length); } + inline void append(char *str, uint length) + { + str_value.append(str, length); + max_length= str_value.numchars() * collation.collation->mbmaxlen; + } void print(String *str); // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} @@ -1868,7 +1883,10 @@ public: Item_hex_string(const char *str,uint str_length); enum Type type() const { return VARBIN_ITEM; } double val_real() - { DBUG_ASSERT(fixed == 1); return (double) Item_hex_string::val_int(); } + { + DBUG_ASSERT(fixed == 1); + return (double) (ulonglong) Item_hex_string::val_int(); + } longlong val_int(); bool basic_const_item() const { return 1; } String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } @@ -1920,7 +1938,7 @@ class Item_ref :public Item_ident protected: void set_properties(); public: - enum Ref_Type { REF, DIRECT_REF, VIEW_REF }; + enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF }; Field *result_field; /* Save result here */ Item **ref; 
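/*
  A plain C++ illustration of the Item_hex_string::val_real() change shown
  above: a hex literal such as 0xFFFFFFFFFFFFFFFF is held as a signed 64-bit
  value, so converting it straight to double yields -1.0, while casting
  through the unsigned type first preserves the intended magnitude.
*/
#include <cstdint>
#include <cstdio>

int main()
{
  int64_t v = static_cast<int64_t>(0xFFFFFFFFFFFFFFFFULL);            // how the literal is held
  double as_signed   = static_cast<double>(v);                        // -1.0
  double as_unsigned = static_cast<double>(static_cast<uint64_t>(v)); // ~1.84e19
  std::printf("signed: %.1f  unsigned: %.1f\n", as_signed, as_unsigned);
  return 0;
}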
Item_ref(Name_resolution_context *context_arg, @@ -1943,7 +1961,8 @@ public: with Bar, and if we have a more broader set of problems like this. */ Item_ref(Name_resolution_context *context_arg, Item **item, - const char *table_name_arg, const char *field_name_arg); + const char *table_name_arg, const char *field_name_arg, + bool alias_name_used_arg= FALSE); /* Constructor need to process subselect with temporary tables (see Item) */ Item_ref(THD *thd, Item_ref *item) @@ -1981,9 +2000,14 @@ public: (*ref)->get_tmp_table_item(thd)); } table_map used_tables() const - { + { return depended_from ? OUTER_REF_TABLE_BIT : (*ref)->used_tables(); } + void update_used_tables() + { + if (!depended_from) + (*ref)->update_used_tables(); + } table_map not_null_tables() const { return (*ref)->not_null_tables(); } void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return 1; } @@ -2018,8 +2042,11 @@ class Item_direct_ref :public Item_ref public: Item_direct_ref(Name_resolution_context *context_arg, Item **item, const char *table_name_arg, - const char *field_name_arg) - :Item_ref(context_arg, item, table_name_arg, field_name_arg) {} + const char *field_name_arg, + bool alias_name_used_arg= FALSE) + :Item_ref(context_arg, item, table_name_arg, + field_name_arg, alias_name_used_arg) + {} /* Constructor need to process subselect with temporary tables (see Item) */ Item_direct_ref(THD *thd, Item_direct_ref *item) : Item_ref(thd, item) {} @@ -2054,6 +2081,40 @@ public: }; +class Item_outer_ref :public Item_direct_ref +{ +public: + Item_field *outer_field; + Item_outer_ref(Name_resolution_context *context_arg, + Item_field *outer_field_arg) + :Item_direct_ref(context_arg, 0, outer_field_arg->table_name, + outer_field_arg->field_name), + outer_field(outer_field_arg) + { + ref= (Item**)&outer_field; + set_properties(); + fixed= 0; + } + void cleanup() + { + ref= (Item**)&outer_field; + fixed= 0; + Item_direct_ref::cleanup(); + outer_field->cleanup(); + } + void save_in_result_field(bool no_conversions) + { + outer_field->save_org_in_field(result_field); + } + bool fix_fields(THD *, Item **); + table_map used_tables() const + { + return (*ref)->const_item() ? 0 : OUTER_REF_TABLE_BIT; + } + virtual Ref_Type ref_type() { return OUTER_REF; } +}; + + class Item_in_subselect; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 89f57846f9b..ffa6b4caf2a 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -840,6 +840,59 @@ int Arg_comparator::compare_e_row() } +void Item_func_truth::fix_length_and_dec() +{ + maybe_null= 0; + null_value= 0; + decimals= 0; + max_length= 1; +} + + +void Item_func_truth::print(String *str) +{ + str->append('('); + args[0]->print(str); + str->append(STRING_WITH_LEN(" is ")); + if (! affirmative) + str->append(STRING_WITH_LEN("not ")); + if (value) + str->append(STRING_WITH_LEN("true")); + else + str->append(STRING_WITH_LEN("false")); + str->append(')'); +} + + +bool Item_func_truth::val_bool() +{ + bool val= args[0]->val_bool(); + if (args[0]->null_value) + { + /* + NULL val IS {TRUE, FALSE} --> FALSE + NULL val IS NOT {TRUE, FALSE} --> TRUE + */ + return (! affirmative); + } + + if (affirmative) + { + /* {TRUE, FALSE} val IS {TRUE, FALSE} value */ + return (val == value); + } + + /* {TRUE, FALSE} val IS NOT {TRUE, FALSE} value */ + return (val != value); +} + + +longlong Item_func_truth::val_int() +{ + return (val_bool() ? 
1 : 0); +} + + bool Item_in_optimizer::fix_left(THD *thd, Item **ref) { if (!args[0]->fixed && args[0]->fix_fields(thd, args) || @@ -903,6 +956,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) longlong Item_in_optimizer::val_int() { + bool tmp; DBUG_ASSERT(fixed == 1); cache->store(args[0]); @@ -966,7 +1020,7 @@ longlong Item_in_optimizer::val_int() } return 0; } - bool tmp= args[1]->val_bool_result(); + tmp= args[1]->val_bool_result(); null_value= args[1]->null_value; return tmp; } @@ -1291,11 +1345,11 @@ void Item_func_between::fix_length_and_dec() They are compared as integers, so for const item this time-consuming conversion can be done only once, not for every single comparison */ - if (args[0]->type() == FIELD_ITEM && + if (args[0]->real_item()->type() == FIELD_ITEM && thd->lex->sql_command != SQLCOM_CREATE_VIEW && thd->lex->sql_command != SQLCOM_SHOW_CREATE) { - Field *field=((Item_field*) args[0])->field; + Field *field=((Item_field*) (args[0]->real_item()))->field; if (field->can_be_compared_as_longlong()) { /* @@ -1578,6 +1632,7 @@ Item_func_if::fix_length_and_dec() { maybe_null=args[1]->maybe_null || args[2]->maybe_null; decimals= max(args[1]->decimals, args[2]->decimals); + unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag; enum Item_result arg1_type=args[1]->result_type(); enum Item_result arg2_type=args[2]->result_type(); @@ -1607,12 +1662,20 @@ Item_func_if::fix_length_and_dec() collation.set(&my_charset_bin); // Number } } - max_length= - (cached_result_type == DECIMAL_RESULT || cached_result_type == INT_RESULT) ? - (max(args[1]->max_length - args[1]->decimals, - args[2]->max_length - args[2]->decimals) + decimals + - (unsigned_flag ? 0 : 1) ) : - max(args[1]->max_length, args[2]->max_length); + + if ((cached_result_type == DECIMAL_RESULT ) + || (cached_result_type == INT_RESULT)) + { + int len1= args[1]->max_length - args[1]->decimals + - (args[1]->unsigned_flag ? 0 : 1); + + int len2= args[2]->max_length - args[2]->decimals + - (args[2]->unsigned_flag ? 0 : 1); + + max_length=max(len1, len2) + decimals + (unsigned_flag ? 0 : 1); + } + else + max_length= max(args[1]->max_length, args[2]->max_length); } @@ -2127,9 +2190,100 @@ void Item_func_coalesce::fix_length_and_dec() Classes and function for the IN operator ****************************************************************************/ -static int cmp_longlong(void *cmp_arg, longlong *a,longlong *b) +/* + Determine which of the signed longlong arguments is bigger + + SYNOPSIS + cmp_longs() + a_val left argument + b_val right argument + + DESCRIPTION + This function will compare two signed longlong arguments + and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +static inline int cmp_longs (longlong a_val, longlong b_val) +{ + return a_val < b_val ? -1 : a_val == b_val ? 0 : 1; +} + + +/* + Determine which of the unsigned longlong arguments is bigger + + SYNOPSIS + cmp_ulongs() + a_val left argument + b_val right argument + + DESCRIPTION + This function will compare two unsigned longlong arguments + and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. 
+ 1 left argument is greater than the right argument. +*/ +static inline int cmp_ulongs (ulonglong a_val, ulonglong b_val) +{ + return a_val < b_val ? -1 : a_val == b_val ? 0 : 1; +} + + +/* + Compare two integers in IN value list format (packed_longlong) + + SYNOPSIS + cmp_longlong() + cmp_arg an argument passed to the calling function (qsort2) + a left argument + b right argument + + DESCRIPTION + This function will compare two integer arguments in the IN value list + format and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + It's used in sorting the IN values list and finding an element in it. + Depending on the signedness of the arguments cmp_longlong() will + compare them as either signed (using cmp_longs()) or unsigned (using + cmp_ulongs()). + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +int cmp_longlong(void *cmp_arg, + in_longlong::packed_longlong *a, + in_longlong::packed_longlong *b) { - return *a < *b ? -1 : *a == *b ? 0 : 1; + if (a->unsigned_flag != b->unsigned_flag) + { + /* + One of the args is unsigned and is too big to fit into the + positive signed range. Report no match. + */ + if (a->unsigned_flag && ((ulonglong) a->val) > (ulonglong) LONGLONG_MAX || + b->unsigned_flag && ((ulonglong) b->val) > (ulonglong) LONGLONG_MAX) + return a->unsigned_flag ? 1 : -1; + /* + Although the signedness differs both args can fit into the signed + positive range. Make them signed and compare as usual. + */ + return cmp_longs (a->val, b->val); + } + if (a->unsigned_flag) + return cmp_ulongs ((ulonglong) a->val, (ulonglong) b->val); + else + return cmp_longs (a->val, b->val); } static int cmp_double(void *cmp_arg, double *a,double *b) @@ -2254,19 +2408,23 @@ void in_row::set(uint pos, Item *item) } in_longlong::in_longlong(uint elements) - :in_vector(elements,sizeof(longlong),(qsort2_cmp) cmp_longlong, 0) + :in_vector(elements,sizeof(packed_longlong),(qsort2_cmp) cmp_longlong, 0) {} void in_longlong::set(uint pos,Item *item) { - ((longlong*) base)[pos]=item->val_int(); + struct packed_longlong *buff= &((packed_longlong*) base)[pos]; + + buff->val= item->val_int(); + buff->unsigned_flag= item->unsigned_flag; } byte *in_longlong::get_value(Item *item) { - tmp= item->val_int(); + tmp.val= item->val_int(); if (item->null_value) return 0; + tmp.unsigned_flag= item->unsigned_flag; return (byte*) &tmp; } @@ -2300,7 +2458,8 @@ void in_decimal::set(uint pos, Item *item) dec->len= DECIMAL_BUFF_LENGTH; dec->fix_buffer_pointer(); my_decimal *res= item->val_decimal(dec); - if (res != dec) + /* if item->val_decimal() is evaluated to NULL then res == 0 */ + if (!item->null_value && res != dec) my_decimal2decimal(res, dec); } @@ -2567,6 +2726,7 @@ void Item_func_in::fix_length_and_dec() THD *thd= current_thd; uint found_types= 0; uint type_cnt= 0, i; + Item_result cmp_type= STRING_RESULT; left_result_type= args[0]->result_type(); found_types= collect_cmp_types(args, arg_count); @@ -2581,25 +2741,53 @@ void Item_func_in::fix_length_and_dec() for (i= 0; i <= (uint)DECIMAL_RESULT; i++) { if (found_types & 1 << i) + { (type_cnt)++; + cmp_type= (Item_result) i; + } + } + + if (type_cnt == 1) + { + if (cmp_type == STRING_RESULT && + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) + return; + arg_types_compatible= TRUE; } + /* Row item with NULLs inside can return NULL or FALSE => they can't be 
processed as static */ if (type_cnt == 1 && const_itm && !nulls_in_row()) { - uint tmp_type; - Item_result cmp_type; - /* Only one cmp type was found. Extract it here */ - for (tmp_type= 0; found_types - 1; found_types>>= 1) - tmp_type++; - cmp_type= (Item_result)tmp_type; - + /* + IN must compare INT/DATE/DATETIME/TIMESTAMP columns and constants + as int values (the same way as equality does). + So we must check here if the column on the left and all the constant + values on the right can be compared as integers and adjust the + comparison type accordingly. + */ + if (args[0]->real_item()->type() == FIELD_ITEM && + thd->lex->sql_command != SQLCOM_CREATE_VIEW && + thd->lex->sql_command != SQLCOM_SHOW_CREATE && + cmp_type != INT_RESULT) + { + Field *field= ((Item_field*) (args[0]->real_item()))->field; + if (field->can_be_compared_as_longlong()) + { + bool all_converted= TRUE; + for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) + { + if (!convert_constant_item (thd, field, &arg[0])) + all_converted= FALSE; + } + if (all_converted) + cmp_type= INT_RESULT; + } + } switch (cmp_type) { case STRING_RESULT: - if (agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) - return; array=new in_string(arg_count - 1,(qsort2_cmp) srtcmp_in, cmp_collation.collation); break; @@ -4184,11 +4372,9 @@ longlong Item_equal::val_int() void Item_equal::fix_length_and_dec() { - Item *item= const_item ? const_item : get_first(); + Item *item= get_first(); eval_item= cmp_item::get_comparator(item->result_type(), item->collation.collation); - if (item->result_type() == STRING_RESULT) - eval_item->cmp_charset= cmp_collation.collation; } bool Item_equal::walk(Item_processor processor, bool walk_subquery, byte *arg) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index c5c67371a8a..21cbc10b42a 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -101,6 +101,92 @@ public: uint decimal_precision() const { return 1; } }; + +/** + Abstract Item class, to represent <code>X IS [NOT] (TRUE | FALSE)</code> + boolean predicates. +*/ + +class Item_func_truth : public Item_bool_func +{ +public: + virtual bool val_bool(); + virtual longlong val_int(); + virtual void fix_length_and_dec(); + virtual void print(String *str); + +protected: + Item_func_truth(Item *a, bool a_value, bool a_affirmative) + : Item_bool_func(a), value(a_value), affirmative(a_affirmative) + {} + + ~Item_func_truth() + {} +private: + /** + True for <code>X IS [NOT] TRUE</code>, + false for <code>X IS [NOT] FALSE</code> predicates. + */ + const bool value; + /** + True for <code>X IS Y</code>, false for <code>X IS NOT Y</code> predicates. + */ + const bool affirmative; +}; + + +/** + This Item represents a <code>X IS TRUE</code> boolean predicate. +*/ + +class Item_func_istrue : public Item_func_truth +{ +public: + Item_func_istrue(Item *a) : Item_func_truth(a, true, true) {} + ~Item_func_istrue() {} + virtual const char* func_name() const { return "istrue"; } +}; + + +/** + This Item represents a <code>X IS NOT TRUE</code> boolean predicate. +*/ + +class Item_func_isnottrue : public Item_func_truth +{ +public: + Item_func_isnottrue(Item *a) : Item_func_truth(a, true, false) {} + ~Item_func_isnottrue() {} + virtual const char* func_name() const { return "isnottrue"; } +}; + + +/** + This Item represents a <code>X IS FALSE</code> boolean predicate. 
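/*
  A compact model of the truth-predicate semantics implemented by
  Item_func_truth::val_bool() earlier in this diff, using std::optional for a
  nullable boolean: NULL IS TRUE and NULL IS FALSE are false, their NOT forms
  are true, and non-NULL operands compare against the tested constant.
*/
#include <optional>

static bool eval_truth_predicate(std::optional<bool> operand,
                                 bool tested_value,   // TRUE or FALSE in the predicate
                                 bool affirmative)    // IS vs. IS NOT
{
  if (!operand.has_value())
    return !affirmative;
  return affirmative ? (*operand == tested_value)
                     : (*operand != tested_value);
}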
+*/ + +class Item_func_isfalse : public Item_func_truth +{ +public: + Item_func_isfalse(Item *a) : Item_func_truth(a, false, true) {} + ~Item_func_isfalse() {} + virtual const char* func_name() const { return "isfalse"; } +}; + + +/** + This Item represents a <code>X IS NOT FALSE</code> boolean predicate. +*/ + +class Item_func_isnotfalse : public Item_func_truth +{ +public: + Item_func_isnotfalse(Item *a) : Item_func_truth(a, false, false) {} + ~Item_func_isnotfalse() {} + virtual const char* func_name() const { return "isnotfalse"; } +}; + + class Item_cache; #define UNKNOWN ((my_bool)-1) @@ -693,7 +779,16 @@ public: class in_longlong :public in_vector { - longlong tmp; + /* + Here we declare a temporary variable (tmp) of the same type as the + elements of this vector. tmp is used in finding if a given value is in + the list. + */ + struct packed_longlong + { + longlong val; + longlong unsigned_flag; // Use longlong, not bool, to preserve alignment + } tmp; public: in_longlong(uint elements); void set(uint pos,Item *item); @@ -709,9 +804,13 @@ public: } void value_to_item(uint pos, Item *item) { - ((Item_int*)item)->value= ((longlong*)base)[pos]; + ((Item_int*) item)->value= ((packed_longlong*) base)[pos].val; + ((Item_int*) item)->unsigned_flag= (my_bool) + ((packed_longlong*) base)[pos].unsigned_flag; } Item_result result_type() { return INT_RESULT; } + + friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b); }; class in_double :public in_vector @@ -1038,12 +1137,18 @@ public: */ in_vector *array; bool have_null; + /* + true when all arguments of the IN clause are of compatible types + and can be used safely as comparisons for key conditions + */ + bool arg_types_compatible; Item_result left_result_type; cmp_item *cmp_items[5]; /* One cmp_item for each result type */ DTCollation cmp_collation; Item_func_in(List<Item> &list) - :Item_func_opt_neg(list), array(0), have_null(0) + :Item_func_opt_neg(list), array(0), have_null(0), + arg_types_compatible(FALSE) { bzero(&cmp_items, sizeof(cmp_items)); allowed_arg_cols= 0; // Fetch this value from first argument @@ -1367,7 +1472,6 @@ class Item_equal: public Item_bool_func Item *const_item; /* optional constant item equal to fields items */ cmp_item *eval_item; bool cond_false; - DTCollation cmp_collation; public: inline Item_equal() : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) diff --git a/sql/item_create.cc b/sql/item_create.cc index ff5825ef389..6a1e87a3aae 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -2877,9 +2877,6 @@ Create_func_convert_tz Create_func_convert_tz::s_singleton; Item* Create_func_convert_tz::create(THD *thd, Item *arg1, Item *arg2, Item *arg3) { - if (thd->lex->add_time_zone_tables_to_query_tables(thd)) - return NULL; - return new (thd->mem_root) Item_func_convert_tz(arg1, arg2, arg3); } diff --git a/sql/item_func.cc b/sql/item_func.cc index 82e6196183b..59894c981af 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -232,6 +232,8 @@ void Item_func::traverse_cond(Cond_traverser traverser, (*traverser)(this, argument); } } + else + (*traverser)(this, argument); } @@ -411,8 +413,13 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const if (item->type() != FUNC_ITEM) return 0; Item_func *item_func=(Item_func*) item; - if (arg_count != item_func->arg_count || - func_name() != item_func->func_name()) + Item_func::Functype func_type; + if ((func_type= functype()) != item_func->functype() || + arg_count != item_func->arg_count || + (func_type != Item_func::FUNC_SP 
&& + func_name() != item_func->func_name()) || + (func_type == Item_func::FUNC_SP && + my_strcasecmp(system_charset_info, func_name(), item_func->func_name()))) return 0; for (uint i=0; i < arg_count ; i++) if (!args[i]->eq(item_func->args[i], binary_cmp)) @@ -428,7 +435,7 @@ Field *Item_func::tmp_table_field(TABLE *table) switch (result_type()) { case INT_RESULT: - if (max_length > 11) + if (max_length > MY_INT32_NUM_DECIMAL_DIGITS) field= new Field_longlong(max_length, maybe_null, name, unsigned_flag); else field= new Field_long(max_length, maybe_null, name, unsigned_flag); @@ -2331,7 +2338,8 @@ longlong Item_func_coercibility::val_int() void Item_func_locate::fix_length_and_dec() { - maybe_null=0; max_length=11; + maybe_null= 0; + max_length= MY_INT32_NUM_DECIMAL_DIGITS; agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1); } @@ -3085,14 +3093,8 @@ public: int count; bool locked; pthread_cond_t cond; -#ifndef EMBEDDED_LIBRARY - pthread_t thread; - void set_thread(THD *thd) { thread= thd->real_id; } -#else - THD *thread; - void set_thread(THD *thd) { thread= thd; } -#endif /*EMBEDDED_LIBRARY*/ - ulong thread_id; + my_thread_id thread_id; + void set_thread(THD *thd) { thread_id= thd->thread_id; } User_level_lock(const char *key_arg,uint length, ulong id) :key_length(length),count(1),locked(1), thread_id(id) @@ -3409,11 +3411,7 @@ longlong Item_func_release_lock::val_int() } else { -#ifdef EMBEDDED_LIBRARY - if (ull->locked && (current_thd == ull->thread)) -#else - if (ull->locked && pthread_equal(pthread_self(),ull->thread)) -#endif + if (ull->locked && current_thd->thread_id == ull->thread_id) { result=1; // Release is ok item_user_lock_release(ull); @@ -3455,10 +3453,11 @@ longlong Item_func_benchmark::val_int() DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; String tmp(buff,sizeof(buff), &my_charset_bin); + my_decimal tmp_decimal; THD *thd=current_thd; ulong loop_count; - loop_count= args[0]->val_int(); + loop_count= (ulong) args[0]->val_int(); if (args[0]->null_value) { @@ -3479,6 +3478,9 @@ longlong Item_func_benchmark::val_int() case STRING_RESULT: (void) args[1]->val_str(&tmp); break; + case DECIMAL_RESULT: + (void) args[1]->val_decimal(&tmp_decimal); + break; case ROW_RESULT: default: // This case should never be chosen @@ -4228,7 +4230,14 @@ int get_var_with_binlog(THD *thd, enum_sql_command sql_command, user_var_entry *var_entry; var_entry= get_variable(&thd->user_vars, name, 0); - if (!(opt_bin_log && is_update_query(sql_command))) + /* + Any reference to user-defined variable which is done from stored + function or trigger affects their execution and the execution of the + calling statement. We must log all such variables even if they are + not involved in table-updating statements. 
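/*
  A one-line restatement of the logging rule described in the comment above,
  with plain booleans standing in for the server state: the user variable gets
  the binlog treatment either when the top-level statement updates data or
  when it is referenced from inside a stored function or trigger.
*/
static bool user_variable_needs_binlog(bool binlog_enabled,
                                       bool is_update_statement,
                                       bool inside_sub_statement)
{
  return binlog_enabled && (is_update_statement || inside_sub_statement);
}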
+ */ + if (!(opt_bin_log && + (is_update_query(sql_command) || thd->in_sub_stmt))) { *out_entry= var_entry; return 0; @@ -4979,10 +4988,10 @@ longlong Item_func_row_count::val_int() } -Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, - sp_name *name_arg) - :Item_func(), context(context_arg), m_name(name_arg), m_sp(NULL), - result_field(NULL) + + +Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name) + :Item_func(), context(context_arg), m_name(name), m_sp(NULL), sp_result_field(NULL) { maybe_null= 1; m_name->init_qname(current_thd); @@ -4992,9 +5001,8 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, - sp_name *name_arg, List<Item> &list) - :Item_func(list), context(context_arg), m_name(name_arg), m_sp(NULL), - result_field(NULL) + sp_name *name, List<Item> &list) + :Item_func(list), context(context_arg), m_name(name), m_sp(NULL),sp_result_field(NULL) { maybe_null= 1; m_name->init_qname(current_thd); @@ -5006,10 +5014,10 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, void Item_func_sp::cleanup() { - if (result_field) + if (sp_result_field) { - delete result_field; - result_field= NULL; + delete sp_result_field; + sp_result_field= NULL; } m_sp= NULL; dummy_table->alias= NULL; @@ -5038,81 +5046,118 @@ Item_func_sp::func_name() const } -Field * -Item_func_sp::sp_result_field(void) const + +/** + @brief Initialize the result field by creating a temporary dummy table + and assign it to a newly created field object. Meta data used to + create the field is fetched from the sp_head belonging to the stored + proceedure found in the stored procedure functon cache. + + @note This function should be called from fix_fields to init the result + field. It is some what related to Item_field. + + @see Item_field + + @param thd A pointer to the session and thread context. + + @return Function return error status. + @retval TRUE is returned on an error + @retval FALSE is returned on success. +*/ +bool +Item_func_sp::init_result_field(THD *thd) { - Field *field; - DBUG_ENTER("Item_func_sp::sp_result_field"); - DBUG_PRINT("info", ("sp: %s, flags: %x, level: %lu", - (m_sp ? "YES" : "NO"), - (m_sp ? m_sp->m_flags : (uint)0), - (m_sp ? m_sp->m_recursion_level : (ulong)0))); + DBUG_ENTER("Item_func_sp::init_result_field"); - if (!m_sp) + LEX_STRING empty_name= { C_STRING_WITH_LEN("") }; + + TABLE_SHARE *share; + + DBUG_ASSERT(m_sp == NULL); + DBUG_ASSERT(sp_result_field == NULL); + + if (!(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, + &thd->sp_func_cache, TRUE))) { - THD *thd= current_thd; - if (!(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, - &thd->sp_func_cache, TRUE))) - { - my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); - DBUG_RETURN(0); - } + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + context->process_error(thd); + DBUG_RETURN(TRUE); + } + + /* + A Field need to be attached to a Table. + Below we "create" a dummy table by initializing + the needed pointers. 
+ */ + + share= dummy_table->s; + dummy_table->alias = ""; + dummy_table->maybe_null = maybe_null; + dummy_table->in_use= thd; + dummy_table->copy_blobs= TRUE; + share->table_cache_key = empty_name; + share->table_name = empty_name; + + if (!(sp_result_field= m_sp->create_result_field(max_length, name, dummy_table))) + { + DBUG_RETURN(TRUE); } - if (!dummy_table->alias) + + if (sp_result_field->pack_length() > sizeof(result_buf)) { - char *empty_name= (char *) ""; - dummy_table->alias= empty_name; - dummy_table->maybe_null= maybe_null; - dummy_table->in_use= current_thd; - dummy_table->copy_blobs= TRUE; - dummy_table->s->table_cache_key.str = empty_name; - dummy_table->s->table_name.str= empty_name; - dummy_table->s->db.str= empty_name; + sp_result_field->move_field(sql_alloc(sp_result_field->pack_length())); + } else { + sp_result_field->move_field(result_buf); } - if (!(field= m_sp->create_result_field(max_length, name, dummy_table))) - my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + + sp_result_field->null_ptr= (uchar *) &null_value; + sp_result_field->null_bit= 1; + - DBUG_RETURN(field); + DBUG_RETURN(FALSE); } +/** + @brief Initialize local members with values from the Field interface. -/* - Execute function & store value in field + @note called from Item::fix_fields. +*/ +void Item_func_sp::fix_length_and_dec() +{ + DBUG_ENTER("Item_func_sp::fix_length_and_dec"); - RETURN - 0 value <> NULL - 1 value = NULL or error + DBUG_ASSERT(sp_result_field); + decimals= sp_result_field->decimals(); + max_length= sp_result_field->field_length; + collation.set(sp_result_field->charset()); + maybe_null= 1; + unsigned_flag= test(sp_result_field->flags & UNSIGNED_FLAG); + + DBUG_VOID_RETURN; +} + +/** + @brief Execute function & store value in field. + + @return Function returns error status. + @retval FALSE on success. + @retval TRUE if an error occurred. */ bool -Item_func_sp::execute(Field **flp) +Item_func_sp::execute() { THD *thd= current_thd; - Field *f; - + /* Get field in virtual tmp table to store result. Create the field if invoked first time. */ - - if (!(f= *flp)) - { - if (!(*flp= f= sp_result_field())) - { - /* Error set by sp_result_field() */ - null_value= 1; - return TRUE; - } - f->move_field((f->pack_length() > sizeof(result_buf)) ? - sql_alloc(f->pack_length()) : result_buf); - f->null_ptr= (uchar *)&null_value; - f->null_bit= 1; - } /* Execute function and store the return value in the field. */ - if (execute_impl(thd, f)) + if (execute_impl(thd)) { null_value= 1; context->process_error(thd); @@ -5121,14 +5166,24 @@ Item_func_sp::execute(Field **flp) /* Check that the field (the value) is not NULL. */ - null_value= f->is_null(); + null_value= sp_result_field->is_null(); return null_value; } +/** + @brief Execute function and store the return value in the field. + + @note This function was intended to be the concrete implementation of + the interface function execute. This was never realized. + + @return The error state. + @retval FALSE on success + @retval TRUE if an error occurred. 
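/*
  A standalone sketch of the buffer choice made in init_result_field() above,
  with invented names: return values that fit in a small fixed buffer use it
  directly (mirroring result_buf[64]), anything wider gets its own allocation,
  so the common case avoids a per-call heap allocation.
*/
#include <cstddef>
#include <vector>

class ReturnValueStorage {
  unsigned char inline_buf[64];          // small values live here
  std::vector<unsigned char> heap_buf;   // used only when the value is wider
public:
  unsigned char *reserve(std::size_t packed_length)
  {
    if (packed_length <= sizeof(inline_buf))
      return inline_buf;
    heap_buf.resize(packed_length);
    return heap_buf.data();
  }
};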
+*/ bool -Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) +Item_func_sp::execute_impl(THD *thd) { bool err_status= TRUE; Sub_statement_state statement_state; @@ -5145,7 +5200,7 @@ Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) thd->security_ctx= context->security_ctx; } #endif - if (find_and_check_access(thd)) + if (sp_check_access(thd)) goto error; /* @@ -5166,7 +5221,7 @@ Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) function call into binlog. */ thd->reset_sub_statement_state(&statement_state, SUB_STMT_FUNCTION); - err_status= m_sp->execute_function(thd, args, arg_count, return_value_fld); + err_status= m_sp->execute_function(thd, args, arg_count, sp_result_field); thd->restore_sub_statement_state(&statement_state); error: @@ -5181,15 +5236,9 @@ error: void Item_func_sp::make_field(Send_field *tmp_field) { - Field *field; DBUG_ENTER("Item_func_sp::make_field"); - if ((field= sp_result_field())) - { - field->make_field(tmp_field); - delete field; - DBUG_VOID_RETURN; - } - init_make_field(tmp_field, MYSQL_TYPE_VARCHAR); + DBUG_ASSERT(sp_result_field); + sp_result_field->make_field(tmp_field); DBUG_VOID_RETURN; } @@ -5197,67 +5246,20 @@ Item_func_sp::make_field(Send_field *tmp_field) enum enum_field_types Item_func_sp::field_type() const { - Field *field; DBUG_ENTER("Item_func_sp::field_type"); - - if (result_field) - DBUG_RETURN(result_field->type()); - if ((field= sp_result_field())) - { - enum_field_types result= field->type(); - delete field; - DBUG_RETURN(result); - } - DBUG_RETURN(MYSQL_TYPE_VARCHAR); + DBUG_ASSERT(sp_result_field); + DBUG_RETURN(sp_result_field->type()); } - Item_result Item_func_sp::result_type() const { - Field *field; DBUG_ENTER("Item_func_sp::result_type"); - DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp)); - - if (result_field) - DBUG_RETURN(result_field->result_type()); - if ((field= sp_result_field())) - { - Item_result result= field->result_type(); - delete field; - DBUG_RETURN(result); - } - DBUG_RETURN(STRING_RESULT); + DBUG_PRINT("info", ("m_sp = %p", m_sp)); + DBUG_ASSERT(sp_result_field); + DBUG_RETURN(sp_result_field->result_type()); } -void -Item_func_sp::fix_length_and_dec() -{ - Field *field; - DBUG_ENTER("Item_func_sp::fix_length_and_dec"); - - if (result_field) - { - decimals= result_field->decimals(); - max_length= result_field->field_length; - collation.set(result_field->charset()); - DBUG_VOID_RETURN; - } - - if (!(field= sp_result_field())) - { - context->process_error(current_thd); - DBUG_VOID_RETURN; - } - decimals= field->decimals(); - max_length= field->field_length; - collation.set(field->charset()); - maybe_null= 1; - delete field; - DBUG_VOID_RETURN; -} - - longlong Item_func_found_rows::val_int() { DBUG_ASSERT(fixed == 1); @@ -5268,57 +5270,39 @@ longlong Item_func_found_rows::val_int() Field * Item_func_sp::tmp_table_field(TABLE *t_arg) { - Field *field= 0; DBUG_ENTER("Item_func_sp::tmp_table_field"); - if (m_sp) - field= m_sp->create_result_field(max_length, (const char*) name, t_arg); - - if (!field) - field= Item_func::tmp_table_field(t_arg); - - if (!field) - my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); - - DBUG_RETURN(field); + DBUG_ASSERT(sp_result_field); + DBUG_RETURN(sp_result_field); } -/* - Find the function and check access rights to the function - - SYNOPSIS - find_and_check_access() - thd thread handler - - RETURN - FALSE Access granted - TRUE Requested access can't be granted or function doesn't exists - - NOTES - Checks if requested access to function 
can be granted to user. +/** + @brief Checks if requested access to function can be granted to user. If function isn't found yet, it searches function first. If function can't be found or user don't have requested access error is raised. + + @param thd thread handler + + @return Indication if the access was granted or not. + @retval FALSE Access is granted. + @retval TRUE Requested access can't be granted or function doesn't exists. + */ bool -Item_func_sp::find_and_check_access(THD *thd) +Item_func_sp::sp_check_access(THD *thd) { - if (! m_sp && ! (m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, - &thd->sp_func_cache, TRUE))) - { - my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); - return TRUE; - } - + DBUG_ENTER("Item_func_sp::sp_check_access"); + DBUG_ASSERT(m_sp); #ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_routine_access(thd, EXECUTE_ACL, m_sp->m_db.str, m_sp->m_name.str, 0, FALSE)) - return TRUE; + DBUG_RETURN(TRUE); #endif - return FALSE; + DBUG_RETURN(FALSE); } @@ -5326,9 +5310,25 @@ bool Item_func_sp::fix_fields(THD *thd, Item **ref) { bool res; + DBUG_ENTER("Item_func_sp::fix_fields"); DBUG_ASSERT(fixed == 0); + + /* + We must call init_result_field before Item_func::fix_fields() + to make m_sp and result_field members available to fix_length_and_dec(), + which is called from Item_func::fix_fields(). + */ + res= init_result_field(thd); + + if (res) + DBUG_RETURN(res); + res= Item_func::fix_fields(thd, ref); - if (!res && thd->lex->view_prepare_mode) + + if (res) + DBUG_RETURN(res); + + if (thd->lex->view_prepare_mode) { /* Here we check privileges of the stored routine only during view @@ -5340,15 +5340,17 @@ Item_func_sp::fix_fields(THD *thd, Item **ref) good idea especially if the view has SQL SECURITY DEFINER and the used stored procedure has SQL SECURITY DEFINER. */ - res= find_and_check_access(thd); + res= sp_check_access(thd); #ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Try to set and restore the security context to see whether it's valid + */ Security_context *save_secutiry_ctx; - if (!res && !(res= set_routine_security_ctx(thd, m_sp, false, - &save_secutiry_ctx))) - { + res= set_routine_security_ctx(thd, m_sp, false, &save_secutiry_ctx); + if (!res) sp_restore_security_context(thd, save_secutiry_ctx); - } + #endif /* ! NO_EMBEDDED_ACCESS_CHECKS */ } - return res; + DBUG_RETURN(res); } diff --git a/sql/item_func.h b/sql/item_func.h index 342880d65c5..5bd24dbf4b1 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1411,12 +1411,15 @@ private: sp_name *m_name; mutable sp_head *m_sp; TABLE *dummy_table; - Field *result_field; char result_buf[64]; + /* + The result field of the concrete stored function. 
+ */ + Field *sp_result_field; - bool execute(Field **flp); - bool execute_impl(THD *thd, Field *return_value_fld); - Field *sp_result_field(void) const; + bool execute(); + bool execute_impl(THD *thd); + bool init_result_field(THD *thd); public: @@ -1442,23 +1445,23 @@ public: longlong val_int() { - if (execute(&result_field)) + if (execute()) return (longlong) 0; - return result_field->val_int(); + return sp_result_field->val_int(); } double val_real() { - if (execute(&result_field)) + if (execute()) return 0.0; - return result_field->val_real(); + return sp_result_field->val_real(); } my_decimal *val_decimal(my_decimal *dec_buf) { - if (execute(&result_field)) + if (execute()) return NULL; - return result_field->val_decimal(dec_buf); + return sp_result_field->val_decimal(dec_buf); } String *val_str(String *str) @@ -1467,7 +1470,7 @@ public: char buff[20]; buf.set(buff, 20, str->charset()); buf.length(0); - if (execute(&result_field)) + if (execute()) return NULL; /* result_field will set buf pointing to internal buffer @@ -1475,7 +1478,7 @@ public: when SP is executed. In order to prevent occasional corruption of returned value, we make here a copy. */ - result_field->val_str(&buf); + sp_result_field->val_str(&buf); str->copy(buf); return str; } @@ -1483,11 +1486,11 @@ public: virtual bool change_context_processor(byte *cntx) { context= (Name_resolution_context *)cntx; return FALSE; } - void fix_length_and_dec(); - bool find_and_check_access(THD * thd); + bool sp_check_access(THD * thd); virtual enum Functype functype() const { return FUNC_SP; } bool fix_fields(THD *thd, Item **ref); + void fix_length_and_dec(void); bool is_expensive() { return 1; } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 35a9f026b1d..78741483c0b 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -38,6 +38,7 @@ void Item_geometry_func::fix_length_and_dec() collation.set(&my_charset_bin); decimals=0; max_length=MAX_BLOB_WIDTH; + maybe_null= 1; } int Item_geometry_func::get_geometry_type() const @@ -66,11 +67,8 @@ String *Item_func_geometry_from_text::val_str(String *str) return 0; str->length(0); str->q_append(srid); - if (!Geometry::create_from_wkt(&buffer, &trs, str, 0)) - /* We shouldn't return NULL here as NULL is a legal spatial object */ - /* Geometry::bad_spatial_data will produce error message beeing stored*/ - /* in GEOMETRY field */ - return &Geometry::bad_geometry_data; + if ((null_value= !Geometry::create_from_wkt(&buffer, &trs, str, 0))) + return 0; return str; } @@ -124,6 +122,7 @@ String *Item_func_as_wkt::val_str(String *str) void Item_func_as_wkt::fix_length_and_dec() { max_length=MAX_BLOB_WIDTH; + maybe_null= 1; } @@ -389,7 +388,8 @@ String *Item_func_spatial_collection::val_str(String *str) for (i= 0; i < arg_count; ++i) { String *res= args[i]->val_str(&arg_value); - if (args[i]->null_value) + uint32 len; + if (args[i]->null_value || ((len= res->length()) < WKB_HEADER_SIZE)) goto err; if (coll_type == Geometry::wkb_geometrycollection) @@ -398,13 +398,12 @@ String *Item_func_spatial_collection::val_str(String *str) In the case of GeometryCollection we don't need any checkings for item types, so just copy them into target collection */ - if (str->append(res->ptr(), res->length(), (uint32) 512)) + if (str->append(res->ptr(), len, (uint32) 512)) goto err; } else { enum Geometry::wkbType wkb_type; - uint32 len=res->length(); const char *data= res->ptr() + 1; /* @@ -412,8 +411,6 @@ String *Item_func_spatial_collection::val_str(String *str) are of specific type, let's do 
this checking now */ - if (len < 5) - goto err; wkb_type= (Geometry::wkbType) uint4korr(data); data+= 4; len-= 5; @@ -535,9 +532,13 @@ longlong Item_func_spatial_rel::val_int() longlong Item_func_isempty::val_int() { DBUG_ASSERT(fixed == 1); - String tmp; - null_value=0; - return args[0]->null_value ? 1 : 0; + String tmp; + String *swkb= args[0]->val_str(&tmp); + Geometry_buffer buffer; + + null_value= args[0]->null_value || + !(Geometry::construct(&buffer, swkb->ptr(), swkb->length())); + return null_value ? 1 : 0; } @@ -545,10 +546,11 @@ longlong Item_func_issimple::val_int() { DBUG_ASSERT(fixed == 1); String tmp; - String *wkb=args[0]->val_str(&tmp); - - if ((null_value= (!wkb || args[0]->null_value))) - return 0; + String *swkb= args[0]->val_str(&tmp); + Geometry_buffer buffer; + + null_value= args[0]->null_value || + !(Geometry::construct(&buffer, swkb->ptr(), swkb->length())); /* TODO: Ramil or Holyfoot, add real IsSimple calculation */ return 0; } diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 5361a02aa83..068caa447ef 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -34,6 +34,7 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } Field *tmp_table_field(TABLE *t_arg); virtual int get_geometry_type() const; + bool is_null() { (void) val_int(); return null_value; } }; class Item_func_geometry_from_text: public Item_geometry_func @@ -81,6 +82,7 @@ public: void fix_length_and_dec() { max_length=20; // "GeometryCollection" is the most long + maybe_null= 1; }; }; @@ -225,6 +227,8 @@ public: } } void print(String *str) { Item_func::print(str); } + void fix_length_and_dec() { maybe_null= 1; } + bool is_null() { (void) val_int(); return null_value; } }; class Item_func_isempty: public Item_bool_func @@ -234,6 +238,7 @@ public: longlong val_int(); optimize_type select_optimize() const { return OPTIMIZE_NONE; } const char *func_name() const { return "isempty"; } + void fix_length_and_dec() { maybe_null= 1; } }; class Item_func_issimple: public Item_bool_func @@ -243,6 +248,7 @@ public: longlong val_int(); optimize_type select_optimize() const { return OPTIMIZE_NONE; } const char *func_name() const { return "issimple"; } + void fix_length_and_dec() { maybe_null= 1; } }; class Item_func_isclosed: public Item_bool_func @@ -252,6 +258,7 @@ public: longlong val_int(); optimize_type select_optimize() const { return OPTIMIZE_NONE; } const char *func_name() const { return "isclosed"; } + void fix_length_and_dec() { maybe_null= 1; } }; class Item_func_dimension: public Item_int_func @@ -261,7 +268,7 @@ public: Item_func_dimension(Item *a): Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "dimension"; } - void fix_length_and_dec() { max_length=10; } + void fix_length_and_dec() { max_length= 10; maybe_null= 1; } }; class Item_func_x: public Item_real_func @@ -271,6 +278,11 @@ public: Item_func_x(Item *a): Item_real_func(a) {} double val_real(); const char *func_name() const { return "x"; } + void fix_length_and_dec() + { + Item_real_func::fix_length_and_dec(); + maybe_null= 1; + } }; @@ -281,6 +293,11 @@ public: Item_func_y(Item *a): Item_real_func(a) {} double val_real(); const char *func_name() const { return "y"; } + void fix_length_and_dec() + { + Item_real_func::fix_length_and_dec(); + maybe_null= 1; + } }; @@ -291,7 +308,7 @@ public: Item_func_numgeometries(Item *a): Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "numgeometries"; } - void fix_length_and_dec() { max_length=10; } + void 
fix_length_and_dec() { max_length= 10; maybe_null= 1; } }; @@ -302,7 +319,7 @@ public: Item_func_numinteriorring(Item *a): Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "numinteriorrings"; } - void fix_length_and_dec() { max_length=10; } + void fix_length_and_dec() { max_length= 10; maybe_null= 1; } }; @@ -313,7 +330,7 @@ public: Item_func_numpoints(Item *a): Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "numpoints"; } - void fix_length_and_dec() { max_length=10; } + void fix_length_and_dec() { max_length= 10; maybe_null= 1; } }; @@ -324,6 +341,11 @@ public: Item_func_area(Item *a): Item_real_func(a) {} double val_real(); const char *func_name() const { return "area"; } + void fix_length_and_dec() + { + Item_real_func::fix_length_and_dec(); + maybe_null= 1; + } }; @@ -334,6 +356,11 @@ public: Item_func_glength(Item *a): Item_real_func(a) {} double val_real(); const char *func_name() const { return "glength"; } + void fix_length_and_dec() + { + Item_real_func::fix_length_and_dec(); + maybe_null= 1; + } }; @@ -344,7 +371,7 @@ public: Item_func_srid(Item *a): Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "srid"; } - void fix_length_and_dec() { max_length= 10; } + void fix_length_and_dec() { max_length= 10; maybe_null= 1; } }; #define GEOM_NEW(thd, obj_constructor) new (thd->mem_root) obj_constructor diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index faea5380a66..e6f951bfc7b 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -25,9 +25,6 @@ #include "mysql_priv.h" #include <m_ctype.h> -#ifdef HAVE_OPENSSL -#include <openssl/des.h> -#endif /* HAVE_OPENSSL */ #include "md5.h" #include "sha1.h" #include "my_aes.h" @@ -967,18 +964,18 @@ String *Item_func_insert::val_str(String *str) args[3]->null_value) goto null; /* purecov: inspected */ - if ((start < 0) || (start > res->length() + 1)) + if ((start < 0) || (start > res->length())) return res; // Wrong param; skip insert - if ((length < 0) || (length > res->length() + 1)) - length= res->length() + 1; + if ((length < 0) || (length > res->length())) + length= res->length(); /* start and length are now sufficiently valid to pass to charpos function */ start= res->charpos((int) start); length= res->charpos((int) length, (uint32) start); /* Re-testing with corrected params */ - if (start > res->length() + 1) - return res; // Wrong param; skip insert + if (start > res->length()) + return res; /* purecov: inspected */ // Wrong param; skip insert if (length > res->length() - start) length= res->length() - start; @@ -1184,11 +1181,10 @@ void Item_func_substr::fix_length_and_dec() if (args[1]->const_item()) { int32 start= (int32) args[1]->val_int(); - start= (int32)((start < 0) ? max_length + start : start - 1); - if (start < 0 || start >= (int32) max_length) - max_length=0; /* purecov: inspected */ + if (start < 0) + max_length= ((uint)(-start) > max_length) ? 
0 : (uint)(-start); else - max_length-= (uint) start; + max_length-= min((uint)(start - 1), max_length); } if (arg_count == 3 && args[2]->const_item()) { @@ -1817,7 +1813,8 @@ void Item_func_soundex::fix_length_and_dec() { collation.set(args[0]->collation); max_length=args[0]->max_length; - set_if_bigger(max_length,4); + set_if_bigger(max_length, 4 * collation.collation->mbminlen); + tmp_value.set_charset(collation.collation); } @@ -1827,14 +1824,15 @@ void Item_func_soundex::fix_length_and_dec() else return 0 */ -static char soundex_toupper(char ch) +static int soundex_toupper(int ch) { return (ch >= 'a' && ch <= 'z') ? ch - 'a' + 'A' : ch; } -static char get_scode(char *ptr) + +static char get_scode(int wc) { - uchar ch= soundex_toupper(*ptr); + int ch= soundex_toupper(wc); if (ch < 'A' || ch > 'Z') { // Thread extended alfa (country spec) @@ -1844,46 +1842,121 @@ static char get_scode(char *ptr) } +static bool my_uni_isalpha(int wc) +{ + /* + Return true for all Basic Latin letters: a..z A..Z. + Return true for all Unicode characters with code higher than U+00C0: + - characters between 'z' and U+00C0 are controls and punctuations. + - "U+00C0 LATIN CAPITAL LETTER A WITH GRAVE" is the first letter after 'z'. + */ + return (wc >= 'a' && wc <= 'z') || + (wc >= 'A' && wc <= 'Z') || + (wc >= 0xC0); +} + + String *Item_func_soundex::val_str(String *str) { DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); char last_ch,ch; CHARSET_INFO *cs= collation.collation; + my_wc_t wc; + uint nchars; + int rc; - if ((null_value=args[0]->null_value)) + if ((null_value= args[0]->null_value)) return 0; /* purecov: inspected */ - if (tmp_value.alloc(max(res->length(),4))) + if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen))) return str; /* purecov: inspected */ char *to= (char *) tmp_value.ptr(); - char *from= (char *) res->ptr(), *end=from+res->length(); - tmp_value.set_charset(cs); + char *to_end= to + tmp_value.alloced_length(); + char *from= (char *) res->ptr(), *end= from + res->length(); + + for ( ; ; ) /* Skip pre-space */ + { + if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) + return &my_empty_string; /* EOL or invalid byte sequence */ + + if (rc == 1 && cs->ctype) + { + /* Single byte letter found */ + if (my_isalpha(cs, *from)) + { + last_ch= get_scode(*from); // Code of the first letter + *to++= soundex_toupper(*from++); // Copy first letter + break; + } + from++; + } + else + { + from+= rc; + if (my_uni_isalpha(wc)) + { + /* Multibyte letter found */ + wc= soundex_toupper(wc); + last_ch= get_scode(wc); // Code of the first letter + if ((rc= cs->cset->wc_mb(cs, wc, (uchar*) to, (uchar*) to_end)) <= 0) + { + /* Extra safety - should not really happen */ + DBUG_ASSERT(false); + return &my_empty_string; + } + to+= rc; + break; + } + } + } - while (from != end && !my_isalpha(cs,*from)) // Skip pre-space - from++; /* purecov: inspected */ - if (from == end) - return &my_empty_string; // No alpha characters. - *to++ = soundex_toupper(*from); // Copy first letter - last_ch = get_scode(from); // code of the first letter - // for the first 'double-letter check. - // Loop on input letters until - // end of input (null) or output - // letter code count = 3 - for (from++ ; from < end ; from++) - { - if (!my_isalpha(cs,*from)) - continue; - ch=get_scode(from); + /* + last_ch is now set to the first 'double-letter' check. 
+ loop on input letters until end of input + */ + for (nchars= 1 ; ; ) + { + if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) + break; /* EOL or invalid byte sequence */ + + if (rc == 1 && cs->ctype) + { + if (!my_isalpha(cs, *from++)) + continue; + } + else + { + from+= rc; + if (!my_uni_isalpha(wc)) + continue; + } + + ch= get_scode(wc); if ((ch != '0') && (ch != last_ch)) // if not skipped or double { - *to++ = ch; // letter, copy to output - last_ch = ch; // save code of last input letter - } // for next double-letter check + // letter, copy to output + if ((rc= cs->cset->wc_mb(cs, (my_wc_t) ch, + (uchar*) to, (uchar*) to_end)) <= 0) + { + // Extra safety - should not really happen + DBUG_ASSERT(false); + break; + } + to+= rc; + nchars++; + last_ch= ch; // save code of last input letter + } // for next double-letter check + } + + /* Pad up to 4 characters with DIGIT ZERO, if the string is shorter */ + if (nchars < 4) + { + uint nbytes= (4 - nchars) * cs->mbminlen; + cs->cset->fill(cs, to, nbytes, '0'); + to+= nbytes; } - for (end=(char*) tmp_value.ptr()+4 ; to < end ; to++) - *to = '0'; - *to=0; // end string + tmp_value.length((uint) (to-tmp_value.ptr())); return &tmp_value; } @@ -1926,7 +1999,7 @@ String *Item_func_format::val_str(String *str) int diff; DBUG_ASSERT(fixed == 1); - dec= args[1]->val_int(); + dec= (int) args[1]->val_int(); if (args[1]->null_value) { null_value=1; @@ -2274,8 +2347,10 @@ String *Item_func_repeat::val_str(String *str) if (args[0]->null_value || args[1]->null_value) goto err; // string and/or delim are null null_value= 0; - if ((count <= 0) && !args[1]->unsigned_flag) // For nicer SQL code + + if (count == 0 || count < 0 && !args[1]->unsigned_flag) return &my_empty_string; + /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Bounds check on count: If this is triggered, we will error. */ if ((ulonglong) count > INT_MAX32) @@ -2803,6 +2878,11 @@ String *Item_load_file::val_str(String *str) (void) fn_format(path, file_name->c_ptr(), mysql_real_data_home, "", MY_RELATIVE_PATH | MY_UNPACK_FILENAME); + /* Read only allowed from within dir specified by secure_file_priv */ + if (opt_secure_file_priv && + strncmp(opt_secure_file_priv, path, strlen(opt_secure_file_priv))) + goto err; + if (!my_stat(path, &stat_info, MYF(0))) goto err; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 41466eb92cf..790911dcd04 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -592,7 +592,11 @@ class Item_func_unhex :public Item_str_func { String tmp_value; public: - Item_func_unhex(Item *a) :Item_str_func(a) {} + Item_func_unhex(Item *a) :Item_str_func(a) + { + /* there can be bad hex strings */ + maybe_null= 1; + } const char *func_name() const { return "unhex"; } String *val_str(String *); void fix_length_and_dec() diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index de67a314631..79b09967a9d 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -51,6 +51,10 @@ Item_subselect::Item_subselect(): void Item_subselect::init(st_select_lex *select_lex, select_subselect *result) { + /* + Please see Item_singlerow_subselect::invalidate_and_restore_select_lex(), + which depends on alterations to the parse tree implemented here. 
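/*
  A simplified sketch of the directory restriction added to LOAD_FILE() above,
  assuming both paths are already absolute and normalized (the server resolves
  the file name with fn_format() first): the file may be read only when its
  path starts with the configured privileged directory.
*/
#include <cstring>

static bool allowed_by_secure_file_priv(const char *secure_dir, const char *path)
{
  if (secure_dir == nullptr || *secure_dir == '\0')
    return true;                                  // no restriction configured
  return std::strncmp(secure_dir, path, std::strlen(secure_dir)) == 0;
}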
+ */ DBUG_ENTER("Item_subselect::init"); DBUG_PRINT("enter", ("select_lex: 0x%lx", (long) select_lex)); @@ -91,6 +95,12 @@ void Item_subselect::init(st_select_lex *select_lex, DBUG_VOID_RETURN; } +st_select_lex * +Item_subselect::get_select_lex() +{ + return unit->first_select(); +} + void Item_subselect::cleanup() { DBUG_ENTER("Item_subselect::cleanup"); @@ -311,6 +321,26 @@ Item_singlerow_subselect::Item_singlerow_subselect(st_select_lex *select_lex) DBUG_VOID_RETURN; } +st_select_lex * +Item_singlerow_subselect::invalidate_and_restore_select_lex() +{ + DBUG_ENTER("Item_singlerow_subselect::invalidate_and_restore_select_lex"); + st_select_lex *result= get_select_lex(); + + DBUG_ASSERT(result); + + /* + This code restore the parse tree in it's state before the execution of + Item_singlerow_subselect::Item_singlerow_subselect(), + and in particular decouples this object from the SELECT_LEX, + so that the SELECT_LEX can be used with a different flavor + or Item_subselect instead, as part of query rewriting. + */ + unit->item= NULL; + + DBUG_RETURN(result); +} + Item_maxmin_subselect::Item_maxmin_subselect(THD *thd_param, Item_subselect *parent, st_select_lex *select_lex, @@ -1050,7 +1080,8 @@ Item_in_subselect::single_value_transformer(JOIN *join, Item *having= item, *orig_item= item; select_lex->item_list.empty(); select_lex->item_list.push_back(new Item_int("Not_used", - (longlong) 1, 21)); + (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); item= func->create(expr, item); @@ -2015,7 +2046,8 @@ int subselect_uniquesubquery_engine::exec() table->file->ha_index_init(tab->ref.key, 0); error= table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) error= report_error(table, error); @@ -2124,7 +2156,8 @@ int subselect_indexsubquery_engine::exec() table->file->ha_index_init(tab->ref.key, 1); error= table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) error= report_error(table, error); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index fdf3708cabb..37264f2136f 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -127,6 +127,12 @@ public: enum_parsing_place place() { return parsing_place; } bool walk(Item_processor processor, bool walk_subquery, byte *arg); + /** + Get the SELECT_LEX structure associated with this Item. + @return the SELECT_LEX structure associated with this Item + */ + st_select_lex* get_select_lex(); + friend class select_subselect; friend class Item_in_optimizer; friend bool Item_field::fix_fields(THD *, Item **); @@ -170,6 +176,20 @@ public: bool null_inside(); void bring_value(); + /** + This method is used to implement a special case of semantic tree + rewriting, mandated by a SQL:2003 exception in the specification. + The only caller of this method is handle_sql2003_note184_exception(), + see the code there for more details. + Note that this method breaks the object internal integrity, by + removing it's association with the corresponding SELECT_LEX, + making this object orphan from the parse tree. + No other method, beside the destructor, should be called on this + object, as it is now invalid. 
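/*
  A minimal model of the "invalidate and hand back" pattern documented above,
  with invented node types: the wrapper clears the back-pointer it installed
  in the subtree and returns the detached subtree so the caller can attach it
  to a different kind of wrapper; the old wrapper must not be used afterwards.
*/
struct SelectNode { void *owning_item = nullptr; };

class SubqueryItem {
  SelectNode *select;
public:
  explicit SubqueryItem(SelectNode *s) : select(s) { s->owning_item = this; }
  SelectNode *invalidate_and_restore()
  {
    SelectNode *result = select;
    result->owning_item = nullptr;   // decouple the subtree from this item
    select = nullptr;
    return result;
  }
};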
+ @return the SELECT_LEX structure that was given in the constructor. + */ + st_select_lex* invalidate_and_restore_select_lex(); + friend class select_singlerow_subselect; }; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index f34fc008186..7c3762d4785 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -63,6 +63,7 @@ bool Item_sum::init_sum_func_check(THD *thd) nest_level= thd->lex->current_select->nest_level; ref_by= 0; aggr_level= -1; + aggr_sel= NULL; max_arg_level= -1; max_sum_func_level= -1; return FALSE; @@ -148,9 +149,14 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) if (register_sum_func(thd, ref)) return TRUE; invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level)); + if (!invalid && thd->variables.sql_mode & MODE_ANSI) + invalid= aggr_level < 0 && max_arg_level < nest_level; } if (!invalid && aggr_level < 0) + { aggr_level= nest_level; + aggr_sel= thd->lex->current_select; + } /* By this moment we either found a subquery where the set function is to be aggregated and assigned a value that is >= 0 to aggr_level, @@ -160,8 +166,9 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) Additionally we have to check whether possible nested set functions are acceptable here: they are not, if the level of aggregation of some of them is less than aggr_level. - */ - invalid= aggr_level <= max_sum_func_level; + */ + if (!invalid) + invalid= aggr_level <= max_sum_func_level; if (invalid) { my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), @@ -176,6 +183,7 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) */ set_if_bigger(in_sum_func->max_sum_func_level, aggr_level); } + update_used_tables(); thd->lex->in_sum_func= in_sum_func; return FALSE; } @@ -210,7 +218,6 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) bool Item_sum::register_sum_func(THD *thd, Item **ref) { SELECT_LEX *sl; - SELECT_LEX *aggr_sl= NULL; nesting_map allow_sum_func= thd->lex->allow_sum_func; for (sl= thd->lex->current_select->master_unit()->outer_select() ; sl && sl->nest_level > max_arg_level; @@ -220,7 +227,7 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) { /* Found the most nested subquery where the function can be aggregated */ aggr_level= sl->nest_level; - aggr_sl= sl; + aggr_sel= sl; } } if (sl && (allow_sum_func & (1 << sl->nest_level))) @@ -231,21 +238,22 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) The set function will be aggregated in this subquery. */ aggr_level= sl->nest_level; - aggr_sl= sl; + aggr_sel= sl; + } if (aggr_level >= 0) { ref_by= ref; - /* Add the object to the list of registered objects assigned to aggr_sl */ - if (!aggr_sl->inner_sum_func_list) + /* Add the object to the list of registered objects assigned to aggr_sel */ + if (!aggr_sel->inner_sum_func_list) next= this; else { - next= aggr_sl->inner_sum_func_list->next; - aggr_sl->inner_sum_func_list->next= this; + next= aggr_sel->inner_sum_func_list->next; + aggr_sel->inner_sum_func_list->next= this; } - aggr_sl->inner_sum_func_list= this; - aggr_sl->with_sum_func= 1; + aggr_sel->inner_sum_func_list= this; + aggr_sel->with_sum_func= 1; /* Mark Item_subselect(s) as containing aggregate function all the way up @@ -263,16 +271,17 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) has aggregate functions directly referenced (i.e. not through a sub-select). 
*/ for (sl= thd->lex->current_select; - sl && sl != aggr_sl && sl->master_unit()->item; + sl && sl != aggr_sel && sl->master_unit()->item; sl= sl->master_unit()->outer_select() ) sl->master_unit()->item->with_sum_func= 1; } + thd->lex->current_select->mark_as_dependent(aggr_sel); return FALSE; } -Item_sum::Item_sum(List<Item> &list) - :arg_count(list.elements) +Item_sum::Item_sum(List<Item> &list) :arg_count(list.elements), + forced_const(FALSE) { if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) { @@ -296,7 +305,10 @@ Item_sum::Item_sum(List<Item> &list) Item_sum::Item_sum(THD *thd, Item_sum *item): Item_result_field(thd, item), arg_count(item->arg_count), - quick_group(item->quick_group) + aggr_sel(item->aggr_sel), + nest_level(item->nest_level), aggr_level(item->aggr_level), + quick_group(item->quick_group), used_tables_cache(item->used_tables_cache), + forced_const(item->forced_const) { if (arg_count <= 2) args=tmp_args; @@ -407,7 +419,7 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, break; case STRING_RESULT: if (max_length/collation.collation->mbmaxlen <= 255 || - max_length/collation.collation->mbmaxlen >=UINT_MAX16 || + convert_blob_length >=UINT_MAX16 || !convert_blob_length) return make_string_field(table); field= new Field_varstring(convert_blob_length, maybe_null, @@ -429,6 +441,25 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, } +void Item_sum::update_used_tables () +{ + if (!forced_const) + { + used_tables_cache= 0; + for (uint i=0 ; i < arg_count ; i++) + { + args[i]->update_used_tables(); + used_tables_cache|= args[i]->used_tables(); + } + + used_tables_cache&= PSEUDO_TABLE_BITS; + + /* the aggregate function is aggregated into its local context */ + used_tables_cache |= (1 << aggr_sel->join->tables) - 1; + } +} + + String * Item_sum_num::val_str(String *str) { @@ -488,7 +519,7 @@ Item_sum_num::fix_fields(THD *thd, Item **ref) Item_sum_hybrid::Item_sum_hybrid(THD *thd, Item_sum_hybrid *item) :Item_sum(thd, item), value(item->value), hybrid_type(item->hybrid_type), hybrid_field_type(item->hybrid_field_type), cmp_sign(item->cmp_sign), - used_table_cache(item->used_table_cache), was_values(item->was_values) + was_values(item->was_values) { /* copy results from old value */ switch (hybrid_type) { @@ -1082,7 +1113,6 @@ void Item_sum_count::cleanup() DBUG_ENTER("Item_sum_count::cleanup"); count= 0; Item_sum_int::cleanup(); - used_table_cache= ~(table_map) 0; DBUG_VOID_RETURN; } @@ -1105,8 +1135,10 @@ void Item_sum_avg::fix_length_and_dec() f_scale= args[0]->decimals; dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale); } - else + else { decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC); + max_length= args[0]->max_length + prec_increment; + } } @@ -1572,7 +1604,7 @@ void Item_sum_hybrid::cleanup() { DBUG_ENTER("Item_sum_hybrid::cleanup"); Item_sum::cleanup(); - used_table_cache= ~(table_map) 0; + forced_const= FALSE; /* by default it is TRUE to avoid TRUE reporting by diff --git a/sql/item_sum.h b/sql/item_sum.h index 4cf16fc79af..5cf4f93af0e 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -215,7 +215,9 @@ TODO: to catch queries where the limit is exceeded to make the code clean here. 
-*/ +*/ + +class st_select_lex; class Item_sum :public Item_result_field { @@ -231,25 +233,32 @@ public: Item_sum *next; /* next in the circular chain of registered objects */ uint arg_count; Item_sum *in_sum_func; /* embedding set function if any */ + st_select_lex * aggr_sel; /* select where the function is aggregated */ int8 nest_level; /* number of the nesting level of the set function */ int8 aggr_level; /* nesting level of the aggregating subquery */ int8 max_arg_level; /* max level of unbound column references */ int8 max_sum_func_level;/* max level of aggregation for embedded functions */ bool quick_group; /* If incremental update of fields */ +protected: + table_map used_tables_cache; + bool forced_const; + +public: + void mark_as_sum_func(); - Item_sum() :arg_count(0), quick_group(1) + Item_sum() :arg_count(0), quick_group(1), forced_const(FALSE) { mark_as_sum_func(); } - Item_sum(Item *a) - :args(tmp_args), arg_count(1), quick_group(1) + Item_sum(Item *a) :args(tmp_args), arg_count(1), quick_group(1), + forced_const(FALSE) { args[0]=a; mark_as_sum_func(); } - Item_sum( Item *a, Item *b ) - :args(tmp_args), arg_count(2), quick_group(1) + Item_sum( Item *a, Item *b ) :args(tmp_args), arg_count(2), quick_group(1), + forced_const(FALSE) { args[0]=a; args[1]=b; mark_as_sum_func(); @@ -319,10 +328,20 @@ public: virtual const char *func_name() const= 0; virtual Item *result_item(Field *field) { return new Item_field(field); } - table_map used_tables() const { return ~(table_map) 0; } /* Not used */ - bool const_item() const { return 0; } + table_map used_tables() const { return used_tables_cache; } + void update_used_tables (); + void cleanup() + { + Item::cleanup(); + forced_const= FALSE; + } bool is_null() { return null_value; } - void update_used_tables() { } + void make_const () + { + used_tables_cache= 0; + forced_const= TRUE; + } + virtual bool const_item() const { return forced_const; } void make_field(Send_field *field); void print(String *str); void fix_num_length_and_dec(); @@ -346,6 +365,8 @@ public: bool init_sum_func_check(THD *thd); bool check_sum_func(THD *thd, Item **ref); bool register_sum_func(THD *thd, Item **ref); + st_select_lex *depended_from() + { return (nest_level == aggr_level ? 
0 : aggr_sel); } }; @@ -509,23 +530,23 @@ public: class Item_sum_count :public Item_sum_int { longlong count; - table_map used_table_cache; public: Item_sum_count(Item *item_par) - :Item_sum_int(item_par),count(0),used_table_cache(~(table_map) 0) + :Item_sum_int(item_par),count(0) {} Item_sum_count(THD *thd, Item_sum_count *item) - :Item_sum_int(thd, item), count(item->count), - used_table_cache(item->used_table_cache) + :Item_sum_int(thd, item), count(item->count) {} - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_FUNC; } void clear(); void no_rows_in_result() { count=0; } bool add(); - void make_const(longlong count_arg) { count=count_arg; used_table_cache=0; } + void make_const(longlong count_arg) + { + count=count_arg; + Item_sum::make_const(); + } longlong val_int(); void reset_field(); void cleanup(); @@ -805,28 +826,22 @@ protected: Item_result hybrid_type; enum_field_types hybrid_field_type; int cmp_sign; - table_map used_table_cache; bool was_values; // Set if we have found at least one row (for max/min only) public: Item_sum_hybrid(Item *item_par,int sign) :Item_sum(item_par), sum(0.0), sum_int(0), hybrid_type(INT_RESULT), hybrid_field_type(MYSQL_TYPE_LONGLONG), - cmp_sign(sign), used_table_cache(~(table_map) 0), - was_values(TRUE) + cmp_sign(sign), was_values(TRUE) { collation.set(&my_charset_bin); } Item_sum_hybrid(THD *thd, Item_sum_hybrid *item); bool fix_fields(THD *, Item **); - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } - void clear(); double val_real(); longlong val_int(); my_decimal *val_decimal(my_decimal *); void reset_field(); String *val_str(String *); - void make_const() { used_table_cache=0; } bool keep_field_type(void) const { return 1; } enum Item_result result_type () const { return hybrid_type; } enum enum_field_types field_type() const { return hybrid_field_type; } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 00f077839c3..7636deab782 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1587,7 +1587,7 @@ int Item_func_now::save_in_field(Field *to, bool no_conversions) void Item_func_sysdate_local::store_now_in_TIME(TIME *now_time) { THD *thd= current_thd; - thd->variables.time_zone->gmt_sec_to_TIME(now_time, time(NULL)); + thd->variables.time_zone->gmt_sec_to_TIME(now_time, (my_time_t) time(NULL)); thd->time_zone_used= 1; } @@ -1927,19 +1927,6 @@ void Item_func_convert_tz::fix_length_and_dec() } -bool -Item_func_convert_tz::fix_fields(THD *thd_arg, Item **ref) -{ - String str; - if (Item_date_func::fix_fields(thd_arg, ref)) - return TRUE; - - tz_tables= thd_arg->lex->time_zone_tables_used; - - return FALSE; -} - - String *Item_func_convert_tz::val_str(String *str) { TIME time_tmp; @@ -1974,16 +1961,17 @@ bool Item_func_convert_tz::get_date(TIME *ltime, { my_time_t my_time_tmp; String str; + THD *thd= current_thd; if (!from_tz_cached) { - from_tz= my_tz_find(args[1]->val_str(&str), tz_tables); + from_tz= my_tz_find(thd, args[1]->val_str(&str)); from_tz_cached= args[1]->const_item(); } if (!to_tz_cached) { - to_tz= my_tz_find(args[2]->val_str(&str), tz_tables); + to_tz= my_tz_find(thd, args[2]->val_str(&str)); to_tz_cached= args[2]->const_item(); } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index e83ebe46f1d..7d81921d344 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -349,7 +349,7 @@ public: enum_field_types 
field_type() const { return MYSQL_TYPE_DATE; } String *val_str(String *str); longlong val_int(); - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { return val_real_from_decimal(); } const char *func_name() const { return "date"; } void fix_length_and_dec() { @@ -387,6 +387,7 @@ public: return tmp_table_field_from_field_type(table, 0); } bool result_as_longlong() { return TRUE; } + double val_real() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -409,13 +410,14 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_TIME; } void fix_length_and_dec() { - decimals=0; + decimals= DATETIME_DEC; max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } Field *tmp_table_field(TABLE *table) { return tmp_table_field_from_field_type(table, 0); } + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -519,7 +521,6 @@ public: Item_func_now() :Item_date_func() {} Item_func_now(Item *a) :Item_date_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } int save_in_field(Field *to, bool no_conversions); String *val_str(String *str); @@ -608,11 +609,6 @@ class Item_func_from_unixtime :public Item_date_func THD *thd; public: Item_func_from_unixtime(Item *a) :Item_date_func(a) {} - double val_real() - { - DBUG_ASSERT(fixed == 1); - return (double) Item_func_from_unixtime::val_int(); - } longlong val_int(); String *val_str(String *str); const char *func_name() const { return "from_unixtime"; } @@ -637,8 +633,6 @@ class Time_zone; */ class Item_func_convert_tz :public Item_date_func { - /* Cached pointer to list of pre-opened time zone tables. 
*/ - TABLE_LIST *tz_tables; /* If time zone parameters are constants we are caching objects that represent them (we use separate from_tz_cached/to_tz_cached members @@ -651,10 +645,8 @@ class Item_func_convert_tz :public Item_date_func Item_func_convert_tz(Item *a, Item *b, Item *c): Item_date_func(a, b, c), from_tz_cached(0), to_tz_cached(0) {} longlong val_int(); - double val_real() { return (double) val_int(); } String *val_str(String *str); const char *func_name() const { return "convert_tz"; } - bool fix_fields(THD *, Item **); void fix_length_and_dec(); bool get_date(TIME *res, uint fuzzy_date); void cleanup(); @@ -677,7 +669,6 @@ public: Item_str_timefunc::fix_length_and_dec(); collation.set(&my_charset_bin); maybe_null=1; - decimals= DATETIME_DEC; } const char *func_name() const { return "sec_to_time"; } bool result_as_longlong() { return TRUE; } @@ -698,7 +689,6 @@ public: const char *func_name() const { return "date_add_interval"; } void fix_length_and_dec(); enum_field_types field_type() const { return cached_field_type; } - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); bool get_date(TIME *res, uint fuzzy_date); bool eq(const Item *item, bool binary_cmp) const; @@ -800,6 +790,7 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -827,6 +818,7 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -851,8 +843,15 @@ public: { return tmp_table_field_from_field_type(table, 0); } + void fix_length_and_dec() + { + Item_typecast_maybe_null::fix_length_and_dec(); + decimals= DATETIME_DEC; + } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } + double val() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -899,6 +898,7 @@ public: } void print(String *str); const char *func_name() const { return "add_time"; } + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 9321992e566..26474990644 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -1044,12 +1044,12 @@ static struct my_xpath_keyword_names_st my_keyword_names[] = {MY_XPATH_LEX_OR , "or" , 2, 0 }, {MY_XPATH_LEX_DIV , "div" , 3, 0 }, {MY_XPATH_LEX_MOD , "mod" , 3, 0 }, - - {MY_XPATH_LEX_NODETYPE, "comment" , 7, 0 }, - {MY_XPATH_LEX_NODETYPE, "text" , 4, 0 }, - {MY_XPATH_LEX_NODETYPE, "processing-instruction" , 22,0 }, - {MY_XPATH_LEX_NODETYPE, "node" , 4, 0 }, - + {0,NULL,0,0} +}; + + +static struct my_xpath_keyword_names_st my_axis_names[]= +{ {MY_XPATH_LEX_AXIS,"ancestor" , 8,MY_XPATH_AXIS_ANCESTOR }, {MY_XPATH_LEX_AXIS,"ancestor-or-self" ,16,MY_XPATH_AXIS_ANCESTOR_OR_SELF }, {MY_XPATH_LEX_AXIS,"attribute" , 9,MY_XPATH_AXIS_ATTRIBUTE }, @@ -1063,7 +1063,16 @@ static struct my_xpath_keyword_names_st my_keyword_names[] = {MY_XPATH_LEX_AXIS,"preceding" , 9,MY_XPATH_AXIS_PRECEDING }, {MY_XPATH_LEX_AXIS,"preceding-sibling" ,17,MY_XPATH_AXIS_PRECEDING_SIBLING }, {MY_XPATH_LEX_AXIS,"self" , 4,MY_XPATH_AXIS_SELF }, + {0,NULL,0,0} +}; + +static struct my_xpath_keyword_names_st my_nodetype_names[]= +{ + 
{MY_XPATH_LEX_NODETYPE, "comment" , 7, 0 }, + {MY_XPATH_LEX_NODETYPE, "text" , 4, 0 }, + {MY_XPATH_LEX_NODETYPE, "processing-instruction" , 22,0 }, + {MY_XPATH_LEX_NODETYPE, "node" , 4, 0 }, {0,NULL,0,0} }; @@ -1078,11 +1087,14 @@ static struct my_xpath_keyword_names_st my_keyword_names[] = - Token type, on lookup success. - MY_XPATH_LEX_IDENT, on lookup failure. */ -static int my_xpath_keyword(MY_XPATH *x, const char *beg, const char *end) +static int +my_xpath_keyword(MY_XPATH *x, + struct my_xpath_keyword_names_st *keyword_names, + const char *beg, const char *end) { struct my_xpath_keyword_names_st *k; size_t length= end-beg; - for (k= my_keyword_names; k->name; k++) + for (k= keyword_names; k->name; k++) { if (length == k->length && !strncasecmp(beg, k->name, length)) { @@ -1368,15 +1380,32 @@ my_xpath_lex_scan(MY_XPATH *xpath, beg+= length) /* no op */; lex->end= beg; - // check if a function call - if (*beg == '(' && (xpath->func= my_xpath_function(lex->beg, beg))) + if (beg < end) { - lex->term= MY_XPATH_LEX_FUNC; - return; + if (*beg == '(') + { + /* + check if a function call, e.g.: count(/a/b) + or a nodetype test, e.g.: /a/b/text() + */ + if ((xpath->func= my_xpath_function(lex->beg, beg))) + lex->term= MY_XPATH_LEX_FUNC; + else + lex->term= my_xpath_keyword(xpath, my_nodetype_names, + lex->beg, beg); + return; + } + // check if an axis specifier, e.g.: /a/b/child::* + else if (*beg == ':' && beg + 1 < end && beg[1] == ':') + { + lex->term= my_xpath_keyword(xpath, my_axis_names, + lex->beg, beg); + return; + } } - // check if a keyword - lex->term= my_xpath_keyword(xpath, lex->beg, beg); + lex->term= my_xpath_keyword(xpath, my_keyword_names, + lex->beg, beg); return; } @@ -2329,6 +2358,36 @@ static int my_xpath_parse_Number(MY_XPATH *xpath) /* + Scan NCName. + + SYNOPSIS + + The keywords AND, OR, MOD, DIV are valid identifiers + when they are in identifier context: + + SELECT + ExtractValue('<and><or><mod><div>VALUE</div></mod></or></and>', + '/and/or/mod/div') + -> VALUE + + RETURN + 1 - success + 0 - failure +*/ + +static int +my_xpath_parse_NCName(MY_XPATH *xpath) +{ + return + my_xpath_parse_term(xpath, MY_XPATH_LEX_IDENT) || + my_xpath_parse_term(xpath, MY_XPATH_LEX_AND) || + my_xpath_parse_term(xpath, MY_XPATH_LEX_OR) || + my_xpath_parse_term(xpath, MY_XPATH_LEX_MOD) || + my_xpath_parse_term(xpath, MY_XPATH_LEX_DIV) ?
1 : 0; +} + + +/* QName grammar can be found in a separate document http://www.w3.org/TR/REC-xml-names/#NT-QName @@ -2336,16 +2395,17 @@ static int my_xpath_parse_Number(MY_XPATH *xpath) [7] Prefix ::= NCName [8] LocalPart ::= NCName */ + static int my_xpath_parse_QName(MY_XPATH *xpath) { const char *beg; - if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_IDENT)) + if (!my_xpath_parse_NCName(xpath)) return 0; beg= xpath->prevtok.beg; if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_COLON)) return 1; /* Non qualified name */ - if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_IDENT)) + if (!my_xpath_parse_NCName(xpath)) return 0; xpath->prevtok.beg= beg; return 1; diff --git a/sql/key.cc b/sql/key.cc index bd614b10a70..faa7bf1f04b 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -29,6 +29,7 @@ field Field to search after key_length On partial match, contains length of fields before field + keypart key part # of a field NOTES Used when calculating key for NEXT_NUMBER @@ -45,7 +46,7 @@ */ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, - uint *key_length) + uint *key_length, uint *keypart) { reg2 int i; reg3 KEY *key_info; @@ -60,8 +61,8 @@ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, { if (key_info->key_part[0].offset == fieldpos) { /* Found key. Calc keylength */ - *key_length=0; - return(i); /* Use this key */ + *key_length= *keypart= 0; + return i; /* Use this key */ } } @@ -78,8 +79,11 @@ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, j++, key_part++) { if (key_part->offset == fieldpos) - return(i); /* Use this key */ - *key_length+=key_part->store_length; + { + *keypart= j; + return i; /* Use this key */ + } + *key_length+= key_part->store_length; } } return(-1); /* No key is ok */ diff --git a/sql/lex.h b/sql/lex.h index 2bf0e08c825..45155da7692 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -222,7 +222,7 @@ static SYMBOL symbols[] = { { "GLOBAL", SYM(GLOBAL_SYM)}, { "GRANT", SYM(GRANT)}, { "GRANTS", SYM(GRANTS)}, - { "GROUP", SYM(GROUP)}, + { "GROUP", SYM(GROUP_SYM)}, { "HANDLER", SYM(HANDLER_SYM)}, { "HASH", SYM(HASH_SYM)}, { "HAVING", SYM(HAVING)}, diff --git a/sql/lock.cc b/sql/lock.cc index 533307c6b85..4427e57a938 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -485,7 +485,7 @@ bool mysql_lock_abort_for_thread(THD *thd, TABLE *table) for (uint i=0; i < locked->lock_count; i++) { if (thr_abort_locks_for_thread(locked->locks[i]->lock, - table->in_use->real_id)) + table->in_use->thread_id)) result= TRUE; } my_free((gptr) locked,MYF(0)); @@ -582,7 +582,7 @@ TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, goto end; /* A temporary table does not have locks. */ - if (table->s->tmp_table == TMP_TABLE) + if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) goto end; /* Get command lock or LOCK TABLES lock. Maybe empty for INSERT DELAYED. */ @@ -604,10 +604,10 @@ TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, for (; haystack; haystack= haystack->next_global) { - if (haystack->placeholder() || haystack->schema_table) + if (haystack->placeholder()) continue; table2= haystack->table; - if (table2->s->tmp_table == TMP_TABLE) + if (table2->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; /* All tables in list must be in lock. 
*/ @@ -692,9 +692,10 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, DBUG_PRINT("info", ("count %d", count)); *write_lock_used=0; + uint system_count= 0; for (i=tables=lock_count=0 ; i < count ; i++) { - if (table_ptr[i]->s->tmp_table != TMP_TABLE) + if (table_ptr[i]->s->tmp_table != NON_TRANSACTIONAL_TMP_TABLE) { tables+=table_ptr[i]->file->lock_count(); lock_count++; @@ -705,7 +706,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, */ if (!table_ptr[i]-> file-> check_if_locking_is_allowed(thd->lex->sql_command, thd->lex->type, - table_ptr[i], count, + table_ptr[i], count, i, &system_count, (thd == logger.get_general_log_thd()) || (thd == logger.get_slow_log_thd()) || (thd == logger.get_privileged_thread()))) @@ -736,7 +737,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, TABLE *table; enum thr_lock_type lock_type; - if ((table=table_ptr[i])->s->tmp_table == TMP_TABLE) + if ((table=table_ptr[i])->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; lock_type= table->reginfo.lock_type; if (lock_type >= TL_WRITE_ALLOW_WRITE) diff --git a/sql/log.cc b/sql/log.cc index 5e9ebfcb902..fe3a4f7df5e 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -31,15 +31,6 @@ #include <mysql/plugin.h> -/* - Define placement versions of operator new and operator delete since - we cannot be sure that the <new> include exists. - */ -inline void *operator new(size_t, void *ptr) { return ptr; } -inline void *operator new[](size_t, void *ptr) { return ptr; } -inline void operator delete(void*, void*) { /* Do nothing */ } -inline void operator delete[](void*, void*) { /* Do nothing */ } - /* max size of the log message */ #define MAX_LOG_BUFFER_SIZE 1024 #define MAX_USER_HOST_SIZE 512 @@ -75,9 +66,9 @@ sql_print_message_func sql_print_message_handlers[3] = char *make_default_log_name(char *buff,const char* log_ext) { - strmake(buff, glob_hostname, FN_REFLEN-5); + strmake(buff, pidfile_name, FN_REFLEN-5); return fn_format(buff, buff, mysql_data_home, log_ext, - MYF(MY_UNPACK_FILENAME|MY_APPEND_EXT)); + MYF(MY_UNPACK_FILENAME|MY_REPLACE_EXT)); } /* @@ -148,6 +139,7 @@ public: void truncate(my_off_t pos) { DBUG_PRINT("info", ("truncating to position %lu", (ulong) pos)); + DBUG_PRINT("info", ("before_stmt_pos=%lu", (ulong) pos)); delete pending(); set_pending(0); reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0); @@ -267,7 +259,7 @@ bool Log_to_csv_event_handler::open_log_table(uint log_table_type) table->table_name_length= 8; break; default: - DBUG_ASSERT(0); + assert(0); // Impossible } /* @@ -311,6 +303,7 @@ bool Log_to_csv_event_handler::open_log_table(uint log_table_type) { table->table->use_all_columns(); table->table->locked_by_logger= TRUE; + table->table->no_replicate= TRUE; } /* restore thread settings */ if (curr) @@ -906,7 +899,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length, bool is_command= FALSE; char user_host_buff[MAX_USER_HOST_SIZE]; - my_time_t current_time; + time_t current_time; Security_context *sctx= thd->security_ctx; uint user_host_len= 0; longlong query_time= 0, lock_time= 0; @@ -1161,7 +1154,7 @@ void LOGGER::deactivate_log_handler(THD *thd, uint log_type) log_thd= table_log_handler->general_log_thd; break; default: - DBUG_ASSERT(0); + assert(0); // Impossible } if (!(*tmp_opt)) @@ -1310,7 +1303,7 @@ void Log_to_csv_event_handler:: table= &slow_log; break; default: - DBUG_ASSERT(0); + assert(0); // Impossible } /* @@ -1555,7 +1548,7 @@ static int binlog_commit(handlerton *hton, 
THD *thd, bool all) (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_ASSERT(mysql_bin_log.is_open()); - if (all && trx_data->empty()) + if (trx_data->empty()) { // we're here because trans_log was flushed in MYSQL_BIN_LOG::log_xid() trx_data->reset(); @@ -1742,7 +1735,7 @@ void setup_windows_event_source() /* Register EventMessageFile */ dwError = RegSetValueEx(hRegKey, "EventMessageFile", 0, REG_EXPAND_SZ, - (PBYTE) szPath, strlen(szPath)+1); + (PBYTE) szPath, (DWORD) (strlen(szPath) + 1)); /* Register supported event types */ dwTypes= (EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE | @@ -2297,7 +2290,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name, TODO: The following should be using fn_format(); We just need to first change fn_format() to cut the file name if it's too long. */ - strmake(buff, glob_hostname, FN_REFLEN - 5); + strmake(buff, pidfile_name, FN_REFLEN - 5); strmov(fn_ext(buff), suffix); return (const char *)buff; } @@ -2506,7 +2499,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name, /* Set 'created' to 0, so that in next relay logs this event does not trigger cleaning actions on the slave in - Format_description_log_event::exec_event(). + Format_description_log_event::apply_event_impl(). */ description_event_for_queue->created= 0; /* Don't set log_pos in event header */ @@ -3213,8 +3206,10 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) { tc_log_page_waits++; pthread_mutex_lock(&LOCK_prep_xids); - while (prepared_xids) + while (prepared_xids) { + DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids)); pthread_cond_wait(&COND_prep_xids, &LOCK_prep_xids); + } pthread_mutex_unlock(&LOCK_prep_xids); } @@ -3352,13 +3347,13 @@ bool MYSQL_BIN_LOG::flush_and_sync() return err; } -void MYSQL_BIN_LOG::start_union_events(THD *thd) +void MYSQL_BIN_LOG::start_union_events(THD *thd, query_id_t query_id_param) { DBUG_ASSERT(!thd->binlog_evt_union.do_union); thd->binlog_evt_union.do_union= TRUE; thd->binlog_evt_union.unioned_events= FALSE; thd->binlog_evt_union.unioned_events_trans= FALSE; - thd->binlog_evt_union.first_query_id= thd->query_id; + thd->binlog_evt_union.first_query_id= query_id_param; } void MYSQL_BIN_LOG::stop_union_events(THD *thd) @@ -3473,9 +3468,9 @@ int THD::binlog_flush_transaction_cache() { DBUG_ENTER("binlog_flush_transaction_cache"); binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot]; - DBUG_PRINT("enter", ("trx_data: 0x%lx", (ulong) trx_data)); + DBUG_PRINT("enter", ("trx_data=0x%lu", (ulong) trx_data)); if (trx_data) - DBUG_PRINT("enter", ("trx_data->before_stmt_pos: %lu", + DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%lu", (ulong) trx_data->before_stmt_pos)); /* @@ -3780,7 +3775,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) nb_elements())); /* If the auto_increment was second in a table's index (possible with - MyISAM or BDB) (table->next_number_key_offset != 0), such event is + MyISAM or BDB) (table->next_number_keypart != 0), such event is in fact not necessary. We could avoid logging it. 
*/ Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT, @@ -3887,7 +3882,7 @@ void MYSQL_BIN_LOG::rotate_and_purge(uint flags) #ifdef HAVE_REPLICATION if (expire_logs_days) { - long purge_time= time(0) - expire_logs_days*24*60*60; + long purge_time= (long) (time(0) - expire_logs_days*24*60*60); if (purge_time >= 0) purge_logs_before_date(purge_time); } @@ -4525,7 +4520,7 @@ int TC_LOG_MMAP::open(const char *opt_name) goto err; if (using_heuristic_recover()) return 1; - if ((fd= my_create(logname, O_RDWR, 0, MYF(MY_WME))) < 0) + if ((fd= my_create(logname, CREATE_MODE, O_RDWR, MYF(MY_WME))) < 0) goto err; inited=1; file_length= opt_tc_log_size; @@ -5068,8 +5063,10 @@ void TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid) { pthread_mutex_lock(&LOCK_prep_xids); DBUG_ASSERT(prepared_xids > 0); - if (--prepared_xids == 0) + if (--prepared_xids == 0) { + DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids)); pthread_cond_signal(&COND_prep_xids); + } pthread_mutex_unlock(&LOCK_prep_xids); rotate_and_purge(0); // as ::write() did not rotate } diff --git a/sql/log.h b/sql/log.h index 80aa4b20ee6..ed0c3557d08 100644 --- a/sql/log.h +++ b/sql/log.h @@ -238,7 +238,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG fix_max_relay_log_size). */ ulong max_size; - ulong prepared_xids; /* for tc log - number of xids to remember */ + long prepared_xids; /* for tc log - number of xids to remember */ // current file sequence number for load data infile binary logging uint file_id; uint open_count; // For replication @@ -341,7 +341,7 @@ public: int write_cache(IO_CACHE *cache, bool lock_log, bool flush_and_sync); - void start_union_events(THD *thd); + void start_union_events(THD *thd, query_id_t query_id_param); void stop_union_events(THD *thd); bool is_query_in_union(THD *thd, query_id_t query_id_param); diff --git a/sql/log_event.cc b/sql/log_event.cc index 54d75449cd5..e3c94b5e1c9 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -75,8 +75,7 @@ public: ~Write_on_release_cache() { - if (!my_b_copy_to_file(m_cache, m_file)) - reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE); + copy_event_cache_to_file_and_reinit(m_cache, m_file); if (m_flags | FLUSH_F) fflush(m_file); } @@ -88,9 +87,10 @@ public: operator&() DESCRIPTION - Function to return a pointer to the internal, so that the object - can be treated as a IO_CACHE and used with the my_b_* IO_CACHE - functions + + Function to return a pointer to the internal cache, so that the + object can be treated as a IO_CACHE and used with the my_b_* + IO_CACHE functions RETURN VALUE A pointer to the internal IO_CACHE. 
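The Write_on_release_cache hunk above is easier to follow with its RAII idea spelled out: output is accumulated in an internal cache, the destructor copies the cached bytes to the target file (now through copy_event_cache_to_file_and_reinit()) and may flush it, and operator&() exposes the internal cache so the object can be handed to the my_b_* helpers. The sketch below is a minimal stand-alone illustration of that write-on-release pattern using the standard library instead of IO_CACHE; the class name WriteOnRelease, the FLUSH_ON_RELEASE flag, and the std::string buffer are illustrative assumptions, not part of the server code.

#include <cstdio>
#include <string>

/* Minimal write-on-release helper: callers append to the internal buffer
   (exposed via operator&), and the destructor copies the buffered bytes to
   the target stream, optionally flushing it. */
class WriteOnRelease
{
public:
  enum { FLUSH_ON_RELEASE= 1 };

  WriteOnRelease(std::FILE *file, unsigned flags= 0)
    : m_file(file), m_flags(flags) {}

  ~WriteOnRelease()
  {
    /* Copy the cached bytes to the file on release, then reset the cache. */
    if (!m_cache.empty())
      std::fwrite(m_cache.data(), 1, m_cache.size(), m_file);
    m_cache.clear();
    if (m_flags & FLUSH_ON_RELEASE)
      std::fflush(m_file);
  }

  /* Expose the internal buffer so existing buffer-based helpers can append
     to it, in the spirit of Write_on_release_cache::operator&(). */
  std::string *operator&() { return &m_cache; }

private:
  std::FILE  *m_file;
  unsigned    m_flags;
  std::string m_cache;
};

int main()
{
  WriteOnRelease out(stdout, WriteOnRelease::FLUSH_ON_RELEASE);
  (&out)->append("# printed when the helper goes out of scope\n");
  return 0;   /* destructor copies the buffer to stdout and flushes it */
}

Tying the copy to the destructor means the copy-and-reinit step cannot be skipped on an early return, which appears to be the point of doing the work in ~Write_on_release_cache() rather than at each call site.
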
@@ -305,7 +305,7 @@ static bool write_str(IO_CACHE *file, char *str, uint length) read_str() */ -static inline int read_str(char **buf, char *buf_end, char **str, +static inline int read_str(const char **buf, const char *buf_end, const char **str, uint8 *len) { if (*buf + ((uint) (uchar) **buf) >= buf_end) @@ -420,6 +420,7 @@ const char* Log_event::get_type_str() case DELETE_ROWS_EVENT: return "Delete_rows"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; + case INCIDENT_EVENT: return "Incident"; default: return "Unknown"; /* impossible */ } } @@ -531,25 +532,19 @@ Log_event::Log_event(const char* buf, #ifndef MYSQL_CLIENT #ifdef HAVE_REPLICATION -/* - Log_event::exec_event() -*/ - -int Log_event::exec_event(struct st_relay_log_info* rli) +int Log_event::do_update_pos(RELAY_LOG_INFO *rli) { - DBUG_ENTER("Log_event::exec_event"); - /* - rli is null when (as far as I (Guilhem) know) - the caller is - Load_log_event::exec_event *and* that one is called from - Execute_load_log_event::exec_event. - In this case, we don't do anything here ; - Execute_load_log_event::exec_event will call Log_event::exec_event - again later with the proper rli. - Strictly speaking, if we were sure that rli is null - only in the case discussed above, 'if (rli)' is useless here. - But as we are not 100% sure, keep it for now. + rli is null when (as far as I (Guilhem) know) the caller is + Load_log_event::do_apply_event *and* that one is called from + Execute_load_log_event::do_apply_event. In this case, we don't + do anything here ; Execute_load_log_event::do_apply_event will + call Log_event::do_apply_event again later with the proper rli. + Strictly speaking, if we were sure that rli is null only in the + case discussed above, 'if (rli)' is useless here. But as we are + not 100% sure, keep it for now. + + Matz: I don't think we will need this check with this refactoring. */ if (rli) { @@ -584,18 +579,37 @@ int Log_event::exec_event(struct st_relay_log_info* rli) { rli->inc_group_relay_log_pos(log_pos); flush_relay_log_info(rli); - /* - Note that Rotate_log_event::exec_event() does not call this - function, so there is no chance that a fake rotate event resets - last_master_timestamp. - Note that we update without mutex (probably ok - except in some very - rare cases, only consequence is that value may take some time to - display in Seconds_Behind_Master - not critical). + /* + Note that Rotate_log_event::do_apply_event() does not call + this function, so there is no chance that a fake rotate event + resets last_master_timestamp. Note that we update without + mutex (probably ok - except in some very rare cases, only + consequence is that value may take some time to display in + Seconds_Behind_Master - not critical). 
*/ rli->last_master_timestamp= when; } } - DBUG_RETURN(0); + + return 0; // Cannot fail currently +} + + +Log_event::enum_skip_reason +Log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + DBUG_PRINT("info", ("ev->server_id=%lu, ::server_id=%lu," + " rli->replicate_same_server_id=%d," + " rli->slave_skip_counter=%d", + (ulong) server_id, (ulong) ::server_id, + rli->replicate_same_server_id, + rli->slave_skip_counter)); + if (server_id == ::server_id && !rli->replicate_same_server_id) + return EVENT_SKIP_IGNORE; + else if (rli->slave_skip_counter > 0) + return EVENT_SKIP_COUNT; + else + return EVENT_SKIP_NOT; } @@ -642,12 +656,13 @@ int Log_event::net_send(Protocol *protocol, const char* log_name, my_off_t pos) void Log_event::init_show_field_list(List<Item>* field_list) { field_list->push_back(new Item_empty_string("Log_name", 20)); - field_list->push_back(new Item_return_int("Pos", 11, + field_list->push_back(new Item_return_int("Pos", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Event_type", 20)); field_list->push_back(new Item_return_int("Server_id", 10, MYSQL_TYPE_LONG)); - field_list->push_back(new Item_return_int("End_log_pos", 11, + field_list->push_back(new Item_return_int("End_log_pos", + MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Info", 20)); } @@ -742,7 +757,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, ulong data_len; int result=0; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; - DBUG_ENTER("read_log_event"); + DBUG_ENTER("Log_event::read_log_event"); if (log_lock) pthread_mutex_lock(log_lock); @@ -817,7 +832,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, const Format_description_log_event *description_event) #endif { - DBUG_ENTER("Log_event::read_log_event(IO_CACHE *, Format_description_log_event *"); + DBUG_ENTER("Log_event::read_log_event"); DBUG_ASSERT(description_event != 0); char head[LOG_EVENT_MINIMAL_HEADER_LEN]; /* @@ -997,10 +1012,14 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, ev = new Begin_load_query_log_event(buf, event_len, description_event); break; case EXECUTE_LOAD_QUERY_EVENT: - ev = new Execute_load_query_log_event(buf, event_len, description_event); + ev= new Execute_load_query_log_event(buf, event_len, description_event); + break; + case INCIDENT_EVENT: + ev = new Incident_log_event(buf, event_len, description_event); break; default: - DBUG_PRINT("error",("Unknown evernt code: %d",(int) buf[EVENT_TYPE_OFFSET])); + DBUG_PRINT("error",("Unknown event code: %d", + (int) buf[EVENT_TYPE_OFFSET])); ev= NULL; break; } @@ -1130,13 +1149,18 @@ void Log_event::print_header(IO_CACHE* file, char emit_buf[256]; int const bytes_written= my_snprintf(emit_buf, sizeof(emit_buf), - "# %8.8lx %-48.48s |%s|\n# ", + "# %8.8lx %-48.48s |%s|\n", (unsigned long) (hexdump_from + (i & 0xfffffff0)), hex_string, char_string); DBUG_ASSERT(bytes_written >= 0); DBUG_ASSERT(static_cast<my_size_t>(bytes_written) < sizeof(emit_buf)); my_b_write(file, (byte*) emit_buf, bytes_written); } + /* + need a # to prefix the rest of printouts for example those of + Rows_log_event::print_helper(). 
+ */ + my_b_write(file, reinterpret_cast<const byte*>("# "), 2); } DBUG_VOID_RETURN; } @@ -1148,7 +1172,6 @@ void Log_event::print_base64(IO_CACHE* file, { const uchar *ptr= (const uchar *)temp_buf; uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET); - DBUG_ENTER("Log_event::print_base64"); size_t const tmp_str_sz= base64_needed_encoded_length((int) size); @@ -1159,8 +1182,10 @@ void Log_event::print_base64(IO_CACHE* file, DBUG_VOID_RETURN; } - int const res= base64_encode(ptr, (size_t) size, tmp_str); - DBUG_ASSERT(res == 0); + if (base64_encode(ptr, (size_t) size, tmp_str)) + { + DBUG_ASSERT(0); + } if (my_b_tell(file) == 0) my_b_printf(file, "\nBINLOG '\n"); @@ -1275,7 +1300,8 @@ bool Query_log_event::write(IO_CACHE* file) 1+4+ // code of autoinc and the 2 autoinc variables 1+6+ // code of charset and charset 1+1+MAX_TIME_ZONE_NAME_LENGTH+ // code of tz and tz length and tz name - 1+2 // code of lc_time_names and lc_time_names_number + 1+2+ // code of lc_time_names and lc_time_names_number + 1+2 // code of charset_database and charset_database_number ], *start, *start_of_status; ulong event_length; @@ -1394,6 +1420,13 @@ bool Query_log_event::write(IO_CACHE* file) int2store(start, lc_time_names_number); start+= 2; } + if (charset_database_number) + { + DBUG_ASSERT(charset_database_number <= 0xFFFF); + *start++= Q_CHARSET_DATABASE_CODE; + int2store(start, charset_database_number); + start+= 2; + } /* Here there could be code like if (command-line-option-which-says-"log_this_variable" && inited) @@ -1459,7 +1492,8 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, sql_mode(thd_arg->variables.sql_mode), auto_increment_increment(thd_arg->variables.auto_increment_increment), auto_increment_offset(thd_arg->variables.auto_increment_offset), - lc_time_names_number(thd_arg->variables.lc_time_names->number) + lc_time_names_number(thd_arg->variables.lc_time_names->number), + charset_database_number(0) { time_t end_time; time(&end_time); @@ -1467,6 +1501,9 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, catalog_len = (catalog) ? (uint32) strlen(catalog) : 0; /* status_vars_len is set just before writing the event */ db_len = (db) ? 
(uint32) strlen(db) : 0; + if (thd_arg->variables.collation_database != thd_arg->db_charset) + charset_database_number= thd_arg->variables.collation_database->number; + /* If we don't use flags2 for anything else than options contained in thd->options, it would be more efficient to flags2=thd_arg->options @@ -1537,7 +1574,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, db(NullS), catalog_len(0), status_vars_len(0), flags2_inited(0), sql_mode_inited(0), charset_inited(0), auto_increment_increment(1), auto_increment_offset(1), - time_zone_len(0), lc_time_names_number(0) + time_zone_len(0), lc_time_names_number(0), charset_database_number(0) { ulong data_len; uint32 tmp; @@ -1642,6 +1679,10 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, lc_time_names_number= uint2korr(pos); pos+= 2; break; + case Q_CHARSET_DATABASE_CODE: + charset_database_number= uint2korr(pos); + pos+= 2; + break; default: /* That's why you must write status vars in growing order of code */ DBUG_PRINT("info",("Query_log_event has unknown status vars (first has\ @@ -1840,6 +1881,16 @@ void Query_log_event::print_query_header(IO_CACHE* file, lc_time_names_number, print_event_info->delimiter); print_event_info->lc_time_names_number= lc_time_names_number; } + if (charset_database_number != print_event_info->charset_database_number) + { + if (charset_database_number) + my_b_printf(file, "SET @@session.collation_database=%d%s\n", + charset_database_number, print_event_info->delimiter); + else + my_b_printf(file, "SET @@session.collation_database=DEFAULT%s\n", + print_event_info->delimiter); + print_event_info->charset_database_number= charset_database_number; + } } @@ -1855,27 +1906,28 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Query_log_event::exec_event() + Query_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Query_log_event::exec_event(struct st_relay_log_info* rli) +int Query_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - return exec_event(rli, query, q_len); + return do_apply_event(rli, query, q_len); } -int Query_log_event::exec_event(struct st_relay_log_info* rli, - const char *query_arg, uint32 q_len_arg) +int Query_log_event::do_apply_event(RELAY_LOG_INFO const *rli, + const char *query_arg, uint32 q_len_arg) { LEX_STRING new_db; int expected_error,actual_error= 0; /* - Colleagues: please never free(thd->catalog) in MySQL. This would lead to - bugs as here thd->catalog is a part of an alloced block, not an entire - alloced block (see Query_log_event::exec_event()). Same for thd->db. - Thank you. + Colleagues: please never free(thd->catalog) in MySQL. This would + lead to bugs as here thd->catalog is a part of an alloced block, + not an entire alloced block (see + Query_log_event::do_apply_event()). Same for thd->db. Thank + you. */ thd->catalog= catalog_len ? (char *) catalog : (char *)""; new_db.length= db_len; @@ -1894,11 +1946,11 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, END of the current log event (COMMIT). We save it in rli so that InnoDB can access it. 
*/ - rli->future_group_master_log_pos= log_pos; + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); - clear_all_errors(thd, rli); - rli->clear_tables_to_lock(); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); /* Note: We do not need to execute reset_one_shot_variables() if this @@ -1907,8 +1959,8 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, its companion query. If the SET is ignored because of db_ok(), the companion query will also be ignored, and if the companion query is ignored in the db_ok() test of - ::exec_event(), then the companion SET also have so we - don't need to reset_one_shot_variables(). + ::do_apply_event(), then the companion SET also have so + we don't need to reset_one_shot_variables(). */ if (rpl_filter->db_ok(thd->db)) { @@ -1974,8 +2026,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, if (time_zone_len) { String tmp(time_zone_str, time_zone_len, &my_charset_bin); - if (!(thd->variables.time_zone= - my_tz_find_with_opening_tz_tables(thd, &tmp))) + if (!(thd->variables.time_zone= my_tz_find(thd, &tmp))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), tmp.c_ptr()); thd->variables.time_zone= global_system_variables.time_zone; @@ -1995,7 +2046,21 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, } else thd->variables.lc_time_names= &my_locale_en_US; - + if (charset_database_number) + { + CHARSET_INFO *cs; + if (!(cs= get_charset(charset_database_number, MYF(0)))) + { + char buf[20]; + int10_to_str((int) charset_database_number, buf, -10); + my_error(ER_UNKNOWN_COLLATION, MYF(0), buf); + goto compare_errors; + } + thd->variables.collation_database= cs; + } + else + thd->variables.collation_database= thd->db_charset; + /* Execute the query (note that we bypass dispatch_command()) */ mysql_parse(thd, thd->query, thd->query_length); @@ -2010,7 +2075,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, to check/fix it. */ if (mysql_test_parse_for_slave(thd, thd->query, thd->query_length)) - clear_all_errors(thd, rli); /* Can ignore query */ + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); /* Can ignore query */ else { slave_print_msg(ERROR_LEVEL, rli, expected_error, @@ -2061,7 +2126,7 @@ Default database: '%s'. Query: '%s'", ignored_error_code(actual_error)) { DBUG_PRINT("info",("error ignored")); - clear_all_errors(thd, rli); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); } /* Other cases: mostly we expected no error and get one. @@ -2128,16 +2193,26 @@ end: thd->first_successful_insert_id_in_prev_stmt= 0; thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); + return thd->query_error; +} + +int Query_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ /* - If there was an error we stop. Otherwise we increment positions. Note that - we will not increment group* positions if we are just after a SET - ONE_SHOT, because SET ONE_SHOT should not be separated from its following - updating query. + Note that we will not increment group* positions if we are just + after a SET ONE_SHOT, because SET ONE_SHOT should not be separated + from its following updating query. */ - return (thd->query_error ? thd->query_error : - (thd->one_shot_set ? 
(rli->inc_event_relay_log_pos(),0) : - Log_event::exec_event(rli))); + if (thd->one_shot_set) + { + rli->inc_event_relay_log_pos(); + return 0; + } + else + return Log_event::do_update_pos(rli); } + + #endif @@ -2240,6 +2315,8 @@ Start_log_event_v3::Start_log_event_v3(const char* buf, binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET); memcpy(server_version, buf+ST_SERVER_VER_OFFSET, ST_SERVER_VER_LEN); + // prevent overrun if log is corrupted on disk + server_version[ST_SERVER_VER_LEN-1]= 0; created= uint4korr(buf+ST_CREATED_OFFSET); /* We use log_pos to mark if this was an artificial event or not */ artificial_event= (log_pos == 0); @@ -2264,7 +2341,7 @@ bool Start_log_event_v3::write(IO_CACHE* file) /* - Start_log_event_v3::exec_event() + Start_log_event_v3::do_apply_event() The master started @@ -2283,9 +2360,9 @@ bool Start_log_event_v3::write(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) +int Start_log_event_v3::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Start_log_event_v3::exec_event"); + DBUG_ENTER("Start_log_event_v3::do_apply_event"); switch (binlog_version) { case 3: @@ -2327,7 +2404,7 @@ int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) /* this case is impossible */ DBUG_RETURN(1); } - DBUG_RETURN(Log_event::exec_event(rli)); + DBUG_RETURN(0); } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -2363,6 +2440,8 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) switch (binlog_ver) { case 4: /* MySQL 5.0 */ memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); + DBUG_EXECUTE_IF("pretend_version_50034_in_binlog", + strmov(server_version, "5.0.34");); common_header_len= LOG_EVENT_HEADER_LEN; number_of_event_types= LOG_EVENT_TYPES; /* we'll catch my_malloc() error in is_valid() */ @@ -2406,6 +2485,7 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) post_header_len[DELETE_ROWS_EVENT-1]= 6;); post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= post_header_len[APPEND_BLOCK_EVENT-1]; post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN; + post_header_len[INCIDENT_EVENT-1]= INCIDENT_HEADER_LEN; } break; @@ -2453,6 +2533,7 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) post_header_len= 0; /* will make is_valid() fail */ break; } + calc_server_version_split(); } @@ -2492,6 +2573,7 @@ Format_description_log_event(const char* buf, post_header_len= (uint8*) my_memdup((byte*)buf+ST_COMMON_HEADER_LEN_OFFSET+1, number_of_event_types* sizeof(*post_header_len), MYF(0)); + calc_server_version_split(); DBUG_VOID_RETURN; } @@ -2514,24 +2596,10 @@ bool Format_description_log_event::write(IO_CACHE* file) } #endif -/* - SYNOPSIS - Format_description_log_event::exec_event() - - IMPLEMENTATION - Save the information which describes the binlog's format, to be able to - read all coming events. - Call Start_log_event_v3::exec_event(). 
-*/ - #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Format_description_log_event::exec_event(struct st_relay_log_info* rli) +int Format_description_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Format_description_log_event::exec_event"); - - /* save the information describing this binlog */ - delete rli->relay_log.description_event_for_exec; - rli->relay_log.description_event_for_exec= this; + DBUG_ENTER("Format_description_log_event::do_apply_event"); #ifdef USING_TRANSACTIONS /* @@ -2553,14 +2621,36 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli) "or ROLLBACK in relay log). A probable cause is that " "the master died while writing the transaction to " "its binary log, thus rolled back too."); - rli->cleanup_context(thd, 1); + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, 1); } #endif /* - If this event comes from ourselves, there is no cleaning task to perform, - we don't call Start_log_event_v3::exec_event() (this was just to update the - log's description event). + If this event comes from ourselves, there is no cleaning task to + perform, we don't call Start_log_event_v3::do_apply_event() + (this was just to update the log's description event). */ + if (server_id != (uint32) ::server_id) + { + /* + If the event was not requested by the slave i.e. the master sent + it while the slave asked for a position >4, the event will make + rli->group_master_log_pos advance. Say that the slave asked for + position 1000, and the Format_desc event's end is 96. Then in + the beginning of replication rli->group_master_log_pos will be + 0, then 96, then jump to first really asked event (which is + >96). So this is ok. + */ + DBUG_RETURN(Start_log_event_v3::do_apply_event(rli)); + } + DBUG_RETURN(0); +} + +int Format_description_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + /* save the information describing this binlog */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= this; + if (server_id == (uint32) ::server_id) { /* @@ -2577,21 +2667,53 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli) the Intvar_log_event respectively. */ rli->inc_event_relay_log_pos(); - DBUG_RETURN(0); + return 0; } + else + { + return Log_event::do_update_pos(rli); + } +} - /* - If the event was not requested by the slave i.e. the master sent it while - the slave asked for a position >4, the event will make - rli->group_master_log_pos advance. Say that the slave asked for position - 1000, and the Format_desc event's end is 96. Then in the beginning of - replication rli->group_master_log_pos will be 0, then 96, then jump to - first really asked event (which is >96). So this is ok. - */ - DBUG_RETURN(Start_log_event_v3::exec_event(rli)); +Log_event::enum_skip_reason +Format_description_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + return Log_event::EVENT_SKIP_NOT; } + #endif + +/** + Splits the event's 'server_version' string into three numeric pieces stored + into 'server_version_split': + X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z} + X.Yabc -> {X,Y,0} + Xabc -> {X,0,0} + 'server_version_split' is then used for lookups to find if the server which + created this event has some known bug. 
+*/ +void Format_description_log_event::calc_server_version_split() +{ + char *p= server_version, *r; + ulong number; + for (uint i= 0; i<=2; i++) + { + number= strtoul(p, &r, 10); + server_version_split[i]= (uchar)number; + DBUG_ASSERT(number < 256); // fit in uchar + p= r; + DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice + if (*r == '.') + p++; // skip the dot + } + DBUG_PRINT("info",("Format_description_log_event::server_version_split:" + " '%s' %d %d %d", server_version, + server_version_split[0], + server_version_split[1], server_version_split[2])); +} + + /************************************************************************** Load_log_event methods General note about Load_log_event: the binlogging of LOAD DATA INFILE is @@ -3072,30 +3194,32 @@ void Load_log_event::set_fields(const char* affected_db, Does the data loading job when executing a LOAD DATA on the slave SYNOPSIS - Load_log_event::exec_event - net - rli - use_rli_only_for_errors - if set to 1, rli is provided to - Load_log_event::exec_event only for this - function to have RPL_LOG_NAME and - rli->last_slave_error, both being used by - error reports. rli's position advancing - is skipped (done by the caller which is - Execute_load_log_event::exec_event). - - if set to 0, rli is provided for full use, - i.e. for error reports and position - advancing. + Load_log_event::do_apply_event + net + rli + use_rli_only_for_errors - if set to 1, rli is provided to + Load_log_event::do_apply_event + only for this function to have + RPL_LOG_NAME and + rli->last_slave_error, both being + used by error reports. rli's + position advancing is skipped (done + by the caller which is + Execute_load_log_event::do_apply_event). + - if set to 0, rli is provided for + full use, i.e. for error reports and + position advancing. DESCRIPTION Does the data loading job when executing a LOAD DATA on the slave - + RETURN VALUE - 0 Success + 0 Success 1 Failure */ -int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, - bool use_rli_only_for_errors) +int Load_log_event::do_apply_event(NET* net, RELAY_LOG_INFO const *rli, + bool use_rli_only_for_errors) { LEX_STRING new_db; new_db.length= db_len; @@ -3104,9 +3228,9 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, DBUG_ASSERT(thd->query == 0); thd->query_length= 0; // Should not be needed thd->query_error= 0; - clear_all_errors(thd, rli); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); - /* see Query_log_event::exec_event() and BUG#13360 */ + /* see Query_log_event::do_apply_event() and BUG#13360 */ DBUG_ASSERT(!rli->m_table_map.count()); /* Usually mysql_init_query() is called by mysql_parse(), but we need it here @@ -3115,22 +3239,26 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, mysql_init_query(thd, 0, 0); if (!use_rli_only_for_errors) { - /* Saved for InnoDB, see comment in Query_log_event::exec_event() */ - rli->future_group_master_log_pos= log_pos; + /* + Saved for InnoDB, see comment in + Query_log_event::do_apply_event() + */ + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); } /* - We test replicate_*_db rules. Note that we have already prepared the file - to load, even if we are going to ignore and delete it now. So it is - possible that we did a lot of disk writes for nothing. 
In other words, a - big LOAD DATA INFILE on the master will still consume a lot of space on - the slave (space in the relay log + space of temp files: twice the space - of the file to load...) even if it will finally be ignored. - TODO: fix this; this can be done by testing rules in - Create_file_log_event::exec_event() and then discarding Append_block and - al. Another way is do the filtering in the I/O thread (more efficient: no - disk writes at all). + We test replicate_*_db rules. Note that we have already prepared + the file to load, even if we are going to ignore and delete it + now. So it is possible that we did a lot of disk writes for + nothing. In other words, a big LOAD DATA INFILE on the master will + still consume a lot of space on the slave (space in the relay log + + space of temp files: twice the space of the file to load...) + even if it will finally be ignored. TODO: fix this; this can be + done by testing rules in Create_file_log_event::do_apply_event() + and then discarding Append_block and al. Another way is do the + filtering in the I/O thread (more efficient: no disk writes at + all). Note: We do not need to execute reset_one_shot_variables() if this @@ -3139,8 +3267,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, its companion query. If the SET is ignored because of db_ok(), the companion query will also be ignored, and if the companion query is ignored in the db_ok() test of - ::exec_event(), then the companion SET also have so we - don't need to reset_one_shot_variables(). + ::do_apply_event(), then the companion SET also have so + we don't need to reset_one_shot_variables(). */ if (rpl_filter->db_ok(thd->db)) { @@ -3252,8 +3380,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, ex.skip_lines = skip_lines; List<Item> field_list; - thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables); - set_fields(tables.db, field_list, &thd->main_lex.select_lex.context); + thd->lex->select_lex.context.resolve_in_table_list_only(&tables); + set_fields(tables.db, field_list, &thd->lex->select_lex.context); thd->variables.pseudo_thread_id= thread_id; if (net) { @@ -3336,7 +3464,7 @@ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", return 1; } - return ( use_rli_only_for_errors ? 0 : Log_event::exec_event(rli) ); + return ( use_rli_only_for_errors ? 0 : Log_event::do_apply_event(rli) ); } #endif @@ -3430,6 +3558,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); new_log_ident= my_strndup(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); + DBUG_PRINT("debug", ("new_log_ident: '%s'", new_log_ident)); DBUG_VOID_RETURN; } @@ -3449,8 +3578,20 @@ bool Rotate_log_event::write(IO_CACHE* file) } #endif +/** + Helper function to detect if the event is inside a group. 
+ */ +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +static bool is_in_group(THD *const thd, RELAY_LOG_INFO *const rli) +{ + return (thd->options & OPTION_BEGIN) != 0 || + (rli->last_event_start_time > 0); +} +#endif + + /* - Rotate_log_event::exec_event() + Rotate_log_event::do_apply_event() Got a rotate log event from the master @@ -3467,34 +3608,49 @@ bool Rotate_log_event::write(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Rotate_log_event::exec_event(struct st_relay_log_info* rli) +int Rotate_log_event::do_update_pos(RELAY_LOG_INFO *rli) { - DBUG_ENTER("Rotate_log_event::exec_event"); + DBUG_ENTER("Rotate_log_event::do_update_pos"); +#ifndef DBUG_OFF + char buf[32]; +#endif + + DBUG_PRINT("info", ("server_id=%lu; ::server_id=%lu", + (ulong) this->server_id, (ulong) ::server_id)); + DBUG_PRINT("info", ("new_log_ident: %s", this->new_log_ident)); + DBUG_PRINT("info", ("pos: %s", llstr(this->pos, buf))); pthread_mutex_lock(&rli->data_lock); rli->event_relay_log_pos= my_b_tell(rli->cur_log); /* - If we are in a transaction: the only normal case is when the I/O thread was - copying a big transaction, then it was stopped and restarted: we have this - in the relay log: + If we are in a transaction or in a group: the only normal case is + when the I/O thread was copying a big transaction, then it was + stopped and restarted: we have this in the relay log: + BEGIN ... ROTATE (a fake one) ... COMMIT or ROLLBACK - In that case, we don't want to touch the coordinates which correspond to - the beginning of the transaction. - Starting from 5.0.0, there also are some rotates from the slave itself, in - the relay log. + + In that case, we don't want to touch the coordinates which + correspond to the beginning of the transaction. Starting from + 5.0.0, there also are some rotates from the slave itself, in the + relay log, which shall not change the group positions. 
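To make the scenario in that comment concrete, here is a toy model of the rule the next few lines implement (ToyPositions and toy_rotate() are stand-in names, not the real RELAY_LOG_INFO): a rotate advances the group coordinates only when it did not originate on this server (unless replicate-same-server-id is set) and only when we are not in the middle of a transaction or group.

#include <cstdio>
#include <string>

/* Toy stand-in for the few RELAY_LOG_INFO fields involved. */
struct ToyPositions
{
  bool          in_group;               /* BEGIN seen, COMMIT/ROLLBACK not yet */
  std::string   group_master_log_name;
  unsigned long group_master_log_pos;
};

static void toy_rotate(ToyPositions *rli,
                       unsigned event_server_id, unsigned own_server_id,
                       bool replicate_same_server_id,
                       const std::string &new_name, unsigned long new_pos)
{
  if ((event_server_id != own_server_id || replicate_same_server_id) &&
      !rli->in_group)
  {
    rli->group_master_log_name= new_name;
    rli->group_master_log_pos=  new_pos;
  }
  /* otherwise keep pointing at the start of the interrupted transaction */
}

int main()
{
  ToyPositions rli= { false, "master-bin.000001", 4 };

  toy_rotate(&rli, /*event*/ 1, /*own*/ 2, false, "master-bin.000002", 4);
  printf("%s:%lu\n", rli.group_master_log_name.c_str(), rli.group_master_log_pos);

  rli.in_group= true;                   /* a fake rotate inside BEGIN ... COMMIT */
  toy_rotate(&rli, 1, 2, false, "master-bin.000003", 4);
  printf("%s:%lu\n", rli.group_master_log_name.c_str(), rli.group_master_log_pos);
  return 0;
}

The first call advances the coordinates; the second, issued while in_group is set, leaves them untouched.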
*/ - if (!(thd->options & OPTION_BEGIN)) + if ((server_id != ::server_id || rli->replicate_same_server_id) && + !is_in_group(thd, rli)) { + DBUG_PRINT("info", ("old group_master_log_name: '%s' " + "old group_master_log_pos: %lu", + rli->group_master_log_name, + (ulong) rli->group_master_log_pos)); memcpy(rli->group_master_log_name, new_log_ident, ident_len+1); rli->notify_group_master_log_name_update(); rli->group_master_log_pos= pos; rli->group_relay_log_pos= rli->event_relay_log_pos; - DBUG_PRINT("info", ("group_master_log_name: '%s' " - "group_master_log_pos: %lu", + DBUG_PRINT("info", ("new group_master_log_name: '%s' " + "new group_master_log_pos: %lu", rli->group_master_log_name, (ulong) rli->group_master_log_pos)); /* @@ -3513,8 +3669,28 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) pthread_mutex_unlock(&rli->data_lock); pthread_cond_broadcast(&rli->data_cond); flush_relay_log_info(rli); + DBUG_RETURN(0); } + + +Log_event::enum_skip_reason +Rotate_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + enum_skip_reason reason= Log_event::do_shall_skip(rli); + + switch (reason) { + case Log_event::EVENT_SKIP_NOT: + case Log_event::EVENT_SKIP_COUNT: + return Log_event::EVENT_SKIP_NOT; + + case Log_event::EVENT_SKIP_IGNORE: + return Log_event::EVENT_SKIP_IGNORE; + } + DBUG_ASSERT(0); + return Log_event::EVENT_SKIP_NOT; // To keep compiler happy +} + #endif @@ -3621,11 +3797,11 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Intvar_log_event::exec_event() + Intvar_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION)&& !defined(MYSQL_CLIENT) -int Intvar_log_event::exec_event(struct st_relay_log_info* rli) +int Intvar_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { switch (type) { case LAST_INSERT_ID_EVENT: @@ -3636,9 +3812,33 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) thd->force_one_auto_inc_interval(val); break; } + return 0; +} + +int Intvar_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + + +Log_event::enum_skip_reason +Intvar_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead of + 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + #endif @@ -3701,13 +3901,37 @@ void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Rand_log_event::exec_event(struct st_relay_log_info* rli) +int Rand_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { thd->rand.seed1= (ulong) seed1; thd->rand.seed2= (ulong) seed2; + return 0; +} + +int Rand_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + + +Log_event::enum_skip_reason +Rand_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead of + 2 when recovering from an insert which used a auto increment, + rand, or user var. 
Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + #endif /* !MYSQL_CLIENT */ @@ -3774,12 +3998,12 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Xid_log_event::exec_event(struct st_relay_log_info* rli) +int Xid_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { /* For a slave Xid_log_event is COMMIT */ general_log_print(thd, COM_QUERY, "COMMIT /* implicit, from Xid_log_event */"); - return end_trans(thd, COMMIT) || Log_event::exec_event(rli); + return end_trans(thd, COMMIT); } #endif /* !MYSQL_CLIENT */ @@ -4057,11 +4281,11 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - User_var_log_event::exec_event() + User_var_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int User_var_log_event::exec_event(struct st_relay_log_info* rli) +int User_var_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { Item *it= 0; CHARSET_INFO *charset; @@ -4123,9 +4347,31 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) e.update_hash(val, val_len, type, charset, DERIVATION_IMPLICIT, 0); free_root(thd->mem_root,0); + return 0; +} + +int User_var_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + +Log_event::enum_skip_reason +User_var_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead + of 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} #endif /* !MYSQL_CLIENT */ @@ -4165,7 +4411,7 @@ void Slave_log_event::pack_info(Protocol *protocol) #ifndef MYSQL_CLIENT Slave_log_event::Slave_log_event(THD* thd_arg, - struct st_relay_log_info* rli) + RELAY_LOG_INFO* rli) :Log_event(thd_arg, 0, 0) , mem_pool(0), master_host(0) { DBUG_ENTER("Slave_log_event"); @@ -4275,11 +4521,11 @@ Slave_log_event::Slave_log_event(const char* buf, uint event_len) #ifndef MYSQL_CLIENT -int Slave_log_event::exec_event(struct st_relay_log_info* rli) +int Slave_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { if (mysql_bin_log.is_open()) mysql_bin_log.write(this); - return Log_event::exec_event(rli); + return 0; } #endif /* !MYSQL_CLIENT */ @@ -4308,21 +4554,21 @@ void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Stop_log_event::exec_event() - - The master stopped. - We used to clean up all temporary tables but this is useless as, as the - master has shut down properly, it has written all DROP TEMPORARY TABLE - (prepared statements' deletion is TODO only when we binlog prep stmts). - We used to clean up slave_load_tmpdir, but this is useless as it has been - cleared at the end of LOAD DATA INFILE. - So we have nothing to do here. 
- The place were we must do this cleaning is in Start_log_event_v3::exec_event(), - not here. Because if we come here, the master was sane. + Stop_log_event::do_apply_event() + + The master stopped. We used to clean up all temporary tables but + this is useless as, as the master has shut down properly, it has + written all DROP TEMPORARY TABLE (prepared statements' deletion is + TODO only when we binlog prep stmts). We used to clean up + slave_load_tmpdir, but this is useless as it has been cleared at the + end of LOAD DATA INFILE. So we have nothing to do here. The place + were we must do this cleaning is in + Start_log_event_v3::do_apply_event(), not here. Because if we come + here, the master was sane. */ #ifndef MYSQL_CLIENT -int Stop_log_event::exec_event(struct st_relay_log_info* rli) +int Stop_log_event::do_update_pos(RELAY_LOG_INFO *rli) { /* We do not want to update master_log pos because we get a rotate event @@ -4340,6 +4586,7 @@ int Stop_log_event::exec_event(struct st_relay_log_info* rli) } return 0; } + #endif /* !MYSQL_CLIENT */ #endif /* HAVE_REPLICATION */ @@ -4530,11 +4777,11 @@ void Create_file_log_event::pack_info(Protocol *protocol) /* - Create_file_log_event::exec_event() + Create_file_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Create_file_log_event::exec_event(struct st_relay_log_info* rli) +int Create_file_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char proc_info[17+FN_REFLEN+10], *fname_buf; char *ext; @@ -4596,7 +4843,7 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - return error ? 1 : Log_event::exec_event(rli); + return error == 0; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -4704,15 +4951,15 @@ int Append_block_log_event::get_create_or_append() const } /* - Append_block_log_event::exec_event() + Append_block_log_event::do_apply_event() */ -int Append_block_log_event::exec_event(struct st_relay_log_info* rli) +int Append_block_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char proc_info[17+FN_REFLEN+10], *fname= proc_info+17; int fd; int error = 1; - DBUG_ENTER("Append_block_log_event::exec_event"); + DBUG_ENTER("Append_block_log_event::do_apply_event"); fname= strmov(proc_info, "Making temp file "); slave_load_file_stem(fname, file_id, server_id, ".data"); @@ -4751,7 +4998,7 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - DBUG_RETURN(error ? 
error : Log_event::exec_event(rli)); + DBUG_RETURN(error); } #endif @@ -4835,18 +5082,18 @@ void Delete_file_log_event::pack_info(Protocol *protocol) #endif /* - Delete_file_log_event::exec_event() + Delete_file_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Delete_file_log_event::exec_event(struct st_relay_log_info* rli) +int Delete_file_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char fname[FN_REFLEN+10]; char *ext= slave_load_file_stem(fname, file_id, server_id, ".data"); (void) my_delete(fname, MYF(MY_WME)); strmov(ext, ".info"); (void) my_delete(fname, MYF(MY_WME)); - return Log_event::exec_event(rli); + return 0; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -4932,10 +5179,10 @@ void Execute_load_log_event::pack_info(Protocol *protocol) /* - Execute_load_log_event::exec_event() + Execute_load_log_event::do_apply_event() */ -int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) +int Execute_load_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char fname[FN_REFLEN+10]; char *ext; @@ -4966,14 +5213,15 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) lev->thd = thd; /* - lev->exec_event should use rli only for errors - i.e. should not advance rli's position. - lev->exec_event is the place where the table is loaded (it calls - mysql_load()). + lev->do_apply_event should use rli only for errors i.e. should + not advance rli's position. + + lev->do_apply_event is the place where the table is loaded (it + calls mysql_load()). */ - rli->future_group_master_log_pos= log_pos; - if (lev->exec_event(0,rli,1)) + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; + if (lev->do_apply_event(0,rli,1)) { /* We want to indicate the name of the file that could not be loaded @@ -5016,7 +5264,7 @@ err: my_close(fd, MYF(0)); end_io_cache(&file); } - return error ? error : Log_event::exec_event(rli); + return error; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -5184,7 +5432,7 @@ void Execute_load_query_log_event::pack_info(Protocol *protocol) int -Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) +Execute_load_query_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char *p; char *buf; @@ -5221,7 +5469,7 @@ Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) p= strmake(p, STRING_WITH_LEN(" INTO")); p= strmake(p, query+fn_pos_end, q_len-fn_pos_end); - error= Query_log_event::exec_event(rli, buf, p-buf); + error= Query_log_event::do_apply_event(rli, buf, p-buf); /* Forging file name for deletion in same buffer */ *fname_end= 0; @@ -5277,7 +5525,7 @@ bool sql_ex_info::write_data(IO_CACHE* file) sql_ex_info::init() */ -char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) +char *sql_ex_info::init(char *buf, char *buf_end, bool use_new_format) { cached_new_format = use_new_format; if (use_new_format) @@ -5290,11 +5538,12 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) the case when we have old format because we will be reusing net buffer to read the actual file before we write out the Create_file event. 
*/ - if (read_str(&buf, buf_end, &field_term, &field_term_len) || - read_str(&buf, buf_end, &enclosed, &enclosed_len) || - read_str(&buf, buf_end, &line_term, &line_term_len) || - read_str(&buf, buf_end, &line_start, &line_start_len) || - read_str(&buf, buf_end, &escaped, &escaped_len)) + const char *ptr= buf; + if (read_str(&ptr, buf_end, (const char **) &field_term, &field_term_len) || + read_str(&ptr, buf_end, (const char **) &enclosed, &enclosed_len) || + read_str(&ptr, buf_end, (const char **) &line_term, &line_term_len) || + read_str(&ptr, buf_end, (const char **) &line_start, &line_start_len) || + read_str(&ptr, buf_end, (const char **) &escaped, &escaped_len)) return 0; opt_flags = *buf++; } @@ -5362,7 +5611,10 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid, memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols)); } else - m_cols.bitmap= 0; // to not free it + { + // Needed because bitmap_init() does not set it to null on failure + m_cols.bitmap= 0; + } } #endif @@ -5399,14 +5651,57 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, m_flags= uint2korr(post_start); - byte const *const var_start= (const byte *)buf + common_header_len + - post_header_len; + byte const *const var_start= + (const byte *)buf + common_header_len + post_header_len; byte const *const ptr_width= var_start; uchar *ptr_after_width= (uchar*) ptr_width; + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); m_width = net_field_length(&ptr_after_width); + DBUG_PRINT("debug", ("m_width=%lu", m_width)); + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols, + m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, + (m_width + 7) & ~7UL, + false))) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8); + ptr_after_width+= (m_width + 7) / 8; + DBUG_DUMP("m_cols", (char*) m_cols.bitmap, no_bytes_in_map(&m_cols)); + } + else + { + // Needed because bitmap_init() does not set it to null on failure + m_cols.bitmap= NULL; + DBUG_VOID_RETURN; + } + + m_cols_ai.bitmap= m_cols.bitmap; /* See explanation in is_valid() */ + + if (event_type == UPDATE_ROWS_EVENT) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols_ai, + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, + (m_width + 7) & ~7UL, + false))) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + memcpy(m_cols_ai.bitmap, ptr_after_width, (m_width + 7) / 8); + ptr_after_width+= (m_width + 7) / 8; + DBUG_DUMP("m_cols_ai", (char*) m_cols_ai.bitmap, no_bytes_in_map(&m_cols_ai)); + } + else + { + // Needed because bitmap_init() does not set it to null on failure + m_cols_ai.bitmap= 0; + DBUG_VOID_RETURN; + } + } - const uint byte_count= (m_width + 7) / 8; - const byte* const ptr_rows_data= var_start + byte_count + 1; + const byte* const ptr_rows_data= (const byte*) ptr_after_width; my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu", @@ -5415,12 +5710,6 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); if (likely((bool)m_rows_buf)) { - /* if bitmap_init fails, catched in is_valid() */ - if (likely(!bitmap_init(&m_cols, - m_width <= sizeof(m_bitbuf)*8 ? 
m_bitbuf : NULL, - (m_width + 7) & ~7UL, - false))) - memcpy(m_cols.bitmap, ptr_after_width, byte_count); m_rows_end= m_rows_buf + data_size; m_rows_cur= m_rows_end; memcpy(m_rows_buf, ptr_rows_data, data_size); @@ -5439,6 +5728,29 @@ Rows_log_event::~Rows_log_event() my_free((gptr)m_rows_buf, MYF(MY_ALLOW_ZERO_PTR)); } +int Rows_log_event::get_data_size() +{ + int const type_code= get_type_code(); + + char buf[sizeof(m_width)+1]; + char *end= net_store_length(buf, (m_width + 7) / 8); + + DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", + return 6 + no_bytes_in_map(&m_cols) + (end - buf) + + (type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) + + (m_rows_cur - m_rows_buf);); + int data_size= ROWS_HEADER_LEN; + data_size+= no_bytes_in_map(&m_cols); + data_size+= end - buf; + + if (type_code == UPDATE_ROWS_EVENT) + data_size+= no_bytes_in_map(&m_cols_ai); + + data_size+= (m_rows_cur - m_rows_buf); + return data_size; +} + + #ifndef MYSQL_CLIENT int Rows_log_event::do_add_row_data(byte *const row_data, my_size_t const length) @@ -5464,14 +5776,14 @@ int Rows_log_event::do_add_row_data(byte *const row_data, DBUG_ASSERT(m_rows_cur <= m_rows_end); /* The cast will always work since m_rows_cur <= m_rows_end */ - if (static_cast<my_size_t>(m_rows_end - m_rows_cur) < length) + if (static_cast<my_size_t>(m_rows_end - m_rows_cur) <= length) { my_size_t const block_size= 1024; my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf; my_ptrdiff_t const new_alloc= - block_size * ((cur_size + length) / block_size + block_size - 1); + block_size * ((cur_size + length + block_size - 1) / block_size); - byte* const new_buf= (byte*)my_realloc((gptr)m_rows_buf, new_alloc, + byte* const new_buf= (byte*)my_realloc((gptr)m_rows_buf, (uint) new_alloc, MYF(MY_ALLOW_ZERO_PTR|MY_WME)); if (unlikely(!new_buf)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -5490,7 +5802,7 @@ int Rows_log_event::do_add_row_data(byte *const row_data, m_rows_end= m_rows_buf + new_alloc; } - DBUG_ASSERT(m_rows_cur + length < m_rows_end); + DBUG_ASSERT(m_rows_cur + length <= m_rows_end); memcpy(m_rows_cur, row_data, length); m_rows_cur+= length; m_row_count++; @@ -5512,10 +5824,10 @@ int Rows_log_event::do_add_row_data(byte *const row_data, row_end Pointer to variable that will hold the value of the one-after-end position for the row master_reclength - Pointer to variable that will be set to the length of the - record on the master side - rw_set Pointer to bitmap that holds either the read_set or the - write_set of the table + Pointer to variable that will be set to the length of the + record on the master side + rw_set Pointer to bitmap that holds either the read_set or the + write_set of the table DESCRIPTION @@ -5541,70 +5853,80 @@ int Rows_log_event::do_add_row_data(byte *const row_data, the master does not have a default value (and isn't nullable) */ static int -unpack_row(RELAY_LOG_INFO *rli, +unpack_row(RELAY_LOG_INFO const *rli, TABLE *table, uint const colcnt, - char const *row, MY_BITMAP const *cols, - char const **row_end, ulong *master_reclength, + char const *const row_data, MY_BITMAP const *cols, + char const **const row_end, ulong *const master_reclength, MY_BITMAP* const rw_set, Log_event_type const event_type) { - byte *const record= table->record[0]; DBUG_ENTER("unpack_row"); - DBUG_ASSERT(record && row); - DBUG_PRINT("enter", ("row: 0x%lx table->record[0]: 0x%lx", (long) row, (long) record)); - my_size_t master_null_bytes= table->s->null_bytes; - - if (colcnt != table->s->fields) - { - Field **fptr= 
&table->field[colcnt-1]; - do - master_null_bytes= (*fptr)->last_null_byte(); - while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF && - fptr-- > table->field); - - /* - If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time, - there were no nullable fields nor BIT fields at all in the - columns that are common to the master and the slave. In that - case, there is only one null byte holding the X bit. + DBUG_ASSERT(row_data); + my_size_t const master_null_byte_count= (bitmap_bits_set(cols) + 7) / 8; + int error= 0; - OBSERVE! There might still be nullable columns following the - common columns, so table->s->null_bytes might be greater than 1. - */ - if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF) - master_null_bytes= 1; - } + char const *null_ptr= row_data; + char const *pack_ptr= row_data + master_null_byte_count; - DBUG_ASSERT(master_null_bytes <= table->s->null_bytes); - memcpy(record, row, master_null_bytes); // [1] - int error= 0; + bitmap_clear_all(rw_set); - bitmap_set_all(rw_set); + empty_record(table); Field **const begin_ptr = table->field; Field **field_ptr; - char const *ptr= row + master_null_bytes; Field **const end_ptr= begin_ptr + colcnt; + + DBUG_ASSERT(null_ptr < row_data + master_null_byte_count); + + // Mask to mask out the correct bit among the null bits + unsigned int null_mask= 1U; + // The "current" null bits + unsigned int null_bits= *null_ptr++; for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) { Field *const f= *field_ptr; + /* + No need to bother about columns that does not exist: they have + gotten default values when being emptied above. + */ if (bitmap_is_set(cols, field_ptr - begin_ptr)) { - DBUG_ASSERT((const char *)table->record[0] <= f->ptr); - DBUG_ASSERT(f->ptr < (char*)(table->record[0] + table->s->reclength + - (f->pack_length_in_rec() == 0))); + if ((null_mask & 0xFF) == 0) + { + DBUG_ASSERT(null_ptr < row_data + master_null_byte_count); + null_mask= 1U; + null_bits= *null_ptr++; + } + + DBUG_ASSERT(null_mask & 0xFF); // One of the 8 LSB should be set - DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, - (long) f->ptr)); - ptr= f->unpack(f->ptr, ptr); /* Field...::unpack() cannot return 0 */ - DBUG_ASSERT(ptr != NULL); + DBUG_ASSERT(pack_ptr != NULL); + + if ((null_bits & null_mask) && f->maybe_null()) + f->set_null(); + else + { + f->set_notnull(); + + /* + We only unpack the field if it was non-null + */ + pack_ptr= f->unpack(f->ptr, pack_ptr); + } + + bitmap_set_bit(rw_set, f->field_index); + null_mask <<= 1; } - else - bitmap_clear_bit(rw_set, field_ptr - begin_ptr); } - *row_end = ptr; + /* + We should now have read all the null bytes, otherwise something is + really wrong. 
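Since the null handling above is the subtle part of the new row image, here is a standalone sketch of just that walk, under the assumption that every column was sent (walk_null_bits() and the fixed demo row are made up for illustration; the real code additionally checks Field::maybe_null() and unpacks each non-NULL column with Field::unpack()): the image starts with one bit per sent column, least significant bit first, padded to whole bytes, and NULL columns contribute no further bytes.

#include <cstdio>

static void walk_null_bits(const unsigned char *row, unsigned colcnt)
{
  const unsigned null_byte_count= (colcnt + 7) / 8;  /* same formula as above */
  const unsigned char *null_ptr= row;

  unsigned null_mask= 1U;            /* bit of the current column            */
  unsigned null_bits= *null_ptr++;   /* the "current" byte of null bits      */

  printf("null bitmap occupies %u byte(s)\n", null_byte_count);
  for (unsigned col= 0; col < colcnt; col++)
  {
    if ((null_mask & 0xFF) == 0)     /* eight bits consumed: fetch next byte */
    {
      null_mask= 1U;
      null_bits= *null_ptr++;
    }
    if (null_bits & null_mask)
      printf("column %u: NULL, no bytes in the row image\n", col);
    else
      printf("column %u: not NULL, its packed value follows the bitmap\n", col);
    null_mask<<= 1;
  }
}

int main()
{
  /* 10 columns -> 2 null bytes; bits 1 and 9 set, so columns 1 and 9 are NULL. */
  const unsigned char row[]= { 0x02, 0x02 };
  walk_null_bits(row, 10);
  return 0;
}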
+ */ + DBUG_ASSERT(null_ptr == row_data + master_null_byte_count); + + *row_end = pack_ptr; if (master_reclength) { if (*field_ptr) @@ -5629,9 +5951,8 @@ unpack_row(RELAY_LOG_INFO *rli, uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; Field *const f= *field_ptr; - DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, - (long) f->ptr)); - if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask) + if (event_type == WRITE_ROWS_EVENT && + ((*field_ptr)->flags & mask) == mask) { slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, "Field `%s` of table `%s`.`%s` " @@ -5647,17 +5968,17 @@ unpack_row(RELAY_LOG_INFO *rli, DBUG_RETURN(error); } -int Rows_log_event::exec_event(st_relay_log_info *rli) +int Rows_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Rows_log_event::exec_event(st_relay_log_info*)"); + DBUG_ENTER("Rows_log_event::do_apply_event(st_relay_log_info*)"); int error= 0; char const *row_start= (char const *)m_rows_buf; /* - If m_table_id == ~0UL, then we have a dummy event that does - not contain any data. In that case, we just remove all tables in - the tables_to_lock list, close the thread tables, step the relay - log position, and return with success. + If m_table_id == ~0UL, then we have a dummy event that does not + contain any data. In that case, we just remove all tables in the + tables_to_lock list, close the thread tables, and return with + success. */ if (m_table_id == ~0UL) { @@ -5667,16 +5988,16 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ DBUG_ASSERT(get_flags(STMT_END_F)); - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); close_thread_tables(thd); thd->clear_error(); - rli->inc_event_relay_log_pos(); DBUG_RETURN(0); } /* 'thd' has been set by exec_relay_log_event(), just before calling - exec_event(). We still check here to prevent future coding errors. + do_apply_event(). We still check here to prevent future coding + errors. */ DBUG_ASSERT(rli->sql_thd == thd); @@ -5692,8 +6013,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* lock_tables() reads the contents of thd->lex, so they must be - initialized. Contrary to in Table_map_log_event::exec_event() we don't - call mysql_init_query() as that may reset the binlog format. + initialized. Contrary to in + Table_map_log_event::do_apply_event() we don't call + mysql_init_query() as that may reset the binlog format. */ lex_start(thd, NULL, 0); @@ -5722,7 +6044,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) "Error in %s event: when locking tables", get_type_str()); } - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(error); } @@ -5740,10 +6062,11 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) need to add code to assert that is the case. 
*/ thd->binlog_flush_pending_rows_event(false); - close_tables_for_reopen(thd, &rli->tables_to_lock); + TABLE_LIST *tables= rli->tables_to_lock; + close_tables_for_reopen(thd, &tables); - if ((error= open_tables(thd, &rli->tables_to_lock, - &rli->tables_to_lock_count, 0))) + uint tables_count= rli->tables_to_lock_count; + if ((error= open_tables(thd, &tables, &tables_count, 0))) { if (thd->query_error || thd->is_fatal_error) { @@ -5758,38 +6081,68 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) "unexpected success or fatal error")); thd->query_error= 1; } - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(error); } } + + /* + When the open and locking succeeded, we check all tables to + ensure that they still have the correct type. + + We can use a down cast here since we know that every table added + to the tables_to_lock is a RPL_TABLE_LIST. + */ + + { + RPL_TABLE_LIST *ptr= rli->tables_to_lock; + for ( ; ptr ; ptr= static_cast<RPL_TABLE_LIST*>(ptr->next_global)) + { + if (ptr->m_tabledef.compatible_with(rli, ptr->table)) + { + mysql_unlock_tables(thd, thd->lock); + thd->lock= 0; + thd->query_error= 1; + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); + DBUG_RETURN(ERR_BAD_TABLE_DEF); + } + } + } + /* - When the open and locking succeeded, we add all the tables to - the table map and remove them from tables to lock. + ... and then we add all the tables to the table map and remove + them from tables to lock. We also invalidate the query cache for all the tables, since they will now be changed. + + TODO [/Matz]: Maybe the query cache should not be invalidated + here? It might be that a table is not changed, even though it + was locked for the statement. We do know that each + Rows_log_event contain at least one row, so after processing one + Rows_log_event, we can invalidate the query cache for the + associated table. */ - TABLE_LIST *ptr; - for (ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global) + for (TABLE_LIST *ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global) { - rli->m_table_map.set_table(ptr->table_id, ptr->table); + const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.set_table(ptr->table_id, ptr->table); } #ifdef HAVE_QUERY_CACHE query_cache.invalidate_locked_for_write(rli->tables_to_lock); #endif - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); } DBUG_ASSERT(rli->tables_to_lock == NULL && rli->tables_to_lock_count == 0); - TABLE* table= rli->m_table_map.get_table(m_table_id); + TABLE* table= const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.get_table(m_table_id); if (table) { /* table == NULL means that this table should not be replicated - (this was set up by Table_map_log_event::exec_event() which - tested replicate-* rules). + (this was set up by Table_map_log_event::do_apply_event() + which tested replicate-* rules). 
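The NULL test consulted just above (rli->m_table_map.get_table(m_table_id)) relies on the mapping that the Table_map events established earlier in the statement. As a rough illustration of that contract (ToyTableMap and ToyTable are stand-ins built on std::unordered_map, not the server's hash): Table_map registers table id to table, Rows events look the id up, and a missing entry means the table was filtered out and its rows are skipped.

#include <cstdio>
#include <string>
#include <unordered_map>

/* Stand-in for the server's TABLE structure. */
struct ToyTable { std::string db, name; };

/* Toy version of the table-id -> TABLE mapping. */
class ToyTableMap
{
  std::unordered_map<unsigned long, ToyTable*> m_map;
public:
  void set_table(unsigned long id, ToyTable *table) { m_map[id]= table; }
  ToyTable *get_table(unsigned long id) const
  {
    auto it= m_map.find(id);
    return it == m_map.end() ? nullptr : it->second;   /* NULL: filtered out */
  }
};

int main()
{
  ToyTable t1= { "test", "t1" };
  ToyTableMap map;

  map.set_table(17, &t1);                 /* what a Table_map event would do */

  if (ToyTable *t= map.get_table(17))     /* what a Rows event would do      */
    printf("apply rows to %s.%s\n", t->db.c_str(), t->name.c_str());
  if (map.get_table(42) == nullptr)
    printf("table id 42 is not mapped: rows for it are skipped\n");
  return 0;
}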
*/ /* @@ -5846,7 +6199,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) break; default: - slave_print_msg(ERROR_LEVEL, rli, error, + slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno, "Error in %s event: row application failed", get_type_str()); thd->query_error= 1; @@ -5856,7 +6209,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) row_start= row_end; } DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", - rli->abort_slave=1;); + const_cast<RELAY_LOG_INFO*>(rli)->abort_slave= 1;); error= do_after_row_operations(table, error); if (!cache_stmt) { @@ -5867,11 +6220,12 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) if (error) { /* error has occured during the transaction */ - slave_print_msg(ERROR_LEVEL, rli, error, + slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno, "Error in %s event: error during transaction execution " "on table %s.%s", get_type_str(), table->s->db.str, table->s->table_name.str); + /* If one day we honour --skip-slave-errors in row-based replication, and the error should be skipped, then we would clear mappings, rollback, @@ -5884,7 +6238,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) rollback at the caller along with sbr. */ thd->reset_current_stmt_binlog_row_based(); - rli->cleanup_context(thd, 0); /* rollback at caller in step with sbr */ + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, error); thd->query_error= 1; DBUG_RETURN(error); } @@ -5928,8 +6282,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ thd->reset_current_stmt_binlog_row_based(); - rli->cleanup_context(thd, 0); - rli->transaction_end(thd); + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, 0); if (error == 0) { @@ -5942,7 +6295,6 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) do not become visible. We still prefer to wipe them out. */ thd->clear_error(); - error= Log_event::exec_event(rli); } else slave_print_msg(ERROR_LEVEL, rli, error, @@ -5969,17 +6321,17 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) wait (reached end of last relay log and nothing gets appended there), we timeout after one minute, and notify DBA about the problem. When WL#2975 is implemented, just remove the member - st_relay_log_info::unsafe_to_stop_at and all its occurences. + st_relay_log_info::last_event_start_time and all its occurences. 
*/ - rli->unsafe_to_stop_at= time(0); + const_cast<RELAY_LOG_INFO*>(rli)->last_event_start_time= time(0); } DBUG_ASSERT(error == 0); thd->clear_error(); - rli->inc_event_relay_log_pos(); - + DBUG_RETURN(0); } + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifndef MYSQL_CLIENT @@ -6006,15 +6358,35 @@ bool Rows_log_event::write_data_body(IO_CACHE*file) */ char sbuf[sizeof(m_width)]; my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf; + bool res= false; char *const sbuf_end= net_store_length((char*) sbuf, (uint) m_width); DBUG_ASSERT(static_cast<my_size_t>(sbuf_end - sbuf) <= sizeof(sbuf)); - return (my_b_safe_write(file, reinterpret_cast<byte*>(sbuf), - sbuf_end - sbuf) || - my_b_safe_write(file, reinterpret_cast<byte*>(m_cols.bitmap), - no_bytes_in_map(&m_cols)) || - my_b_safe_write(file, m_rows_buf, data_size)); + DBUG_DUMP("m_width", sbuf, sbuf_end - sbuf); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(sbuf), + sbuf_end - sbuf); + + DBUG_DUMP("m_cols", (char*) m_cols.bitmap, no_bytes_in_map(&m_cols)); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(m_cols.bitmap), + no_bytes_in_map(&m_cols)); + /* + TODO[refactor write]: Remove the "down cast" here (and elsewhere). + */ + if (get_type_code() == UPDATE_ROWS_EVENT) + { + DBUG_DUMP("m_cols_ai", (char*) m_cols_ai.bitmap, no_bytes_in_map(&m_cols_ai)); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(m_cols_ai.bitmap), + no_bytes_in_map(&m_cols_ai)); + } + DBUG_DUMP("rows", m_rows_buf, data_size); + res= res || my_b_safe_write(file, m_rows_buf, (uint) data_size); + + return res; + } #endif @@ -6041,16 +6413,14 @@ void Rows_log_event::print_helper(FILE *file, { bool const last_stmt_event= get_flags(STMT_END_F); print_header(head, print_event_info, !last_stmt_event); - my_b_printf(head, "\t%s: table id %lu", name, m_table_id); + my_b_printf(head, "\t%s: table id %lu\n", name, m_table_id); print_base64(body, print_event_info, !last_stmt_event); } if (get_flags(STMT_END_F)) { - my_b_copy_to_file(head, file); - my_b_copy_to_file(body, file); - reinit_io_cache(head, WRITE_CACHE, 0, FALSE, TRUE); - reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE); + copy_event_cache_to_file_and_reinit(head, file); + copy_event_cache_to_file_and_reinit(body, file); } } #endif @@ -6159,15 +6529,15 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, const char *const vpart= buf + common_header_len + post_header_len; /* Extract the length of the various parts from the buffer */ - byte const* const ptr_dblen= (byte const*)vpart + 0; + byte const *const ptr_dblen= (byte const*)vpart + 0; m_dblen= *(uchar*) ptr_dblen; /* Length of database name + counter + terminating null */ - byte const* const ptr_tbllen= ptr_dblen + m_dblen + 2; + byte const *const ptr_tbllen= ptr_dblen + m_dblen + 2; m_tbllen= *(uchar*) ptr_tbllen; /* Length of table name + counter + terminating null */ - byte const* const ptr_colcnt= ptr_tbllen + m_tbllen + 2; + byte const *const ptr_colcnt= ptr_tbllen + m_tbllen + 2; uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; m_colcnt= net_field_length(&ptr_after_colcnt); @@ -6212,9 +6582,9 @@ Table_map_log_event::~Table_map_log_event() */ #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int Table_map_log_event::exec_event(st_relay_log_info *rli) +int Table_map_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Table_map_log_event::exec_event(st_relay_log_info*)"); + DBUG_ENTER("Table_map_log_event::do_apply_event(st_relay_log_info*)"); 
DBUG_ASSERT(rli->sql_thd == thd); @@ -6223,11 +6593,11 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) thd->query_id= next_query_id(); pthread_mutex_unlock(&LOCK_thread_count); - TABLE_LIST *table_list; + RPL_TABLE_LIST *table_list; char *db_mem, *tname_mem; void *const memory= my_multi_malloc(MYF(MY_WME), - &table_list, sizeof(TABLE_LIST), + &table_list, sizeof(RPL_TABLE_LIST), &db_mem, NAME_LEN + 1, &tname_mem, NAME_LEN + 1, NULL); @@ -6273,11 +6643,27 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) } /* - Open the table if it is not already open and add the table to table map. - Note that for any table that should not be replicated, a filter is needed. + Open the table if it is not already open and add the table to + table map. Note that for any table that should not be + replicated, a filter is needed. + + The creation of a new TABLE_LIST is used to up-cast the + table_list consisting of RPL_TABLE_LIST items. This will work + since the only case where the argument to open_tables() is + changed, is when thd->lex->query_tables == table_list, i.e., + when the statement requires prelocking. Since this is not + executed when a statement is executed, this case will not occur. + As a precaution, an assertion is added to ensure that the bad + case is not a fact. + + Either way, the memory in the list is *never* released + internally in the open_tables() function, hence we take a copy + of the pointer to make sure that it's not lost. */ uint count; - if ((error= open_tables(thd, &table_list, &count, 0))) + DBUG_ASSERT(thd->lex->query_tables != table_list); + TABLE_LIST *tmp_table_list= table_list; + if ((error= open_tables(thd, &tmp_table_list, &count, 0))) { if (thd->query_error || thd->is_fatal_error) { @@ -6304,43 +6690,41 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) */ DBUG_ASSERT(m_table->in_use); - table_def const def(m_coltype, m_colcnt); - if (def.compatible_with(rli, m_table)) - { - thd->query_error= 1; - error= ERR_BAD_TABLE_DEF; - goto err; - /* purecov: end */ - } + /* + Use placement new to construct the table_def instance in the + memory allocated for it inside table_list. + + The memory allocated by the table_def structure (i.e., not the + memory allocated *for* the table_def structure) is released + inside st_relay_log_info::clear_tables_to_lock() by calling the + table_def destructor explicitly. + */ + new (&table_list->m_tabledef) table_def(m_coltype, m_colcnt); + table_list->m_tabledef_valid= TRUE; /* We record in the slave's information that the table should be locked by linking the table into the list of tables to lock. */ table_list->next_global= table_list->next_local= rli->tables_to_lock; - rli->tables_to_lock= table_list; - rli->tables_to_lock_count++; + const_cast<RELAY_LOG_INFO*>(rli)->tables_to_lock= table_list; + const_cast<RELAY_LOG_INFO*>(rli)->tables_to_lock_count++; /* 'memory' is freed in clear_tables_to_lock */ } - /* - We explicitly do not call Log_event::exec_event() here since we do not - want the relay log position to be flushed to disk. The flushing will be - done by the last Rows_log_event that either ends a statement (outside a - transaction) or a transaction. - - A table map event can *never* end a transaction or a statement, so we - just step the relay log position. 
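The placement-new construction of m_tabledef a few lines up is a standard but easy-to-misread idiom, so here is a self-contained sketch of it (ToyTableDef, ToyTableListEntry and the single malloc() stand in for table_def, RPL_TABLE_LIST and my_multi_malloc(); the real member is not raw storage, but the lifetime rules are the same): the object is constructed into memory that was allocated as part of a larger flat block, and it must later be destroyed with an explicit destructor call rather than delete.

#include <cstdio>
#include <cstdlib>
#include <new>
#include <string>

/* Stand-in for table_def: something with a real constructor/destructor. */
struct ToyTableDef
{
  std::string types;
  ToyTableDef(const char *t) : types(t) { printf("table_def(%s) constructed\n", t); }
  ~ToyTableDef()                        { printf("table_def destroyed\n"); }
};

/* Stand-in for RPL_TABLE_LIST: storage for a ToyTableDef lives inside it. */
struct ToyTableListEntry
{
  alignas(ToyTableDef) unsigned char tabledef_storage[sizeof(ToyTableDef)];
  bool tabledef_valid;
};

int main()
{
  /* One flat allocation for the whole entry (the real code uses my_multi_malloc). */
  ToyTableListEntry *entry=
    (ToyTableListEntry *) malloc(sizeof(ToyTableListEntry));

  /* Placement new: construct the member inside the already-allocated block. */
  ToyTableDef *def= new (entry->tabledef_storage) ToyTableDef("int,varchar");
  entry->tabledef_valid= true;

  /* Later (clear_tables_to_lock in the real code): explicit destructor call,
     then free the flat block; never 'delete def', it was not new'ed on its own. */
  if (entry->tabledef_valid)
    def->~ToyTableDef();
  free(entry);
  return 0;
}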
- */ - - if (likely(!error)) - rli->inc_event_relay_log_pos(); DBUG_RETURN(error); err: my_free((gptr) memory, MYF(MY_WME)); DBUG_RETURN(error); } + +int Table_map_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + rli->inc_event_relay_log_pos(); + return 0; +} + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifndef MYSQL_CLIENT @@ -6367,8 +6751,8 @@ bool Table_map_log_event::write_data_body(IO_CACHE *file) DBUG_ASSERT(m_dblen < 128); DBUG_ASSERT(m_tbllen < 128); - byte const dbuf[]= { m_dblen }; - byte const tbuf[]= { m_tbllen }; + byte const dbuf[]= { (byte) m_dblen }; + byte const tbuf[]= { (byte) m_tbllen }; char cbuf[sizeof(m_colcnt)]; char *const cbuf_end= net_store_length((char*) cbuf, (uint) m_colcnt); @@ -6505,7 +6889,7 @@ int Write_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -6646,6 +7030,42 @@ copy_extra_record_fields(TABLE *table, return 0; // All OK } +#define DBUG_PRINT_BITSET(N,FRM,BS) \ + do { \ + char buf[256]; \ + for (uint i = 0 ; i < (BS)->n_bits ; ++i) \ + buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \ + buf[(BS)->n_bits] = '\0'; \ + DBUG_PRINT((N), ((FRM), buf)); \ + } while (0) + + +/** + Check if an error is a duplicate key error. + + This function is used to check if an error code is one of the + duplicate key error, i.e., and error code for which it is sensible + to do a <code>get_dup_key()</code> to retrieve the duplicate key. + + @param errcode The error code to check. + + @return <code>true</code> if the error code is such that + <code>get_dup_key()</code> will return true, <code>false</code> + otherwise. + */ +bool +is_duplicate_key_error(int errcode) +{ + switch (errcode) + { + case HA_ERR_FOUND_DUPP_KEY: + case HA_ERR_FOUND_DUPP_UNIQUE: + return true; + } + return false; +} + + /* Replace the provided record in the database. 
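Before the replace_record() hunk below, a compressed picture of what that loop achieves may help (ToyEngine and toy_replace() are stand-ins with a single unique key and no error classification; the real code distinguishes duplicate-key errors with is_duplicate_key_error(), locates the conflicting row via dup_ref or a key read, and may update it in place instead of deleting): keep attempting the insert, and on each duplicate-key failure remove the offending row and retry.

#include <cstdio>
#include <map>
#include <string>

/* Toy "storage engine": one unique key (int) -> row payload. */
struct ToyEngine
{
  std::map<int, std::string> rows;

  /* Returns false on duplicate key, true on success (no other errors here). */
  bool write_row(int key, const std::string &val)
  { return rows.emplace(key, val).second; }

  void delete_row(int key) { rows.erase(key); }
};

static void toy_replace(ToyEngine *engine, int key, const std::string &val)
{
  while (!engine->write_row(key, val))
    engine->delete_row(key);      /* retry the insert with the offending row gone */
}

int main()
{
  ToyEngine engine;
  toy_replace(&engine, 1, "first");
  toy_replace(&engine, 1, "second");                 /* conflicts, replaces     */
  printf("key 1 -> %s\n", engine.rows[1].c_str());   /* prints: key 1 -> second */
  return 0;
}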
@@ -6678,6 +7098,12 @@ replace_record(THD *thd, TABLE *table, int keynum; auto_afree_ptr<char> key(NULL); +#ifndef DBUG_OFF + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set); + DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set); +#endif + while ((error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) @@ -6688,7 +7114,7 @@ replace_record(THD *thd, TABLE *table, if ((keynum= table->file->get_dup_key(error)) < 0) { /* We failed to retrieve the duplicate key */ - DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); + DBUG_RETURN(error); } /* @@ -6705,7 +7131,10 @@ replace_record(THD *thd, TABLE *table, { error= table->file->rnd_pos(table->record[1], table->file->dup_ref); if (error) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } else { @@ -6722,12 +7151,15 @@ replace_record(THD *thd, TABLE *table, } key_copy((byte*)key.get(), table->record[0], table->key_info + keynum, 0); - error= table->file->index_read_idx(table->record[1], keynum, + error= table->file->index_read_idx(table->record[1], keynum, (const byte*)key.get(), - table->key_info[keynum].key_length, + HA_WHOLE_KEY, HA_READ_KEY_EXACT); if (error) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } /* @@ -6760,15 +7192,21 @@ replace_record(THD *thd, TABLE *table, { error=table->file->ha_update_row(table->record[1], table->record[0]); + if (error) + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } else { if ((error= table->file->ha_delete_row(table->record[1]))) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } /* Will retry ha_write_row() with the offending row removed. */ } } + DBUG_RETURN(error); } @@ -6799,20 +7237,75 @@ void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info) */ static bool record_compare(TABLE *table) { + /* + Need to set the X bit and the filler bits in both records since + there are engines that do not set it correctly. + + In addition, since MyISAM checks that one hasn't tampered with the + record, it is necessary to restore the old bytes into the record + after doing the comparison. + + TODO[record format ndb]: Remove it once NDB returns correct + records. Check that the other engines also return correct records. + */ + + bool result= FALSE; + byte saved_x[2], saved_filler[2]; + + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + saved_x[i]= table->record[i][0]; + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][0]|= 1U; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + } + if (table->s->blob_fields + table->s->varchar_fields == 0) - return cmp_record(table,record[1]); + { + result= cmp_record(table,record[1]); + goto record_compare_exit; + } + /* Compare null bits */ if (memcmp(table->null_flags, table->null_flags+table->s->rec_buff_length, table->s->null_bytes)) - return TRUE; // Diff in NULL value + { + result= TRUE; // Diff in NULL value + goto record_compare_exit; + } + /* Compare updated fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) - return TRUE; + { + result= TRUE; + goto record_compare_exit; + } } - return FALSE; + +record_compare_exit: + /* + Restore the saved bytes. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. 
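The bit arithmetic used for this normalization, here and in record_compare() above, is compact enough to warrant a worked example (filler_mask() is a name invented for the sketch): the X bit is bit 0 of the first null byte, the filler bits are the unused high bits of the last null byte starting at last_null_bit_pos, and 256 - (1 << last_null_bit_pos) is exactly a byte with those filler bits set, so OR-ing it in forces them to 1 regardless of what the engine left there.

#include <cstdio>

static unsigned char filler_mask(unsigned last_null_bit_pos)
{
  return (unsigned char) (256U - (1U << last_null_bit_pos));
}

int main()
{
  /* 3 null bits used in the last byte -> bits 3..7 are filler -> mask 0xF8. */
  printf("mask for last_null_bit_pos=3: 0x%02X\n", filler_mask(3));

  unsigned char last_null_byte= 0x05;        /* whatever the engine left behind */
  last_null_byte|= filler_mask(3);
  printf("normalized byte: 0x%02X\n", last_null_byte);   /* prints: 0xFD */
  return 0;
}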
+ */ + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + table->record[i][0]= saved_x[i]; + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + } + } + + return result; } @@ -6848,6 +7341,8 @@ static int find_and_fetch_row(TABLE *table, byte *key) DBUG_ASSERT(table->in_use != NULL); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && table->s->primary_key < MAX_KEY) { @@ -6908,8 +7403,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) my_ptrdiff_t const pos= table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; table->record[1][pos]= 0xFF; - if ((error= table->file->index_read(table->record[1], key, - table->key_info->key_length, + if ((error= table->file->index_read(table->record[1], key, HA_WHOLE_KEY, HA_READ_KEY_EXACT))) { table->file->print_error(error, MYF(0)); @@ -6948,15 +7442,22 @@ static int find_and_fetch_row(TABLE *table, byte *key) while (record_compare(table)) { int error; + /* We need to set the null bytes to ensure that the filler bit are all set when returning. There are storage engines that just set the necessary bits on the bytes and don't set the filler bits correctly. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. */ - my_ptrdiff_t const pos= - table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; - table->record[1][pos]= 0xFF; + if (table->s->null_bytes > 0) + { + table->record[1][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + if ((error= table->file->index_next(table->record[1]))) { table->file->print_error(error, MYF(0)); @@ -6982,17 +7483,11 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* Continue until we find the right record or have made a full loop */ do { - /* - We need to set the null bytes to ensure that the filler bit - are all set when returning. There are storage engines that - just set the necessary bits on the bytes and don't set the - filler bits correctly. - */ - my_ptrdiff_t const pos= - table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; - table->record[1][pos]= 0xFF; error= table->file->rnd_next(table->record[1]); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("record[1]", table->record[1], table->s->reclength); + switch (error) { case 0: @@ -7006,6 +7501,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) default: table->file->print_error(error, MYF(0)); + DBUG_PRINT("info", ("Record not found")); table->file->ha_rnd_end(); DBUG_RETURN(error); } @@ -7015,6 +7511,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* Have to restart the scan to be able to fetch the next row. */ + DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ? 
"not " : "")); table->file->ha_rnd_end(); DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0); @@ -7107,7 +7604,7 @@ int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -7173,16 +7670,55 @@ void Delete_rows_log_event::print(FILE *file, */ #if !defined(MYSQL_CLIENT) Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, - ulong tid, MY_BITMAP const *cols, + ulong tid, + MY_BITMAP const *cols_bi, + MY_BITMAP const *cols_ai, + bool is_transactional) +: Rows_log_event(thd_arg, tbl_arg, tid, cols_bi, is_transactional) +#ifdef HAVE_REPLICATION + , m_memory(NULL), m_key(NULL) + +#endif +{ + init(cols_ai); +} + +Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, + ulong tid, + MY_BITMAP const *cols, bool is_transactional) : Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional) #ifdef HAVE_REPLICATION , m_memory(NULL), m_key(NULL) #endif { + init(cols); +} + +void Update_rows_log_event::init(MY_BITMAP const *cols) +{ + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols_ai, + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, + (m_width + 7) & ~7UL, + false))) + { + /* Cols can be zero if this is a dummy binrows event */ + if (likely(cols != NULL)) + memcpy(m_cols_ai.bitmap, cols->bitmap, no_bytes_in_map(cols)); + } } #endif /* !defined(MYSQL_CLIENT) */ + +Update_rows_log_event::~Update_rows_log_event() +{ + if (m_cols_ai.bitmap == m_bitbuf_ai) // no my_malloc happened + m_cols_ai.bitmap= 0; // so no my_free in bitmap_free + bitmap_free(&m_cols_ai); // To pair with bitmap_init(). +} + + /* Constructor used by slave to read the event from the binary log. 
*/ @@ -7242,7 +7778,7 @@ int Update_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -7267,7 +7803,7 @@ int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, store_record(table, record[1]); char const *next_start = *row_end; /* m_after_image is the after image for the update */ - error= unpack_row(rli, table, m_width, next_start, &m_cols, row_end, + error= unpack_row(rli, table, m_width, next_start, &m_cols_ai, row_end, &m_master_reclength, table->write_set, UPDATE_ROWS_EVENT); bmove_align(m_after_image, table->record[0], table->s->reclength); restore_record(table, record[1]); @@ -7339,3 +7875,113 @@ void Update_rows_log_event::print(FILE *file, } #endif + +Incident_log_event::Incident_log_event(const char *buf, uint event_len, + const Format_description_log_event *descr_event) + : Log_event(buf, descr_event) +{ + DBUG_ENTER("Incident_log_event::Incident_log_event"); + uint8 const common_header_len= + descr_event->common_header_len; + uint8 const post_header_len= + descr_event->post_header_len[INCIDENT_EVENT-1]; + + DBUG_PRINT("info",("event_len: %u; common_header_len: %d; post_header_len: %d", + event_len, common_header_len, post_header_len)); + + m_incident= static_cast<Incident>(uint2korr(buf + common_header_len)); + char const *ptr= buf + common_header_len + post_header_len; + char const *const str_end= buf + event_len; + uint8 len= 0; // Assignment to keep compiler happy + const char *str= NULL; // Assignment to keep compiler happy + read_str(&ptr, str_end, &str, &len); + m_message.str= const_cast<char*>(str); + m_message.length= len; + DBUG_PRINT("info", ("m_incident: %d", m_incident)); + DBUG_VOID_RETURN; +} + + +Incident_log_event::~Incident_log_event() +{ +} + + +const char * +Incident_log_event::description() const +{ + static const char *const description[]= { + "NOTHING", // Not used + "LOST_EVENTS" + }; + + DBUG_PRINT("info", ("m_incident: %d", m_incident)); + + DBUG_ASSERT(0 <= m_incident); + DBUG_ASSERT((my_size_t) m_incident <= sizeof(description)/sizeof(*description)); + + return description[m_incident]; +} + + +#ifndef MYSQL_CLIENT +void Incident_log_event::pack_info(Protocol *protocol) +{ + char buf[256]; + my_size_t bytes; + if (m_message.length > 0) + bytes= my_snprintf(buf, sizeof(buf), "#%d (%s)", + m_incident, description()); + else + bytes= my_snprintf(buf, sizeof(buf), "#%d (%s): %s", + m_incident, description(), m_message.str); + protocol->store(buf, bytes, &my_charset_bin); +} +#endif + + +#ifdef MYSQL_CLIENT +void +Incident_log_event::print(FILE *file, + PRINT_EVENT_INFO *print_event_info) +{ + if (print_event_info->short_form) + return; + + Write_on_release_cache cache(&print_event_info->head_cache, file); + print_header(&cache, print_event_info, FALSE); + my_b_printf(&cache, "\n# Incident: %s", description()); +} +#endif + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int +Incident_log_event::do_apply_event(RELAY_LOG_INFO const *rli) +{ + DBUG_ENTER("Incident_log_event::do_apply_event"); + slave_print_msg(ERROR_LEVEL, rli, ER_SLAVE_INCIDENT, + ER(ER_SLAVE_INCIDENT), + description(), + m_message.length > 0 ? 
m_message.str : "<none>"); + DBUG_RETURN(1); +} +#endif + +bool +Incident_log_event::write_data_header(IO_CACHE *file) +{ + DBUG_ENTER("Incident_log_event::write_data_header"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + byte buf[sizeof(int16)]; + int2store(buf, (int16) m_incident); + DBUG_RETURN(my_b_safe_write(file, buf, sizeof(buf))); +} + +bool +Incident_log_event::write_data_body(IO_CACHE *file) +{ + DBUG_ENTER("Incident_log_event::write_data_body"); + DBUG_RETURN(write_str(file, m_message.str, m_message.length)); +} + + diff --git a/sql/log_event.h b/sql/log_event.h index 5994beb0df3..51543291621 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -22,6 +22,7 @@ #endif #include <my_bitmap.h> +#include "rpl_constants.h" #define LOG_READ_EOF -1 #define LOG_READ_BOGUS -2 @@ -198,7 +199,7 @@ struct sql_ex_info #define TABLE_MAP_HEADER_LEN 8 #define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1) #define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN) - +#define INCIDENT_HEADER_LEN 2 /* Max number of possible extra bytes in a replication event compared to a packet (i.e. a query) sent from client to master; @@ -272,6 +273,7 @@ struct sql_ex_info #define Q_LC_TIME_NAMES_CODE 7 +#define Q_CHARSET_DATABASE_CODE 8 /* Intvar event post-header */ #define I_TYPE_OFFSET 0 @@ -467,10 +469,28 @@ enum Log_event_type XID_EVENT= 16, BEGIN_LOAD_QUERY_EVENT= 17, EXECUTE_LOAD_QUERY_EVENT= 18, + TABLE_MAP_EVENT = 19, - WRITE_ROWS_EVENT = 20, - UPDATE_ROWS_EVENT = 21, - DELETE_ROWS_EVENT = 22, + + /* + These event numbers were used for 5.1.0 to 5.1.15 and are + therefore obsolete. + */ + PRE_GA_WRITE_ROWS_EVENT = 20, + PRE_GA_UPDATE_ROWS_EVENT = 21, + PRE_GA_DELETE_ROWS_EVENT = 22, + + /* + These event numbers are used from 5.1.16 and forward + */ + WRITE_ROWS_EVENT = 23, + UPDATE_ROWS_EVENT = 24, + DELETE_ROWS_EVENT = 25, + + /* + Something out of the ordinary happened on the master + */ + INCIDENT_EVENT= 26, /* Add new events here - right above this comment! 
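Putting the Incident_log_event pieces from the previous file together, the on-disk layout appears to be a two-byte little-endian incident number (INCIDENT_HEADER_LEN, written with int2store() and read with uint2korr()) followed by a message with a one-byte length prefix (write_str()/read_str(), so at most 255 bytes). The round trip below is a sketch under that assumption, with put2()/get2() standing in for the server's macros and no event header or other framing included.

#include <cstdio>
#include <string>
#include <vector>

/* Little-endian 16-bit store/fetch, in the spirit of int2store()/uint2korr(). */
static void put2(std::vector<unsigned char> &out, unsigned v)
{
  out.push_back((unsigned char) (v & 0xFF));
  out.push_back((unsigned char) ((v >> 8) & 0xFF));
}
static unsigned get2(const unsigned char *p) { return p[0] | (p[1] << 8); }

int main()
{
  /* Encode: 2-byte incident number, then a length-prefixed message. */
  const unsigned incident= 1;                      /* e.g. "LOST_EVENTS"   */
  const std::string message= "disk full on master";

  std::vector<unsigned char> buf;
  put2(buf, incident);                             /* 2-byte post-header   */
  buf.push_back((unsigned char) message.size());   /* 1-byte length prefix */
  buf.insert(buf.end(), message.begin(), message.end());

  /* Decode it back. */
  unsigned decoded_incident= get2(&buf[0]);
  unsigned msg_len= buf[2];
  std::string decoded_message((const char *) &buf[3], msg_len);

  printf("incident #%u: %s\n", decoded_incident, decoded_message.c_str());
  return 0;
}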
@@ -502,6 +522,7 @@ class THD; class Format_description_log_event; struct st_relay_log_info; +typedef st_relay_log_info RELAY_LOG_INFO; #ifdef MYSQL_CLIENT /* @@ -533,10 +554,11 @@ typedef struct st_print_event_info char charset[6]; // 3 variables, each of them storable in 2 bytes char time_zone_str[MAX_TIME_ZONE_NAME_LENGTH]; uint lc_time_names_number; + uint charset_database_number; st_print_event_info() :flags2_inited(0), sql_mode_inited(0), auto_increment_increment(1),auto_increment_offset(1), charset_inited(0), - lc_time_names_number(0) + lc_time_names_number(0), charset_database_number(0) { /* Currently we only use static PRINT_EVENT_INFO objects, so zeroed at @@ -546,16 +568,19 @@ typedef struct st_print_event_info bzero(db, sizeof(db)); bzero(charset, sizeof(charset)); bzero(time_zone_str, sizeof(time_zone_str)); - strcpy(delimiter, ";"); - uint const flags = MYF(MY_WME | MY_NABP); - init_io_cache(&head_cache, -1, 0, WRITE_CACHE, 0L, FALSE, flags); - init_io_cache(&body_cache, -1, 0, WRITE_CACHE, 0L, FALSE, flags); + delimiter[0]= ';'; + delimiter[1]= 0; + myf const flags = MYF(MY_WME | MY_NABP); + open_cached_file(&head_cache, NULL, NULL, 0, flags); + open_cached_file(&body_cache, NULL, NULL, 0, flags); } ~st_print_event_info() { - end_io_cache(&head_cache); - end_io_cache(&body_cache); + close_cached_file(&head_cache); + close_cached_file(&body_cache); } + bool init_ok() /* tells if construction was successful */ + { return my_b_inited(&head_cache) && my_b_inited(&body_cache); } /* Settings on how to print the events */ @@ -586,6 +611,33 @@ typedef struct st_print_event_info class Log_event { public: + /** + Enumeration of what kinds of skipping (and non-skipping) that can + occur when the slave executes an event. + + @see shall_skip + @see do_shall_skip + */ + enum enum_skip_reason { + /** + Don't skip event. + */ + EVENT_SKIP_NOT, + + /** + Skip event by ignoring it. + + This means that the slave skip counter will not be changed. + */ + EVENT_SKIP_IGNORE, + + /** + Skip event and decrease skip counter. + */ + EVENT_SKIP_COUNT + }; + + /* The following type definition is to be used whenever data is placed and manipulated in a common buffer. Use this typedef for buffers @@ -667,16 +719,14 @@ public: static void init_show_field_list(List<Item>* field_list); #ifdef HAVE_REPLICATION int net_send(Protocol *protocol, const char* log_name, my_off_t pos); + /* pack_info() is used by SHOW BINLOG EVENTS; as print() it prepares and sends a string to display to the user, so it resembles print(). */ + virtual void pack_info(Protocol *protocol); - /* - The SQL slave thread calls exec_event() to execute the event; this is where - the slave's data is modified. - */ - virtual int exec_event(struct st_relay_log_info* rli); + #endif /* HAVE_REPLICATION */ virtual const char* get_db() { @@ -749,6 +799,127 @@ public: *description_event); /* returns the human readable name of the event's type */ const char* get_type_str(); + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +public: + + /** + Apply the event to the database. + + This function represents the public interface for applying an + event. + + @see do_apply_event + */ + int apply_event(RELAY_LOG_INFO const *rli) { + return do_apply_event(rli); + } + + + /** + Update the relay log position. + + This function represents the public interface for "stepping over" + the event and will update the relay log information. 
+ + @see do_update_pos + */ + int update_pos(RELAY_LOG_INFO *rli) + { + return do_update_pos(rli); + } + + /** + Decide if the event shall be skipped, and the reason for skipping + it. + + @see do_shall_skip + */ + enum_skip_reason shall_skip(RELAY_LOG_INFO *rli) + { + return do_shall_skip(rli); + } + +protected: + /** + Primitive to apply an event to the database. + + This is where the change to the database is made. + + @note The primitive is protected instead of private, since there + is a hierarchy of actions to be performed in some cases. + + @see Format_description_log_event::do_apply_event() + + @param rli Pointer to relay log info structure + + @retval 0 Event applied successfully + @retval errno Error code if event application failed + */ + virtual int do_apply_event(RELAY_LOG_INFO const *rli) + { + return 0; /* Default implementation does nothing */ + } + + + /** + Advance relay log coordinates. + + This function is called to advance the relay log coordinates to + just after the event. It is essential that both the relay log + coordinate and the group log position is updated correctly, since + this function is used also for skipping events. + + Normally, each implementation of do_update_pos() shall: + + - Update the event position to refer to the position just after + the event. + + - Update the group log position to refer to the position just + after the event <em>if the event is last in a group</em> + + @param rli Pointer to relay log info structure + + @retval 0 Coordinates changed successfully + @retval errno Error code if advancing failed (usually just + 1). Observe that handler errors are returned by the + do_apply_event() function, and not by this one. + */ + virtual int do_update_pos(RELAY_LOG_INFO *rli); + + + /** + Decide if this event shall be skipped or not and the reason for + skipping it. + + The default implementation decide that the event shall be skipped + if either: + + - the server id of the event is the same as the server id of the + server and <code>rli->replicate_same_server_id</code> is true, + or + + - if <code>rli->slave_skip_counter</code> is greater than zero. + + @see do_apply_event + @see do_update_pos + + @retval Log_event::EVENT_SKIP_NOT + The event shall not be skipped and should be applied. + + @retval Log_event::EVENT_SKIP_IGNORE + The event shall be skipped by just ignoring it, i.e., the slave + skip counter shall not be changed. This happends if, for example, + the originating server id of the event is the same as the server + id of the slave. + + @retval Log_event::EVENT_SKIP_COUNT + The event shall be skipped because the slave skip counter was + non-zero. The caller shall decrease the counter by one. + */ + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); + +#endif }; /* @@ -789,10 +960,10 @@ public: uint16 error_code; ulong thread_id; /* - For events created by Query_log_event::exec_event (and - Load_log_event::exec_event()) we need the *original* thread id, to be able - to log the event with the original (=master's) thread id (fix for - BUG#1686). + For events created by Query_log_event::do_apply_event (and + Load_log_event::do_apply_event()) we need the *original* thread + id, to be able to log the event with the original (=master's) + thread id (fix for BUG#1686). 
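The public apply_event()/update_pos()/shall_skip() wrappers above delegate to the protected do_*() primitives, so the slave-side caller is expected to decide on skipping first, apply only when the event is not skipped, and always step the relay-log position afterwards, decreasing the skip counter itself when EVENT_SKIP_COUNT is returned. A rough, self-contained sketch of a driver loop built on that contract; Relay_info, Event and execute_one_event are simplified stand-ins, not the server's RELAY_LOG_INFO or Log_event classes:

enum Skip_reason { SKIP_NOT, SKIP_IGNORE, SKIP_COUNT };   // mirrors enum_skip_reason

struct Relay_info {                      // stand-in for RELAY_LOG_INFO
  unsigned long slave_skip_counter = 0;
};

struct Event {                           // stand-in for Log_event's public interface
  virtual ~Event() = default;
  virtual Skip_reason shall_skip(Relay_info *rli) {
    return rli->slave_skip_counter > 0 ? SKIP_COUNT : SKIP_NOT;
  }
  virtual int apply_event(const Relay_info *) { return 0; }
  virtual int update_pos(Relay_info *) { return 0; }       // also run for skipped events
};

// One execution step: skip decision first, apply only when not skipped,
// and always advance the position so skipped events are stepped over too.
static int execute_one_event(Event *ev, Relay_info *rli)
{
  switch (ev->shall_skip(rli)) {
  case SKIP_NOT:
    if (int err = ev->apply_event(rli))
      return err;
    break;
  case SKIP_COUNT:
    --rli->slave_skip_counter;   // the caller, not the event, decreases the counter
    break;
  case SKIP_IGNORE:
    break;                       // e.g. our own events: skipped without touching the counter
  }
  return ev->update_pos(rli);
}

int main()
{
  Relay_info rli;
  Event ev;
  return execute_one_event(&ev, &rli);
}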
*/ ulong slave_proxy_id; @@ -846,6 +1017,7 @@ public: uint time_zone_len; /* 0 means uninited */ const char *time_zone_str; uint lc_time_names_number; /* 0 means en_US */ + uint charset_database_number; #ifndef MYSQL_CLIENT @@ -854,9 +1026,6 @@ public: const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); - int exec_event(struct st_relay_log_info* rli, const char *query_arg, - uint32 q_len_arg); #endif /* HAVE_REPLICATION */ #else void print_query_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info); @@ -885,6 +1054,16 @@ public: */ virtual ulong get_post_header_size_for_derived() { return 0; } /* Writes derived event-specific part of post header. */ + +public: /* !!! Public in this patch to allow old usage */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + + int do_apply_event(RELAY_LOG_INFO const *rli, + const char *query_arg, + uint32 q_len_arg); +#endif /* HAVE_REPLICATION */ }; @@ -933,9 +1112,8 @@ public: uint16 master_port; #ifndef MYSQL_CLIENT - Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); + Slave_log_event(THD* thd_arg, RELAY_LOG_INFO* rli); void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -948,6 +1126,11 @@ public: #ifndef MYSQL_CLIENT bool write(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const* rli); +#endif }; #endif /* HAVE_REPLICATION */ @@ -1017,12 +1200,6 @@ public: const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli) - { - return exec_event(thd->slave_net,rli,0); - } - int exec_event(NET* net, struct st_relay_log_info* rli, - bool use_rli_only_for_errors); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1054,6 +1231,17 @@ public: + LOAD_HEADER_LEN + sql_ex.data_size() + field_block_len + num_fields); } + +public: /* !!! Public in this patch to allow old usage */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const* rli) + { + return do_apply_event(thd->slave_net,rli,0); + } + + int do_apply_event(NET *net, RELAY_LOG_INFO const *rli, + bool use_rli_only_for_errors); +#endif }; extern char server_version[SERVER_VERSION_LENGTH]; @@ -1111,7 +1299,6 @@ public: Start_log_event_v3(); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else Start_log_event_v3() {} @@ -1131,6 +1318,22 @@ public: return START_V3_HEADER_LEN; //no variable-sized part } virtual bool is_artificial_event() { return artificial_event; } + +protected: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO*) + { + /* + Events from ourself should be skipped, but they should not + decrease the slave skip counter. 
+ */ + if (this->server_id == ::server_id) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::EVENT_SKIP_NOT; + } +#endif }; @@ -1153,15 +1356,9 @@ public: uint8 number_of_event_types; /* The list of post-headers' lengthes */ uint8 *post_header_len; + uchar server_version_split[3]; Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); - -#ifndef MYSQL_CLIENT -#ifdef HAVE_REPLICATION - int exec_event(struct st_relay_log_info* rli); -#endif /* HAVE_REPLICATION */ -#endif - Format_description_log_event(const char* buf, uint event_len, const Format_description_log_event* description_event); ~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); } @@ -1184,6 +1381,15 @@ public: */ return FORMAT_DESCRIPTION_HEADER_LEN; } + + void calc_server_version_split(); + +protected: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1207,7 +1413,6 @@ public: {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1222,6 +1427,13 @@ public: bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1248,7 +1460,6 @@ class Rand_log_event: public Log_event {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1262,6 +1473,13 @@ class Rand_log_event: public Log_event bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; /***************************************************************************** @@ -1285,7 +1503,6 @@ class Xid_log_event: public Log_event Xid_log_event(THD* thd_arg, my_xid x): Log_event(thd_arg,0,0), xid(x) {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1299,6 +1516,11 @@ class Xid_log_event: public Log_event bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; /***************************************************************************** @@ -1328,7 +1550,6 @@ public: val_len(val_len_arg), type(type_arg), charset_number(charset_number_arg) { is_null= !val; } void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -1340,6 +1561,13 @@ public: bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int 
do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1354,7 +1582,6 @@ public: #ifndef MYSQL_CLIENT Stop_log_event() :Log_event() {} - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -1365,6 +1592,22 @@ public: ~Stop_log_event() {} Log_event_type get_type_code() { return STOP_EVENT;} bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli) + { + /* + Events from ourself should be skipped, but they should not + decrease the slave skip counter. + */ + if (this->server_id == ::server_id) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::EVENT_SKIP_NOT; + } +#endif }; /***************************************************************************** @@ -1391,7 +1634,6 @@ public: ulonglong pos_arg, uint flags); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1410,6 +1652,12 @@ public: #ifndef MYSQL_CLIENT bool write(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1444,7 +1692,6 @@ public: bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1478,6 +1725,11 @@ public: */ bool write_base(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1510,7 +1762,6 @@ public: Append_block_log_event(THD* thd, const char* db_arg, char* block_arg, uint block_len_arg, bool using_trans); #ifdef HAVE_REPLICATION - int exec_event(struct st_relay_log_info* rli); void pack_info(Protocol* protocol); virtual int get_create_or_append() const; #endif /* HAVE_REPLICATION */ @@ -1528,6 +1779,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1547,7 +1803,6 @@ public: Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1564,6 +1819,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1583,7 +1843,6 @@ public: Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1599,6 +1858,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + 
+private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1668,7 +1932,6 @@ public: bool using_trans, bool suppress_use); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1687,7 +1950,12 @@ public: #ifndef MYSQL_CLIENT bool write_post_header_for_derived(IO_CACHE* file); #endif - }; + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif +}; #ifdef MYSQL_CLIENT @@ -1727,14 +1995,17 @@ public: TYPE_CODE = TABLE_MAP_EVENT }; + /** + Enumeration of the errors that can be returned. + */ enum enum_error { - ERR_OPEN_FAILURE = -1, /* Failure to open table */ - ERR_OK = 0, /* No error */ - ERR_TABLE_LIMIT_EXCEEDED = 1, /* No more room for tables */ - ERR_OUT_OF_MEM = 2, /* Out of memory */ - ERR_BAD_TABLE_DEF = 3, /* Table definition does not match */ - ERR_RBR_TO_SBR = 4 /* daisy-chanining RBR to SBR not allowed */ + ERR_OPEN_FAILURE = -1, /**< Failure to open table */ + ERR_OK = 0, /**< No error */ + ERR_TABLE_LIMIT_EXCEEDED = 1, /**< No more room for tables */ + ERR_OUT_OF_MEM = 2, /**< Out of memory */ + ERR_BAD_TABLE_DEF = 3, /**< Table definition does not match */ + ERR_RBR_TO_SBR = 4 /**< daisy-chanining RBR to SBR not allowed */ }; enum enum_flag @@ -1782,7 +2053,6 @@ public: #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int exec_event(struct st_relay_log_info *rli); virtual void pack_info(Protocol *protocol); #endif @@ -1792,6 +2062,11 @@ public: private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); +#endif + #ifndef MYSQL_CLIENT TABLE *m_table; #endif @@ -1814,7 +2089,7 @@ private: Row level log event class. - Common base class for all row-level log events. + Common base class for all row-containing log events. RESPONSIBILITIES @@ -1828,6 +2103,19 @@ private: class Rows_log_event : public Log_event { public: + /** + Enumeration of the errors that can be returned. + */ + enum enum_error + { + ERR_OPEN_FAILURE = -1, /**< Failure to open table */ + ERR_OK = 0, /**< No error */ + ERR_TABLE_LIMIT_EXCEEDED = 1, /**< No more room for tables */ + ERR_OUT_OF_MEM = 2, /**< Out of memory */ + ERR_BAD_TABLE_DEF = 3, /**< Table definition does not match */ + ERR_RBR_TO_SBR = 4 /**< daisy-chanining RBR to SBR not allowed */ + }; + /* These definitions allow you to combine the flags into an appropriate flag set using the normal bitwise operators. The @@ -1835,7 +2123,6 @@ public: accepted by the compiler, which is then used to set the real set of flags. 
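The comment above about combining flags relies on nothing more exotic than enum constants converting implicitly to integers so that they can be OR-ed into a wider flag_set value. A tiny illustration of the idiom; the flag names below are illustrative, not necessarily the ones defined in this enum:

#include <cstdint>

typedef uint16_t flag_set;       // stand-in for the flag_set type described above

enum Row_flag {
  STMT_END_F = 1,                // illustrative flag names
  NO_FK_CHECKS_F = 2,
  RELAXED_UNIQUE_CHECKS_F = 4
};

int main()
{
  // The enum constants convert to int, so they can be OR-ed together
  // and stored in the wider flag_set type.
  flag_set flags = STMT_END_F | RELAXED_UNIQUE_CHECKS_F;
  return (flags & STMT_END_F) ? 0 : 1;
}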
*/ - enum enum_flag { /* Last event of a statement */ @@ -1863,7 +2150,6 @@ public: flag_set get_flags(flag_set flags) const { return m_flags & flags; } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int exec_event(struct st_relay_log_info *rli); virtual void pack_info(Protocol *protocol); #endif @@ -1880,14 +2166,7 @@ public: #endif /* Member functions to implement superclass interface */ - virtual int get_data_size() - { - DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", - return 6 + 1 + no_bytes_in_map(&m_cols) + - (m_rows_cur - m_rows_buf);); - return ROWS_HEADER_LEN + 1 + no_bytes_in_map(&m_cols) + - (m_rows_cur - m_rows_buf); - } + virtual int get_data_size(); MY_BITMAP const *get_cols() const { return &m_cols; } my_size_t get_width() const { return m_width; } @@ -1898,9 +2177,14 @@ public: virtual bool write_data_body(IO_CACHE *file); virtual const char *get_db() { return m_table->s->db.str; } #endif + /* + Check that malloc() succeeded in allocating memory for the rows + buffer and the COLS vector. Checking that an Update_rows_log_event + is valid is done in the Update_rows_log_event::is_valid() + function. + */ virtual bool is_valid() const { - /* that's how we check malloc() succeeded */ return m_rows_buf && m_cols.bitmap; } @@ -1933,10 +2217,20 @@ protected: ulong m_table_id; /* Table ID */ MY_BITMAP m_cols; /* Bitmap denoting columns available */ ulong m_width; /* The width of the columns bitmap */ + /* + Bitmap for columns available in the after image, if present. These + fields are only available for Update_rows events. Observe that the + width of both the before image COLS vector and the after image + COLS vector is the same: the number of columns of the table on the + master. + */ + MY_BITMAP m_cols_ai; + ulong m_master_reclength; /* Length of record on master side */ - /* Bit buffer in the same memory as the class */ + /* Bit buffers in the same memory as the class */ uint32 m_bitbuf[128/(sizeof(uint32)*8)]; + uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)]; byte *m_rows_buf; /* The rows in packed format */ byte *m_rows_cur; /* One-after the end of the data */ @@ -1947,6 +2241,8 @@ protected: private: #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + /* Primitive to prepare for a sequence of row executions. @@ -1994,7 +2290,7 @@ private: RETURN VALUE Error code, if something went wrong, 0 otherwise. 
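Together with the earlier do_prepare_row() change that unpacks one image with &m_cols and a second with &m_cols_ai, this says an update row event carries two packed records, the before image and the after image, each with its own column bitmap of the same width. A self-contained sketch of that two-pass unpacking; Bitmap, Row and unpack_image are simplified stand-ins for MY_BITMAP and unpack_row():

#include <bitset>
#include <cstddef>
#include <vector>

using Bitmap = std::bitset<64>;          // stand-in for MY_BITMAP (m_cols / m_cols_ai)
using Row    = std::vector<int>;

// Toy stand-in for unpack_row(): copy only the columns whose bit is set,
// and report how many values were consumed from the packed data.
static std::size_t unpack_image(const std::vector<int> &packed, std::size_t pos,
                                const Bitmap &cols, std::size_t width, Row *out)
{
  out->assign(width, 0);
  for (std::size_t i = 0; i < width; ++i)
    if (cols.test(i))
      (*out)[i] = packed[pos++];
  return pos;
}

int main()
{
  const std::size_t width = 3;                   // number of columns on the master
  Bitmap before_cols("011"), after_cols("110");  // both bitmaps have the same width
  std::vector<int> packed = {10, 20, /* after image follows */ 21, 30};

  Row before, after;
  std::size_t pos = unpack_image(packed, 0, before_cols, width, &before);  // m_cols
  unpack_image(packed, pos, after_cols, width, &after);                    // m_cols_ai
  return (before[0] == 10 && after[2] == 30) ? 0 : 1;
}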
*/ - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end) = 0; /* @@ -2065,7 +2361,7 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif @@ -2094,10 +2390,20 @@ public: }; #ifndef MYSQL_CLIENT - Update_rows_log_event(THD*, TABLE*, ulong table_id, - MY_BITMAP const *cols, bool is_transactional); + Update_rows_log_event(THD*, TABLE*, ulong table_id, + MY_BITMAP const *cols_bi, + MY_BITMAP const *cols_ai, + bool is_transactional); + + Update_rows_log_event(THD*, TABLE*, ulong table_id, + MY_BITMAP const *cols, + bool is_transactional); + + void init(MY_BITMAP const *cols); #endif + virtual ~Update_rows_log_event(); + #ifdef HAVE_REPLICATION Update_rows_log_event(const char *buf, uint event_len, const Format_description_log_event *description_event); @@ -2116,6 +2422,11 @@ public: } #endif + virtual bool is_valid() const + { + return Rows_log_event::is_valid() && m_cols_ai.bitmap; + } + private: virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } @@ -2130,7 +2441,7 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ @@ -2201,10 +2512,108 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif }; + +/** + Class representing an incident, an occurance out of the ordinary, + that happened on the master. + + The event is used to inform the slave that something out of the + ordinary happened on the master that might cause the database to be + in an inconsistent state. + + <table id="IncidentFormat"> + <caption>Incident event format</caption> + <tr> + <th>Symbol</th> + <th>Size<br/>(bytes)</th> + <th>Description</th> + </tr> + <tr> + <td>INCIDENT</td> + <td align="right">2</td> + <td>Incident number as an unsigned integer</td> + </tr> + <tr> + <td>MSGLEN</td> + <td align="right">1</td> + <td>Message length as an unsigned integer</td> + </tr> + <tr> + <td>MESSAGE</td> + <td align="right">MSGLEN</td> + <td>The message, if present. 
Not null terminated.</td> + </tr> + </table> + */ +class Incident_log_event : public Log_event { +public: +#ifndef MYSQL_CLIENT + Incident_log_event(THD *thd_arg, Incident incident) + : Log_event(thd_arg, 0, FALSE), m_incident(incident) + { + DBUG_ENTER("Incident_log_event::Incident_log_event"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + m_message.str= NULL; /* Just as a precaution */ + m_message.length= 0; + DBUG_VOID_RETURN; + } + + Incident_log_event(THD *thd_arg, Incident incident, LEX_STRING const msg) + : Log_event(thd_arg, 0, FALSE), m_incident(incident) + { + DBUG_ENTER("Incident_log_event::Incident_log_event"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + m_message= msg; + DBUG_VOID_RETURN; + } +#endif + +#ifndef MYSQL_CLIENT + void pack_info(Protocol*); +#endif + + Incident_log_event(const char *buf, uint event_len, + const Format_description_log_event *descr_event); + + virtual ~Incident_log_event(); + +#ifdef MYSQL_CLIENT + virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info); +#endif + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif + + virtual bool write_data_header(IO_CACHE *file); + virtual bool write_data_body(IO_CACHE *file); + + virtual Log_event_type get_type_code() { return INCIDENT_EVENT; } + + virtual bool is_valid() const { return 1; } + virtual int get_data_size() { + return INCIDENT_HEADER_LEN + 1 + m_message.length; + } + +private: + const char *description() const; + + Incident m_incident; + LEX_STRING m_message; +}; + +static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache, + FILE *file) +{ + return + my_b_copy_to_file(cache, file) || + reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE); +} + #endif /* _log_event_h */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index d9026af7b99..a481ac5c18c 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -38,6 +38,7 @@ #include <queues.h> #include "sql_bitmap.h" #include "sql_array.h" +#include "scheduler.h" /* TODO convert all these three maps to Bitmap classes */ typedef ulonglong table_map; /* Used for table bits in join */ @@ -46,7 +47,6 @@ typedef Bitmap<64> key_map; /* Used for finding keys */ #else typedef Bitmap<((MAX_INDEXES+7)/8*8)> key_map; /* Used for finding keys */ #endif -typedef ulong key_part_map; /* Used for finding key parts */ typedef ulong nesting_map; /* Used for flags of nesting constructs */ /* Used to identify NESTED_JOIN structures within a join (applicable only to @@ -95,15 +95,18 @@ void net_set_read_timeout(NET *net, uint timeout); #define PREV_BITS(type,A) ((type) (((type) 1 << (A)) -1)) #define all_bits_set(A,B) ((A) & (B) != (B)) -#define WARN_DEPRECATED(Thd,Ver,Old,New) \ - do { \ - DBUG_ASSERT(strncmp(Ver, MYSQL_SERVER_VERSION, sizeof(Ver)-1) >= 0); \ - push_warning_printf(((THD *)Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \ - ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), \ - (Old), (Ver), (New)); \ +#define WARN_DEPRECATED(Thd,Ver,Old,New) \ + do { \ + DBUG_ASSERT(strncmp(Ver, MYSQL_SERVER_VERSION, sizeof(Ver)-1) > 0); \ + if (((gptr)Thd) != NULL) \ + push_warning_printf(((THD *)Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \ + ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), \ + (Old), (Ver), (New)); \ + else \ + sql_print_warning("The syntax %s is deprecated and will be removed " \ + "in MySQL %s. 
Please use %s instead.", (Old), (Ver), (New)); \ } while(0) - extern CHARSET_INFO *system_charset_info, *files_charset_info ; extern CHARSET_INFO *national_charset_info, *table_alias_charset; @@ -282,7 +285,6 @@ MY_LOCALE *my_locale_by_number(uint number); #define TEST_MIT_THREAD 4 #define TEST_BLOCKING 8 #define TEST_KEEP_TMP_TABLES 16 -#define TEST_NO_THREADS 32 /* For debugging under Linux */ #define TEST_READCHECK 64 /* Force use of readcheck */ #define TEST_NO_EXTRA 128 #define TEST_CORE_ON_SIGNAL 256 /* Give core if signal */ @@ -310,54 +312,54 @@ MY_LOCALE *my_locale_by_number(uint number); TODO: separate three contexts above, move them to separate bitfields. */ -#define SELECT_DISTINCT (1ULL << 0) // SELECT, user -#define SELECT_STRAIGHT_JOIN (1ULL << 1) // SELECT, user -#define SELECT_DESCRIBE (1ULL << 2) // SELECT, user -#define SELECT_SMALL_RESULT (1ULL << 3) // SELECT, user -#define SELECT_BIG_RESULT (1ULL << 4) // SELECT, user -#define OPTION_FOUND_ROWS (1ULL << 5) // SELECT, user -#define OPTION_TO_QUERY_CACHE (1ULL << 6) // SELECT, user -#define SELECT_NO_JOIN_CACHE (1ULL << 7) // intern -#define OPTION_BIG_TABLES (1ULL << 8) // THD, user -#define OPTION_BIG_SELECTS (1ULL << 9) // THD, user -#define OPTION_LOG_OFF (1ULL << 10) // THD, user -#define OPTION_QUOTE_SHOW_CREATE (1ULL << 11) // THD, user, unused -#define TMP_TABLE_ALL_COLUMNS (1ULL << 12) // SELECT, intern -#define OPTION_WARNINGS (1ULL << 13) // THD, user -#define OPTION_AUTO_IS_NULL (1ULL << 14) // THD, user, binlog -#define OPTION_FOUND_COMMENT (1ULL << 15) // SELECT, intern, parser -#define OPTION_SAFE_UPDATES (1ULL << 16) // THD, user -#define OPTION_BUFFER_RESULT (1ULL << 17) // SELECT, user -#define OPTION_BIN_LOG (1ULL << 18) // THD, user -#define OPTION_NOT_AUTOCOMMIT (1ULL << 19) // THD, user -#define OPTION_BEGIN (1ULL << 20) // THD, intern -#define OPTION_TABLE_LOCK (1ULL << 21) // THD, intern -#define OPTION_QUICK (1ULL << 22) // SELECT (for DELETE) -#define OPTION_KEEP_LOG (1ULL << 23) // THD, user +#define SELECT_DISTINCT (ULL(1) << 0) // SELECT, user +#define SELECT_STRAIGHT_JOIN (ULL(1) << 1) // SELECT, user +#define SELECT_DESCRIBE (ULL(1) << 2) // SELECT, user +#define SELECT_SMALL_RESULT (ULL(1) << 3) // SELECT, user +#define SELECT_BIG_RESULT (ULL(1) << 4) // SELECT, user +#define OPTION_FOUND_ROWS (ULL(1) << 5) // SELECT, user +#define OPTION_TO_QUERY_CACHE (ULL(1) << 6) // SELECT, user +#define SELECT_NO_JOIN_CACHE (ULL(1) << 7) // intern +#define OPTION_BIG_TABLES (ULL(1) << 8) // THD, user +#define OPTION_BIG_SELECTS (ULL(1) << 9) // THD, user +#define OPTION_LOG_OFF (ULL(1) << 10) // THD, user +#define OPTION_QUOTE_SHOW_CREATE (ULL(1) << 11) // THD, user, unused +#define TMP_TABLE_ALL_COLUMNS (ULL(1) << 12) // SELECT, intern +#define OPTION_WARNINGS (ULL(1) << 13) // THD, user +#define OPTION_AUTO_IS_NULL (ULL(1) << 14) // THD, user, binlog +#define OPTION_FOUND_COMMENT (ULL(1) << 15) // SELECT, intern, parser +#define OPTION_SAFE_UPDATES (ULL(1) << 16) // THD, user +#define OPTION_BUFFER_RESULT (ULL(1) << 17) // SELECT, user +#define OPTION_BIN_LOG (ULL(1) << 18) // THD, user +#define OPTION_NOT_AUTOCOMMIT (ULL(1) << 19) // THD, user +#define OPTION_BEGIN (ULL(1) << 20) // THD, intern +#define OPTION_TABLE_LOCK (ULL(1) << 21) // THD, intern +#define OPTION_QUICK (ULL(1) << 22) // SELECT (for DELETE) +#define OPTION_KEEP_LOG (ULL(1) << 23) // THD, user /* The following is used to detect a conflict with DISTINCT */ -#define SELECT_ALL (1ULL << 24) // SELECT, user, parser +#define 
SELECT_ALL (ULL(1) << 24) // SELECT, user, parser /* Set if we are updating a non-transaction safe table */ -#define OPTION_STATUS_NO_TRANS_UPDATE (1ULL << 25) // THD, intern +#define OPTION_STATUS_NO_TRANS_UPDATE (ULL(1) << 25) // THD, intern /* The following can be set when importing tables in a 'wrong order' to suppress foreign key checks */ -#define OPTION_NO_FOREIGN_KEY_CHECKS (1ULL << 26) // THD, user, binlog +#define OPTION_NO_FOREIGN_KEY_CHECKS (ULL(1) << 26) // THD, user, binlog /* The following speeds up inserts to InnoDB tables by suppressing unique key checks in some cases */ -#define OPTION_RELAXED_UNIQUE_CHECKS (1ULL << 27) // THD, user, binlog -#define SELECT_NO_UNLOCK (1ULL << 28) // SELECT, intern -#define OPTION_SCHEMA_TABLE (1ULL << 29) // SELECT, intern +#define OPTION_RELAXED_UNIQUE_CHECKS (ULL(1) << 27) // THD, user, binlog +#define SELECT_NO_UNLOCK (ULL(1) << 28) // SELECT, intern +#define OPTION_SCHEMA_TABLE (ULL(1) << 29) // SELECT, intern /* Flag set if setup_tables already done */ -#define OPTION_SETUP_TABLES_DONE (1ULL << 30) // intern +#define OPTION_SETUP_TABLES_DONE (ULL(1) << 30) // intern /* If not set then the thread will ignore all warnings with level notes. */ -#define OPTION_SQL_NOTES (1ULL << 31) // THD, user +#define OPTION_SQL_NOTES (ULL(1) << 31) // THD, user /* Force the used temporary table to be a MyISAM table (because we will use fulltext functions when reading from it. */ -#define TMP_TABLE_FORCE_MYISAM (1ULL << 32) +#define TMP_TABLE_FORCE_MYISAM (ULL(1) << 32) /* @@ -408,8 +410,9 @@ MY_LOCALE *my_locale_by_number(uint number); updated (to store more bytes on disk). NOTE: When adding new SQL_MODE types, make sure to also add them to - ../scripts/mysql_create_system_tables.sh and - ../scripts/mysql_fix_privilege_tables.sql + the scripts used for creating the MySQL system tables + in scripts/mysql_system_tables.sql and scripts/mysql_system_tables_fix.sql + */ #define RAID_BLOCK_SIZE 1024 @@ -644,6 +647,7 @@ struct Query_cache_query_flags { unsigned int client_long_flag:1; unsigned int client_protocol_41:1; + unsigned int result_in_binary_protocol:1; unsigned int more_results_exists:1; unsigned int pkt_nr; uint character_set_client_num; @@ -670,6 +674,11 @@ struct Query_cache_query_flags query_cache.send_result_to_client(A, B, C) #define query_cache_invalidate_by_MyISAM_filename_ref \ &query_cache_invalidate_by_MyISAM_filename +/* note the "maybe": it's a read without mutex */ +#define query_cache_maybe_disabled(T) \ + (T->variables.query_cache_type == 0 || query_cache.query_cache_size == 0) +#define query_cache_is_cacheable_query(L) \ + (((L)->sql_command == SQLCOM_SELECT) && (L)->safe_to_cache_query) #else #define QUERY_CACHE_FLAGS_SIZE 0 #define query_cache_store_query(A, B) @@ -686,6 +695,8 @@ struct Query_cache_query_flags #define query_cache_abort(A) #define query_cache_end_of_result(A) #define query_cache_invalidate_by_MyISAM_filename_ref NULL +#define query_cache_maybe_disabled(T) 1 +#define query_cache_is_cacheable_query(L) 0 #endif /*HAVE_QUERY_CACHE*/ /* @@ -787,6 +798,23 @@ uint build_table_path(char *buff, size_t bufflen, const char *db, void write_bin_log(THD *thd, bool clear_error, char const *query, ulong query_length); +/* sql_connect.cc */ +int check_user(THD *thd, enum enum_server_command command, + const char *passwd, uint passwd_len, const char *db, + bool check_count); +pthread_handler_t handle_one_connection(void *arg); +bool init_new_connection_handler_thread(); +void reset_mqh(LEX_USER *lu, bool get_them); +bool 
check_mqh(THD *thd, uint check_command); +void time_out_user_resource_limits(THD *thd, USER_CONN *uc); +int check_for_max_user_connections(THD *thd, USER_CONN *uc); +void decrease_user_connections(USER_CONN *uc); +void thd_init_client_charset(THD *thd, uint cs_number); +bool setup_connection_thread_globals(THD *thd); +bool login_connection(THD *thd); +void prepare_new_connection_state(THD* thd); +void end_connection(THD *thd); + bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent); @@ -814,7 +842,7 @@ bool is_update_query(enum enum_sql_command command); bool alloc_query(THD *thd, const char *packet, uint packet_length); void mysql_init_select(LEX *lex); void mysql_reset_thd_for_next_command(THD *thd); -void mysql_init_query(THD *thd, uchar *buf, uint length); +void mysql_init_query(THD *thd, const char *buf, uint length); bool mysql_new_select(LEX *lex, bool move_down); void create_select_for_variable(const char *var_name); void mysql_init_multi_delete(LEX *lex); @@ -822,16 +850,14 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex); void init_max_user_conn(void); void init_update_queries(void); void free_max_user_conn(void); -pthread_handler_t handle_one_connection(void *arg); pthread_handler_t handle_bootstrap(void *arg); -void end_thread(THD *thd,bool put_in_cache); -void flush_thread_cache(); bool mysql_execute_command(THD *thd); bool do_command(THD *thd); bool dispatch_command(enum enum_server_command command, THD *thd, char* packet, uint packet_length); void log_slow_statement(THD *thd); bool check_dup(const char *db, const char *name, TABLE_LIST *tables); +bool compare_record(TABLE *table); bool append_file_to_dir(THD *thd, const char **filename_ptr, const char *table_name); @@ -900,6 +926,8 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, List<Item> &fields, List<Item> &all_fields, ORDER *order, bool *hidden_group_fields); +bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, + Item **ref_pointer_array); bool handle_select(THD *thd, LEX *lex, select_result *result, ulong setup_tables_done_option); @@ -971,7 +999,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List<Item> &fields, List_item *values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates duplic, - COND **where, bool select_insert); + COND **where, bool select_insert, + bool check_fields, bool abort_on_warning); bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, List<List_item> &values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates flag, @@ -1101,7 +1130,8 @@ int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond); -bool get_schema_tables_result(JOIN *join); +bool get_schema_tables_result(JOIN *join, + enum enum_schema_table_state executed_place); #define is_schema_db(X) \ !my_strcasecmp(system_charset_info, information_schema_name.str, (X)) @@ -1167,9 +1197,29 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables, table_map read_tables, COND *conds, bool allow_null_cond, int *error); extern Item 
**not_found_item; + +/* + This enumeration type is used only by the function find_item_in_list + to return the info on how an item has been resolved against a list + of possibly aliased items. + The item can be resolved: + - against an alias name of the list's element (RESOLVED_AGAINST_ALIAS) + - against non-aliased field name of the list (RESOLVED_WITH_NO_ALIAS) + - against an aliased field name of the list (RESOLVED_BEHIND_ALIAS) + - ignoring the alias name in cases when SQL requires to ignore aliases + (e.g. when the resolved field reference contains a table name or + when the resolved item is an expression) (RESOLVED_IGNORING_ALIAS) +*/ +enum enum_resolution_type { + NOT_RESOLVED=0, + RESOLVED_IGNORING_ALIAS, + RESOLVED_BEHIND_ALIAS, + RESOLVED_WITH_NO_ALIAS, + RESOLVED_AGAINST_ALIAS +}; Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, find_item_error_report_type report_error, - bool *unaliased); + enum_resolution_type *resolution); bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list); bool insert_fields(THD *thd, Name_resolution_context *context, @@ -1227,7 +1277,8 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, st_table_list *TABLE_LIST::*link, const char *db_name, const char *table_name); -TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list); +TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, + bool check_alias); TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name); TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list); bool close_temporary_table(THD *thd, TABLE_LIST *table_list); @@ -1387,7 +1438,16 @@ int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt); void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt); void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table); +/* Functions to work with system tables. 
*/ +bool open_system_tables_for_read(THD *thd, TABLE_LIST *table_list, + Open_tables_state *backup); +void close_system_tables(THD *thd, Open_tables_state *backup); +TABLE *open_system_table_for_update(THD *thd, TABLE_LIST *one_table); + bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE); +bool close_cached_connection_tables(THD *thd, bool wait_for_refresh, + LEX_STRING *connect_string, + bool have_lock = FALSE); void copy_field_from_tmp_record(Field *field,int offset); bool fill_record(THD *thd, Field **field, List<Item> &values, bool ignore_errors); @@ -1450,7 +1510,7 @@ void print_plan(JOIN* join,uint idx, double record_count, double read_time, void mysql_print_status(); /* key.cc */ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, - uint *key_length); + uint *key_length, uint *keypart); void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length); void key_restore(byte *to_record, byte *from_key, KEY *key_info, uint key_length); @@ -1493,6 +1553,11 @@ File open_binlog(IO_CACHE *log, const char *log_file_name, extern void MYSQLerror(const char*); void refresh_status(THD *thd); my_bool mysql_rm_tmp_tables(void); +void handle_connection_in_main_thread(THD *thd); +void create_thread_to_handle_connection(THD *thd); +void unlink_thd(THD *thd); +bool one_thread_per_connection_end(THD *thd, bool put_in_cache); +void flush_thread_cache(); /* item_func.cc */ extern bool check_reserved_words(LEX_STRING *name); @@ -1500,8 +1565,10 @@ extern bool check_reserved_words(LEX_STRING *name); /* strfunc.cc */ ulonglong find_set(TYPELIB *lib, const char *x, uint length, CHARSET_INFO *cs, char **err_pos, uint *err_len, bool *set_warning); -uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match); -uint find_type2(TYPELIB *lib, const char *find, uint length, CHARSET_INFO *cs); +uint find_type(const TYPELIB *lib, const char *find, uint length, + bool part_match); +uint find_type2(const TYPELIB *lib, const char *find, uint length, + CHARSET_INFO *cs); void unhex_type2(TYPELIB *lib); uint check_word(TYPELIB *lib, const char *val, const char *end, const char **end_of_word); @@ -1556,7 +1623,7 @@ extern double log_01[32]; extern ulonglong log_10_int[20]; extern ulonglong keybuff_size; extern ulonglong thd_startup_options; -extern ulong refresh_version,flush_version, thread_id; +extern ulong refresh_version, thread_id; extern ulong binlog_cache_use, binlog_cache_disk_use; extern ulong aborted_threads,aborted_connects; extern ulong delayed_insert_timeout; @@ -1576,7 +1643,7 @@ extern ulong max_prepared_stmt_count, prepared_stmt_count; extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit; extern ulong max_binlog_size, max_relay_log_size; extern ulong opt_binlog_rows_event_max_size; -extern ulong rpl_recovery_rank, thread_cache_size; +extern ulong rpl_recovery_rank, thread_cache_size, thread_pool_size; extern ulong back_log; extern ulong specialflag, current_pid; extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter; @@ -1604,6 +1671,7 @@ extern my_bool opt_slave_compressed_protocol, use_temp_pool; extern my_bool opt_readonly, lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs; extern my_bool opt_secure_auth; +extern char* opt_secure_file_priv; extern my_bool opt_log_slow_admin_statements; extern my_bool sp_automatic_privileges, opt_noacl; extern my_bool opt_old_style_user_limits, trust_function_creators; @@ -1661,6 
+1729,9 @@ extern TABLE *unused_tables; extern const char* any_db; extern struct my_option my_long_options[]; extern const LEX_STRING view_type; +extern scheduler_functions thread_scheduler; +extern TYPELIB thread_handling_typelib; +extern uint8 uc_update_queries[SQLCOM_END+1]; extern uint sql_command_flags[]; extern TYPELIB log_output_typelib; @@ -1675,7 +1746,7 @@ extern handlerton *partition_hton; extern handlerton *myisam_hton; extern handlerton *heap_hton; -extern SHOW_COMP_OPTION have_openssl, have_symlink, have_dlopen; +extern SHOW_COMP_OPTION have_ssl, have_symlink, have_dlopen; extern SHOW_COMP_OPTION have_query_cache; extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; extern SHOW_COMP_OPTION have_crypt; @@ -1970,7 +2041,6 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->const_table= 0; table->null_row= 0; table->status= STATUS_NO_RECORD; - table->keys_in_use_for_query= table->s->keys_in_use; table->maybe_null= table_list->outer_join; TABLE_LIST *embedding= table_list->embedding; while (!table->maybe_null && embedding) @@ -1981,6 +2051,8 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->tablenr= tablenr; table->map= (table_map) 1 << tablenr; table->force_index= table_list->force_index; + table->covering_keys= table->s->keys_for_keyread; + table->merge_keys.clear_all(); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d9fab73a23c..9bdf117f8f7 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -59,10 +59,6 @@ #define mysqld_charset &my_charset_latin1 -#ifndef DBUG_OFF -#define ONE_THREAD -#endif - #ifdef HAVE_purify #define IF_PURIFY(A,B) (A) #else @@ -199,12 +195,6 @@ inline void reset_floating_point_exceptions() } /* cplusplus */ - -#if defined(HAVE_LINUXTHREADS) -#define THR_KILL_SIGNAL SIGINT -#else -#define THR_KILL_SIGNAL SIGUSR2 // Can't use this with LinuxThreads -#endif #define MYSQL_KILL_SIGNAL SIGTERM #ifdef HAVE_GLIBC2_STYLE_GETHOSTBYNAME_R @@ -282,6 +272,16 @@ static TYPELIB tc_heuristic_recover_typelib= array_elements(tc_heuristic_recover_names)-1,"", tc_heuristic_recover_names, NULL }; + +static const char *thread_handling_names[]= +{ "one-thread-per-connection", "no-threads", "pool-of-threads", NullS}; + +TYPELIB thread_handling_typelib= +{ + array_elements(thread_handling_names) - 1, "", + thread_handling_names, NULL +}; + const char *first_keyword= "first", *binary_keyword= "BINARY"; const char *my_localhost= "localhost", *delayed_user= "DELAYED"; #if SIZEOF_OFF_T > 4 && defined(BIG_TABLES) @@ -361,6 +361,7 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0; my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0; my_bool opt_log_slave_updates= 0; my_bool opt_innodb; +bool slave_warning_issued = false; /* Legacy global handlerton. These will be removed (please do not add more). 
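The new thread_handling_typelib and the scheduler_functions global suggest that connection handling is now routed through a small table of callbacks (init, end_thread, post-kill notification, end) plus a max_threads limit, selected by the thread-handling mode. A self-contained sketch of that shape; the struct layout and the make_scheduler() helper are illustrative guesses, not the server's definitions:

#include <cstdio>
#include <cstring>

// Stand-in for scheduler_functions: a bundle of callbacks picked at startup.
struct Scheduler {
  unsigned max_threads;
  bool (*init)();
  void (*end_thread)(void *thd, bool cache_it);
  void (*end)();
};

static bool noop_init() { return false; }
static void noop_end() {}
static void per_conn_end_thread(void *, bool) { std::puts("ending connection thread"); }
static void main_thread_end_thread(void *, bool) { /* no extra threads to end */ }

// Pick the callback set from the same mode names as thread_handling_typelib.
static Scheduler make_scheduler(const char *mode, unsigned max_connections)
{
  if (std::strcmp(mode, "no-threads") == 0)
    return Scheduler{1, noop_init, main_thread_end_thread, noop_end};
  // "one-thread-per-connection" (default) and, in this sketch, "pool-of-threads"
  return Scheduler{max_connections, noop_init, per_conn_end_thread, noop_end};
}

int main()
{
  Scheduler s = make_scheduler("one-thread-per-connection", 151);
  if (s.init())
    return 1;
  s.end_thread(nullptr, true);
  s.end();
  return 0;
}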
@@ -393,6 +394,7 @@ extern my_bool innobase_log_archive, innobase_use_native_aio, innobase_file_per_table, innobase_locks_unsafe_for_binlog, innobase_rollback_on_timeout, + innobase_stats_on_metadata, innobase_create_status_file; extern "C" { extern ulong srv_max_buf_pool_modified_pct; @@ -430,6 +432,7 @@ extern enum ndb_distribution opt_ndb_distribution_id; my_bool opt_readonly, use_temp_pool, relay_log_purge; my_bool opt_sync_frm, opt_allow_suspicious_udfs; my_bool opt_secure_auth= 0; +char* opt_secure_file_priv= 0; my_bool opt_log_slow_admin_statements= 0; my_bool lower_case_file_system= 0; my_bool opt_large_pages= 0; @@ -448,9 +451,10 @@ my_bool sp_automatic_privileges= 1; ulong opt_binlog_rows_event_max_size; const char *binlog_format_names[]= {"STATEMENT", "ROW", "MIXED", NullS}; TYPELIB binlog_format_typelib= - { array_elements(binlog_format_names)-1,"", + { array_elements(binlog_format_names) - 1, "", binlog_format_names, NULL }; - +ulong opt_binlog_format_id= (ulong) BINLOG_FORMAT_UNSPEC; +const char *opt_binlog_format= binlog_format_names[opt_binlog_format_id]; #ifdef HAVE_INITGROUPS static bool calling_initgroups= FALSE; /* Used in SIGSEGV handler. */ #endif @@ -467,9 +471,10 @@ ulong thread_stack, what_to_log; ulong query_buff_size, slow_launch_time, slave_open_temp_tables; ulong open_files_limit, max_binlog_size, max_relay_log_size; ulong slave_net_timeout, slave_trans_retries; -ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0; +ulong thread_cache_size=0, thread_pool_size= 0; +ulong binlog_cache_size=0, max_binlog_cache_size=0; ulong query_cache_size=0; -ulong refresh_version, flush_version; /* Increments on each reload */ +ulong refresh_version; /* Increments on each reload */ query_id_t global_query_id; ulong aborted_threads, aborted_connects; ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; @@ -560,7 +565,7 @@ CHARSET_INFO *system_charset_info, *files_charset_info ; CHARSET_INFO *national_charset_info, *table_alias_charset; CHARSET_INFO *character_set_filesystem; -SHOW_COMP_OPTION have_openssl, have_symlink, have_dlopen, have_query_cache; +SHOW_COMP_OPTION have_ssl, have_symlink, have_dlopen, have_query_cache; SHOW_COMP_OPTION have_geometry, have_rtree_keys; SHOW_COMP_OPTION have_crypt, have_compress; @@ -627,6 +632,7 @@ struct rand_struct sql_rand; // used by sql_class.cc:THD::THD() #ifndef EMBEDDED_LIBRARY struct passwd *user_info; static pthread_t select_thread; +static uint thr_kill_signal; #endif /* OS specific variables */ @@ -681,6 +687,8 @@ my_bool opt_enable_shared_memory; HANDLE smem_event_connect_request= 0; #endif +scheduler_functions thread_scheduler; + #define SSL_VARS_NOT_STATIC #include "sslopt-vars.h" #ifdef HAVE_OPENSSL @@ -775,7 +783,7 @@ static void close_connections(void) DBUG_PRINT("info",("Waiting for select thread")); #ifndef DONT_USE_THR_ALARM - if (pthread_kill(select_thread,THR_CLIENT_ALARM)) + if (pthread_kill(select_thread, thr_client_alarm)) break; // allready dead #endif set_timespec(abstime, 2); @@ -861,6 +869,7 @@ static void close_connections(void) continue; tmp->killed= THD::KILL_CONNECTION; + thread_scheduler.post_kill_notification(tmp); if (tmp->mysys_var) { tmp->mysys_var->abort=1; @@ -1233,6 +1242,7 @@ void clean_up(bool print_message) #endif x_free(opt_bin_logname); x_free(opt_relay_logname); + x_free(opt_secure_file_priv); bitmap_free(&temp_pool); free_max_user_conn(); #ifdef HAVE_REPLICATION @@ -1254,6 +1264,7 @@ void clean_up(bool print_message) if (!opt_bootstrap) (void) 
my_delete(pidfile_name,MYF(0)); // This may not always exist #endif + thread_scheduler.end(); finish_client_errs(); my_free((gptr) my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST), MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); @@ -1513,6 +1524,9 @@ static void network_init(void) DBUG_ENTER("network_init"); LINT_INIT(ret); + if (thread_scheduler.init()) + unireg_abort(1); /* purecov: inspected */ + set_ports(); if (mysqld_port != 0 && !opt_disable_networking && !opt_bootstrap) @@ -1670,18 +1684,6 @@ static void network_init(void) #endif /*!EMBEDDED_LIBRARY*/ -void MYSQLerror(const char *s) -{ - THD *thd=current_thd; - char *yytext= (char*) thd->lex->tok_start; - /* "parse error" changed into "syntax error" between bison 1.75 and 1.875 */ - if (strcmp(s,"parse error") == 0 || strcmp(s,"syntax error") == 0) - s=ER(ER_SYNTAX_ERROR); - my_printf_error(ER_PARSE_ERROR, ER(ER_PARSE_ERROR), MYF(0), s, - (yytext ? (char*) yytext : ""), - thd->lex->yylineno); -} - #ifndef EMBEDDED_LIBRARY /* @@ -1731,21 +1733,55 @@ extern "C" sig_handler end_thread_signal(int sig __attribute__((unused))) if (thd && ! thd->bootstrap) { statistic_increment(killed_threads, &LOCK_status); - end_thread(thd,0); + thread_scheduler.end_thread(thd,0); /* purecov: inspected */ } DBUG_VOID_RETURN; /* purecov: deadcode */ } -void end_thread(THD *thd, bool put_in_cache) +/* + Unlink thd from global list of available connections and free thd + + SYNOPSIS + unlink_thd() + thd Thread handler + + NOTES + LOCK_thread_count is locked and left locked +*/ + +void unlink_thd(THD *thd) { - DBUG_ENTER("end_thread"); + DBUG_ENTER("unlink_thd"); + DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); thd->cleanup(); (void) pthread_mutex_lock(&LOCK_thread_count); thread_count--; delete thd; + DBUG_VOID_RETURN; +} + - if (put_in_cache && cached_thread_count < thread_cache_size && +/* + Store thread in cache for reuse by new connections + + SYNOPSIS + cache_thread() + + NOTES + LOCK_thread_count has to be locked + + RETURN + 0 Thread was not put in cache + 1 Thread is to be reused by new connection. + (ie, caller should return, not abort with pthread_exit()) +*/ + + +static bool cache_thread() +{ + safe_mutex_assert_owner(&LOCK_thread_count); + if (cached_thread_count < thread_cache_size && ! abort_loop && !kill_cached_threads) { /* Don't kill the thread, just put it in cache for reuse */ @@ -1758,31 +1794,62 @@ void end_thread(THD *thd, bool put_in_cache) pthread_cond_signal(&COND_flush_thread_cache); if (wake_thread) { + THD *thd; wake_thread--; - thd=thread_cache.get(); - thd->real_id=pthread_self(); + thd= thread_cache.get(); thd->thread_stack= (char*) &thd; // For store_globals (void) thd->store_globals(); + /* + THD::mysys_var::abort is associated with physical thread rather + than with THD object. So we need to reset this flag before using + this thread for handling of new THD object/connection. + */ + thd->mysys_var->abort= 0; thd->thr_create_time= time(NULL); threads.append(thd); - pthread_mutex_unlock(&LOCK_thread_count); - DBUG_VOID_RETURN; + return(1); } } + return(0); +} + + +/* + End thread for the current connection + + SYNOPSIS + one_thread_per_connection_end() + thd Thread handler + put_in_cache Store thread in cache, if there is room in it + Normally this is true in all cases except when we got + out of resources initializing the current thread + + NOTES + If thread is cached, we will wait until thread is scheduled to be + reused and then we will return. + If thread is not cached, we end the thread. 
+ + RETURN + 0 Signal to handle_one_connection to reuse connection +*/ + +bool one_thread_per_connection_end(THD *thd, bool put_in_cache) +{ + DBUG_ENTER("one_thread_per_connection_end"); + unlink_thd(thd); + if (put_in_cache) + put_in_cache= cache_thread(); + pthread_mutex_unlock(&LOCK_thread_count); + if (put_in_cache) + DBUG_RETURN(0); // Thread is reused - /* Tell main we are ready */ - (void) pthread_mutex_unlock(&LOCK_thread_count); /* It's safe to broadcast outside a lock (COND... is not deleted here) */ DBUG_PRINT("signal", ("Broadcasting COND_thread_count")); (void) pthread_cond_broadcast(&COND_thread_count); -#ifdef ONE_THREAD - if (!(test_flags & TEST_NO_THREADS)) // For debugging under Linux -#endif - { - my_thread_end(); - pthread_exit(0); - } - DBUG_VOID_RETURN; + + my_thread_end(); + pthread_exit(0); + DBUG_RETURN(0); // Impossible } @@ -2095,7 +2162,10 @@ static void check_data_home(const char *path) extern "C" sig_handler handle_segfault(int sig) { + time_t curr_time; + struct tm tm; THD *thd=current_thd; + /* Strictly speaking, one needs a mutex here but since we have got SIGSEGV already, things are a mess @@ -2109,11 +2179,17 @@ extern "C" sig_handler handle_segfault(int sig) } segfaulted = 1; + + curr_time= time(NULL); + localtime_r(&curr_time, &tm); + fprintf(stderr,"\ -mysqld got signal %d;\n\ +%02d%02d%02d %2d:%02d:%02d - mysqld got signal %d;\n\ This could be because you hit a bug. It is also possible that this binary\n\ or one of the libraries it was linked against is corrupt, improperly built,\n\ or misconfigured. This error can also be caused by malfunctioning hardware.\n", + tm.tm_year % 100, tm.tm_mon+1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, sig); fprintf(stderr, "\ We will try our best to scrape up some info that will hopefully help diagnose\n\ @@ -2123,14 +2199,15 @@ and this may fail.\n\n"); (ulong) dflt_key_cache->key_cache_mem_size); fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size); fprintf(stderr, "max_used_connections=%lu\n", max_used_connections); - fprintf(stderr, "max_connections=%lu\n", max_connections); + fprintf(stderr, "max_threads=%u\n", thread_scheduler.max_threads); fprintf(stderr, "threads_connected=%u\n", thread_count); fprintf(stderr, "It is possible that mysqld could use up to \n\ -key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %lu K\n\ +key_buffer_size + (read_buffer_size + sort_buffer_size)*max_threads = %lu K\n\ bytes of memory\n", ((ulong) dflt_key_cache->key_cache_mem_size + (global_system_variables.read_buff_size + global_system_variables.sortbuff_size) * - max_connections)/ 1024); + thread_scheduler.max_threads + + max_connections * sizeof(THD)) / 1024); fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n"); #if defined(HAVE_LINUXTHREADS) @@ -2212,7 +2289,9 @@ static void init_signals(void) DBUG_ENTER("init_signals"); if (test_flags & TEST_SIGINT) - my_sigset(THR_KILL_SIGNAL,end_thread_signal); + { + my_sigset(thr_kill_signal, end_thread_signal); + } my_sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called! 
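one_thread_per_connection_end() above either caches the finished thread (returning 0 so the connection handler can loop and pick up the next queued connection) or ends it with pthread_exit(). A simplified, self-contained sketch of that reuse loop; Conn, serve_connection() and end_or_reuse() are hypothetical stand-ins for THD, the command loop and the real function:

#include <cstdio>

struct Conn { int id; };

// Hypothetical per-connection work; the server loops over client commands here.
static void serve_connection(Conn *c) { std::printf("serving connection %d\n", c->id); }

// Stand-in for one_thread_per_connection_end(): true means "thread really ends",
// false means "a queued connection was attached to this thread, keep going".
static bool end_or_reuse(Conn **next)
{
  static Conn queued = {2};
  static bool handed_out = false;
  if (!handed_out) {              // pretend the thread cache handed us new work
    handed_out = true;
    *next = &queued;
    return false;                 // like returning 0: reuse this thread
  }
  return true;                    // nothing cached: let the thread terminate
}

// Shape of the connection handler after this patch: loop while the end
// function reports that the thread was reused for another connection.
static void connection_thread(Conn *first)
{
  Conn *current = first;
  do {
    serve_connection(current);
  } while (!end_or_reuse(&current));
}

int main()
{
  Conn c = {1};
  connection_thread(&c);
  return 0;
}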
if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL)) @@ -2267,10 +2346,13 @@ static void init_signals(void) #ifdef SIGTSTP sigaddset(&set,SIGTSTP); #endif - sigaddset(&set,THR_SERVER_ALARM); + if (thd_lib_detected != THD_LIB_LT) + sigaddset(&set,THR_SERVER_ALARM); if (test_flags & TEST_SIGINT) - sigdelset(&set,THR_KILL_SIGNAL); // May be SIGINT - sigdelset(&set,THR_CLIENT_ALARM); // For alarms + { + // May be SIGINT + sigdelset(&set, thr_kill_signal); + } sigprocmask(SIG_SETMASK,&set,NULL); pthread_sigmask(SIG_SETMASK,&set,NULL); DBUG_VOID_RETURN; @@ -2331,26 +2413,22 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) This should actually be '+ max_number_of_slaves' instead of +10, but the +10 should be quite safe. */ - init_thr_alarm(max_connections + + init_thr_alarm(thread_scheduler.max_threads + global_system_variables.max_insert_delayed_threads + 10); -#if SIGINT != THR_KILL_SIGNAL - if (test_flags & TEST_SIGINT) + if (thd_lib_detected != THD_LIB_LT && (test_flags & TEST_SIGINT)) { (void) sigemptyset(&set); // Setup up SIGINT for debug (void) sigaddset(&set,SIGINT); // For debugging (void) pthread_sigmask(SIG_UNBLOCK,&set,NULL); } -#endif (void) sigemptyset(&set); // Setup up SIGINT for debug #ifdef USE_ONE_SIGNAL_HAND (void) sigaddset(&set,THR_SERVER_ALARM); // For alarms #endif #ifndef IGNORE_SIGHUP_SIGQUIT (void) sigaddset(&set,SIGQUIT); -#if THR_CLIENT_ALARM != SIGHUP (void) sigaddset(&set,SIGHUP); #endif -#endif (void) sigaddset(&set,SIGTERM); (void) sigaddset(&set,SIGTSTP); @@ -2477,6 +2555,14 @@ static int my_message_sql(uint error, const char *str, myf MyFlags) */ if ((thd= current_thd)) { + /* + TODO: There are two exceptions mechanism (THD and sp_rcontext), + this could be improved by having a common stack of handlers. + */ + if (thd->handle_error(error, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + DBUG_RETURN(0); + if (thd->spcont && thd->spcont->handle_error(error, MYSQL_ERROR::WARN_LEVEL_ERROR, thd)) { @@ -2535,18 +2621,6 @@ static void my_str_free_mysqld(void *ptr) #ifdef __WIN__ -struct utsname -{ - char nodename[FN_REFLEN]; -}; - - -int uname(struct utsname *a) -{ - return -1; -} - - pthread_handler_t handle_shutdown(void *arg) { MSG msg; @@ -2675,9 +2749,15 @@ static int init_common_variables(const char *conf_file_name, int argc, */ mysql_bin_log.init_pthread_objects(); - if (gethostname(glob_hostname,sizeof(glob_hostname)-4) < 0) - strmov(glob_hostname,"mysql"); - strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5); + if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0) + { + strmake(glob_hostname, STRING_WITH_LEN("localhost")); + sql_print_warning("gethostname failed, using '%s' as hostname", + glob_hostname); + strmake(pidfile_name, STRING_WITH_LEN("mysql")); + } + else + strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5); strmov(fn_ext(pidfile_name),".pid"); // Add proper extension /* @@ -2774,10 +2854,33 @@ static int init_common_variables(const char *conf_file_name, int argc, #ifdef USE_REGEX my_regex_init(&my_charset_latin1); #endif - if (!(default_charset_info= get_charset_by_csname(default_character_set_name, - MY_CS_PRIMARY, - MYF(MY_WME)))) - return 1; + /* + Process a comma-separated character set list and choose + the first available character set. This is mostly for + test purposes, to be able to start "mysqld" even if + the requested character set is not available (see bug#18743). 
+ */ + for (;;) + { + char *next_character_set_name= strchr(default_character_set_name, ','); + if (next_character_set_name) + *next_character_set_name++= '\0'; + if (!(default_charset_info= + get_charset_by_csname(default_character_set_name, + MY_CS_PRIMARY, MYF(MY_WME)))) + { + if (next_character_set_name) + { + default_character_set_name= next_character_set_name; + default_collation_name= 0; // Ignore collation + } + else + return 1; // Eof of the list + } + else + break; + } + if (default_collation_name) { CHARSET_INFO *default_collation; @@ -3058,12 +3161,12 @@ static void init_ssl() if (!ssl_acceptor_fd) { opt_use_ssl = 0; - have_openssl= SHOW_OPTION_DISABLED; + have_ssl= SHOW_OPTION_DISABLED; } } else { - have_openssl= SHOW_OPTION_DISABLED; + have_ssl= SHOW_OPTION_DISABLED; } if (des_key_file) load_des_key_file(des_key_file); @@ -3112,7 +3215,7 @@ static int init_server_components() if (opt_error_log) { if (!log_error_file_ptr[0]) - fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", + fn_format(log_error_file, pidfile_name, mysql_data_home, ".err", MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */ else fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err", @@ -3193,17 +3296,24 @@ with --log-bin instead."); "--log-slave-updates work."); unireg_abort(1); } - - if (!opt_bin_log && (global_system_variables.binlog_format != BINLOG_FORMAT_UNSPEC)) - { - sql_print_error("You need to use --log-bin to make " - "--binlog-format work."); - unireg_abort(1); - } - if (global_system_variables.binlog_format == BINLOG_FORMAT_UNSPEC) - { + if (!opt_bin_log) + if (opt_binlog_format_id != BINLOG_FORMAT_UNSPEC) + { + sql_print_error("You need to use --log-bin to make " + "--binlog-format work."); + unireg_abort(1); + } + else + { + global_system_variables.binlog_format= BINLOG_FORMAT_UNSPEC; + } + else + if (opt_binlog_format_id == BINLOG_FORMAT_UNSPEC) global_system_variables.binlog_format= BINLOG_FORMAT_MIXED; - } + else + { + DBUG_ASSERT(global_system_variables.binlog_format != BINLOG_FORMAT_UNSPEC); + } /* Check that we have not let the format to unspecified at this point */ DBUG_ASSERT((uint)global_system_variables.binlog_format <= @@ -3339,7 +3449,7 @@ server."); (TC_LOG *) &tc_log_mmap) : (TC_LOG *) &tc_log_dummy); - if (tc_log->open(opt_bin_logname)) + if (tc_log->open(opt_bin_log ? opt_bin_logname : opt_tc_log_file)) { sql_print_error("Can't init tc log"); unireg_abort(1); @@ -3357,7 +3467,7 @@ server."); #ifdef HAVE_REPLICATION if (opt_bin_log && expire_logs_days) { - long purge_time= time(0) - expire_logs_days*24*60*60; + long purge_time= (long) (time(0) - expire_logs_days*24*60*60); if (purge_time >= 0) mysql_bin_log.purge_logs_before_date(purge_time); } @@ -3512,6 +3622,13 @@ int main(int argc, char **argv) MY_INIT(argv[0]); // init my_sys library & pthreads /* nothing should come before this line ^^^ */ + /* Set signal used to kill MySQL */ +#if defined(SIGUSR2) + thr_kill_signal= thd_lib_detected == THD_LIB_LT ? SIGINT : SIGUSR2; +#else + thr_kill_signal= SIGINT; +#endif + /* Perform basic logger initialization logger. Should be called after MY_INIT, as it initializes mutexes. Log tables are inited later. @@ -3691,6 +3808,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); udf_init(); #endif } + init_status_vars(); if (opt_bootstrap) /* If running with bootstrap, do not start replication. 
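/*
  Aside (not from the patch): the loop added above walks a comma-separated
  character set list and falls back to the next entry whenever a set is
  unavailable.  A minimal sketch of that "first available entry wins" idea;
  lookup_charset() is an assumed stand-in for get_charset_by_csname().
*/
#include <cstring>
#include <cstdio>

static bool lookup_charset(const char *name)       // pretend only latin1 is compiled in
{
  return std::strcmp(name, "latin1") == 0;
}

static const char *first_available_charset(char *list)
{
  for (char *name= list; name; )
  {
    char *next= std::strchr(name, ',');
    if (next)
      *next++= '\0';                               // terminate the current name
    if (lookup_charset(name))
      return name;                                 // found a usable character set
    name= next;                                    // otherwise try the next list entry
  }
  return 0;                                        // end of list, nothing usable
}

int main()
{
  char list[]= "utf8_bogus,latin1,ascii";
  const char *cs= first_available_charset(list);
  std::printf("using %s\n", cs ? cs : "(none)");
  return 0;
}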
*/ opt_skip_slave_start= 1; @@ -3737,6 +3855,10 @@ we force server id to 2, but this MySQL server will not act as a slave."); if (Events::get_instance()->init()) unireg_abort(1); } + else + { + Events::opt_event_scheduler = Events::EVENTS_DISABLED; + } /* Signal threads waiting for server to be started */ pthread_mutex_lock(&LOCK_server_started); @@ -4009,7 +4131,7 @@ static void bootstrap(FILE *file) my_net_init(&thd->net,(st_vio*) 0); thd->max_client_packet_length= thd->net.max_packet; thd->security_ctx->master_access= ~(ulong)0; - thd->thread_id=thread_id++; + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; thread_count++; bootstrap_file=file; @@ -4052,6 +4174,74 @@ static bool read_init_file(char *file_name) #ifndef EMBEDDED_LIBRARY + +/* + Simple scheduler that use the main thread to handle the request + + NOTES + This is only used for debugging, when starting mysqld with + --thread-handling=no-threads or --one-thread + + When we enter this function, LOCK_thread_count is hold! +*/ + +void handle_connection_in_main_thread(THD *thd) +{ + safe_mutex_assert_owner(&LOCK_thread_count); + thread_cache_size=0; // Safety + threads.append(thd); + (void) pthread_mutex_unlock(&LOCK_thread_count); + handle_one_connection((void*) thd); +} + + +/* + Scheduler that uses one thread per connection +*/ + +void create_thread_to_handle_connection(THD *thd) +{ + if (cached_thread_count > wake_thread) + { + /* Get thread from cache */ + thread_cache.append(thd); + wake_thread++; + pthread_cond_signal(&COND_thread_cache); + } + else + { + /* Create new thread to handle connection */ + int error; + thread_created++; + threads.append(thd); + DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id)); + thd->connect_time = time(NULL); + if ((error=pthread_create(&thd->real_id,&connection_attrib, + handle_one_connection, + (void*) thd))) + { + /* purify: begin inspected */ + DBUG_PRINT("error", + ("Can't create thread to handle request (error %d)", + error)); + thread_count--; + thd->killed= THD::KILL_CONNECTION; // Safety + (void) pthread_mutex_unlock(&LOCK_thread_count); + statistic_increment(aborted_connects,&LOCK_status); + net_printf_error(thd, ER_CANT_CREATE_THREAD, error); + (void) pthread_mutex_lock(&LOCK_thread_count); + close_connection(thd,0,0); + delete thd; + (void) pthread_mutex_unlock(&LOCK_thread_count); + return; + /* purecov: end */ + } + } + (void) pthread_mutex_unlock(&LOCK_thread_count); + DBUG_PRINT("info",("Thread created")); +} + + /* Create new thread to handle incoming connection. 
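/*
  Aside (illustrative only): the new code above always hands a connection to
  thread_scheduler.add_connection() and lets the chosen scheduler decide
  whether to run it inline or spawn a dedicated thread.  Sketch of such a
  dispatch object; the member names below are assumptions for illustration,
  not the real scheduler_functions layout from scheduler.h.
*/
#include <cstdio>

struct Connection;                                  // stand-in for THD

struct Scheduler_sketch
{
  unsigned max_threads;
  void (*add_connection)(Connection *c);
  void (*end_thread)(Connection *c, bool cache_it);
};

static void inline_add(Connection *c)
{ std::printf("handling connection in the main thread\n"); (void) c; }

static void threaded_add(Connection *c)
{ std::printf("creating a dedicated thread for the connection\n"); (void) c; }

static void noop_end(Connection *, bool) {}

/* Chosen once at startup from --thread-handling, then used everywhere. */
static Scheduler_sketch one_thread_scheduler_sketch=     { 1,     inline_add,   noop_end };
static Scheduler_sketch per_connection_scheduler_sketch= { 10000, threaded_add, noop_end };

int main()
{
  Scheduler_sketch *thread_scheduler= &per_connection_scheduler_sketch;
  thread_scheduler->add_connection(0);              // create_new_thread() boils down to this call
  thread_scheduler= &one_thread_scheduler_sketch;   // e.g. --thread-handling=no-threads
  thread_scheduler->add_connection(0);
  return 0;
}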
@@ -4088,64 +4278,15 @@ static void create_new_thread(THD *thd) DBUG_VOID_RETURN; } pthread_mutex_lock(&LOCK_thread_count); - thd->thread_id=thread_id++; - - thd->real_id=pthread_self(); // Keep purify happy + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; /* Start a new thread to handle connection */ thread_count++; -#ifdef ONE_THREAD - if (test_flags & TEST_NO_THREADS) // For debugging under Linux - { - thread_cache_size=0; // Safety - threads.append(thd); - thd->real_id=pthread_self(); - (void) pthread_mutex_unlock(&LOCK_thread_count); - handle_one_connection((void*) thd); - } - else -#endif - { - if (thread_count-delayed_insert_threads > max_used_connections) - max_used_connections=thread_count-delayed_insert_threads; - - if (cached_thread_count > wake_thread) - { - thread_cache.append(thd); - wake_thread++; - pthread_cond_signal(&COND_thread_cache); - } - else - { - int error; - thread_created++; - threads.append(thd); - DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id)); - thd->connect_time = time(NULL); - if ((error=pthread_create(&thd->real_id,&connection_attrib, - handle_one_connection, - (void*) thd))) - { - DBUG_PRINT("error", - ("Can't create thread to handle request (error %d)", - error)); - thread_count--; - thd->killed= THD::KILL_CONNECTION; // Safety - (void) pthread_mutex_unlock(&LOCK_thread_count); - statistic_increment(aborted_connects,&LOCK_status); - net_printf_error(thd, ER_CANT_CREATE_THREAD, error); - (void) pthread_mutex_lock(&LOCK_thread_count); - close_connection(thd,0,0); - delete thd; - (void) pthread_mutex_unlock(&LOCK_thread_count); - DBUG_VOID_RETURN; - } - } - (void) pthread_mutex_unlock(&LOCK_thread_count); + if (thread_count - delayed_insert_threads > max_used_connections) + max_used_connections= thread_count - delayed_insert_threads; - } - DBUG_PRINT("info",("Thread created")); + thread_scheduler.add_connection(thd); DBUG_VOID_RETURN; } #endif /* EMBEDDED_LIBRARY */ @@ -4845,6 +4986,7 @@ enum options_mysqld OPT_INNODB_SYNC_SPIN_LOOPS, OPT_INNODB_CONCURRENCY_TICKETS, OPT_INNODB_THREAD_SLEEP_DELAY, + OPT_INNODB_STATS_ON_METADATA, OPT_BDB_CACHE_SIZE, OPT_BDB_CACHE_PARTS, OPT_BDB_LOG_BUFFER_SIZE, @@ -4895,7 +5037,10 @@ enum options_mysqld OPT_GENERAL_LOG, OPT_SLOW_LOG, OPT_MERGE, - OPT_INNODB_ROLLBACK_ON_TIMEOUT + OPT_THREAD_HANDLING, + OPT_INNODB_ROLLBACK_ON_TIMEOUT, + OPT_SECURE_FILE_PRIV, + OPT_OLD_MODE }; @@ -4946,6 +5091,7 @@ struct my_option my_long_options[] = (gptr*) &my_bind_addr_str, (gptr*) &my_bind_addr_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"binlog_format", OPT_BINLOG_FORMAT, + "Does not have any effect without '--log-bin'. " "Tell the master the form of binary logging to use: either 'row' for " "row-based binary logging, or 'statement' for statement-based binary " "logging, or 'mixed'. 'mixed' is statement-based binary logging except " @@ -4953,11 +5099,12 @@ struct my_option my_long_options[] = "involve user-defined functions (i.e. UDFs) or the UUID() function; for " "those, row-based binary logging is automatically used. " #ifdef HAVE_NDB_BINLOG - "If ndbcluster is enabled, the default is 'row'." + "If ndbcluster is enabled and binlog_format is `mixed', the format switches" + " to 'row' and back implicitly per each query accessing a NDB table." 
#endif - , 0, 0, 0, GET_STR, REQUIRED_ARG, - BINLOG_FORMAT_MIXED - , 0, 0, 0, 0, 0 }, + ,(gptr*) &opt_binlog_format, (gptr*) &opt_binlog_format, + 0, GET_STR, REQUIRED_ARG, BINLOG_FORMAT_MIXED, BINLOG_FORMAT_STMT, + BINLOG_FORMAT_MIXED, 0, 0, 0}, {"binlog-do-db", OPT_BINLOG_DO_DB, "Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -5072,7 +5219,7 @@ struct my_option my_long_options[] = "Push supported query conditions to the storage engine.", (gptr*) &global_system_variables.engine_condition_pushdown, (gptr*) &global_system_variables.engine_condition_pushdown, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, /* See how it's handled in get_one_option() */ {"event-scheduler", OPT_EVENT_SCHEDULER, "Enable/disable the event scheduler.", NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, @@ -5188,6 +5335,10 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file", (gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_stats_on_metadata", OPT_INNODB_STATS_ON_METADATA, + "Enable statistics gathering for metadata commands such as SHOW TABLE STATUS (on by default)", + (gptr*) &innobase_stats_on_metadata, (gptr*) &innobase_stats_on_metadata, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"innodb_support_xa", OPT_INNODB_SUPPORT_XA, "Enable InnoDB support for the XA two-phase commit", (gptr*) &global_system_variables.innodb_support_xa, @@ -5502,11 +5653,9 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.old_passwords, (gptr*) &max_system_variables.old_passwords, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, -#ifdef ONE_THREAD {"one-thread", OPT_ONE_THREAD, - "Only use one thread (for debugging under Linux).", 0, 0, 0, GET_NO_ARG, - NO_ARG, 0, 0, 0, 0, 0, 0}, -#endif + "(deprecated): Only use one thread (for debugging under Linux). Use thread-handling=no-threads instead", + 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"old-style-user-limits", OPT_OLD_STYLE_USER_LIMITS, "Enable old-style user limits (before 5.0.3 user resources were counted per each user+host vs. per account)", (gptr*) &opt_old_style_user_limits, (gptr*) &opt_old_style_user_limits, @@ -5598,6 +5747,10 @@ Can't be set to 1 if --log-slave-updates is used.", {"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.", (gptr*) &opt_secure_auth, (gptr*) &opt_secure_auth, 0, GET_BOOL, NO_ARG, my_bool(0), 0, 0, 0, 0, 0}, + {"secure-file-priv", OPT_SECURE_FILE_PRIV, + "Limit LOAD DATA, SELECT ... 
OUTFILE, and LOAD_FILE() to files within specified directory", + (gptr*) &opt_secure_file_priv, (gptr*) &opt_secure_file_priv, 0, + GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"server-id", OPT_SERVER_ID, "Uniquely identifies the server instance in the community of replication partners.", (gptr*) &server_id, (gptr*) &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, @@ -5985,7 +6138,7 @@ The minimum value for this variable is 4096.", // children, to avoid "too many connections" error in a common setup {"max_connections", OPT_MAX_CONNECTIONS, "The number of simultaneous clients allowed.", (gptr*) &max_connections, - (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 151, 1, 16384, 0, 1, + (gptr*) &max_connections, 0, GET_ULONG, REQUIRED_ARG, 151, 1, 100000, 0, 1, 0}, {"max_delayed_threads", OPT_MAX_DELAYED_THREADS, "Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero, which means INSERT DELAYED is not used.", @@ -6118,6 +6271,10 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.net_write_timeout, (gptr*) &max_system_variables.net_write_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WRITE_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, + { "old", OPT_OLD_MODE, "Use compatible behavior.", + (gptr*) &global_system_variables.old_mode, + (gptr*) &max_system_variables.old_mode, 0, GET_BOOL, NO_ARG, + 0, 0, 0, 0, 0, 0}, {"open_files_limit", OPT_OPEN_FILES_LIMIT, "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", (gptr*) &open_files_limit, (gptr*) &open_files_limit, 0, GET_ULONG, @@ -6276,6 +6433,12 @@ The minimum value for this variable is 4096.", "Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.", (gptr*) &concurrency, (gptr*) &concurrency, 0, GET_ULONG, REQUIRED_ARG, DEFAULT_CONCURRENCY, 1, 512, 0, 1, 0}, +#if HAVE_POOL_OF_THREADS == 1 + {"thread_pool_size", OPT_THREAD_CACHE_SIZE, + "How many threads we should create to handle query requests in case of 'thread_handling=pool-of-threads'", + (gptr*) &thread_pool_size, (gptr*) &thread_pool_size, 0, GET_ULONG, + REQUIRED_ARG, 20, 1, 16384, 0, 1, 0}, +#endif {"thread_stack", OPT_THREAD_STACK, "The stack size for each thread.", (gptr*) &thread_stack, (gptr*) &thread_stack, 0, GET_ULONG, REQUIRED_ARG,DEFAULT_THREAD_STACK, @@ -6291,15 +6454,19 @@ The minimum value for this variable is 4096.", (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL, REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0}, {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE, - "Allocation block size for transactions to be stored in binary log", + "Allocation block size for various transaction-related structures", (gptr*) &global_system_variables.trans_alloc_block_size, (gptr*) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, - "Persistent buffer for transactions to be stored in binary log", + "Persistent buffer for various transaction-related structures", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + {"thread_handling", OPT_THREAD_HANDLING, + "Define threads 
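/*
  Aside (conceptual sketch, not the server's pool implementation): with
  thread_handling=pool-of-threads a fixed number of workers, sized by
  thread_pool_size, pull work off a shared queue instead of each connection
  owning its own thread.  Everything below is illustrative C++11.
*/
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
#include <cstdio>

class PoolSketch
{
  std::mutex m;
  std::condition_variable cond;
  std::queue<int> work;                 // pending "connection events"
  bool done= false;
  std::vector<std::thread> workers;

  void run()
  {
    for (;;)
    {
      int item;
      {
        std::unique_lock<std::mutex> lk(m);
        cond.wait(lk, [this]{ return done || !work.empty(); });
        if (work.empty())
          return;                       // shutting down and queue is drained
        item= work.front();
        work.pop();
      }
      std::printf("processed event %d\n", item);
    }
  }

public:
  explicit PoolSketch(unsigned pool_size)          // think: thread_pool_size
  {
    for (unsigned i= 0; i < pool_size; i++)
      workers.emplace_back(&PoolSketch::run, this);
  }
  void submit(int item)
  {
    { std::lock_guard<std::mutex> lk(m); work.push(item); }
    cond.notify_one();
  }
  ~PoolSketch()
  {
    { std::lock_guard<std::mutex> lk(m); done= true; }
    cond.notify_all();
    for (size_t i= 0; i < workers.size(); i++)
      workers[i].join();
  }
};

int main()
{
  PoolSketch pool(4);
  for (int i= 0; i < 8; i++)
    pool.submit(i);
  return 0;                             // destructor drains the queue and joins the workers
}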
usage for handling queries: " + "one-thread-per-connection or no-threads", 0, 0, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlying table is used in queries with a LIMIT clause for updating. 0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", (gptr*) &global_system_variables.updatable_views_with_limit, @@ -6565,12 +6732,20 @@ static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var, char *bu return 0; } -/* Functions relying on SSL */ +/* + Functions relying on SSL + Note: In the show_ssl_* functions, we need to check if we have a + valid vio-object since this isn't always true, specifically + when session_status or global_status is requested from + inside an Event. + */ static int show_ssl_get_version(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; - var->value= const_cast<char*>(thd->net.vio->ssl_arg ? - SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + var->value= const_cast<char*>(SSL_get_version((SSL*) thd->net.vio->ssl_arg)); + else + var->value= (char *)""; return 0; } @@ -6578,9 +6753,10 @@ static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_session_reused((SSL*) thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_session_reused((SSL*) thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6588,9 +6764,10 @@ static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6598,9 +6775,10 @@ static int show_ssl_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->net.vio && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6608,17 +6786,20 @@ static int show_ssl_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } static int show_ssl_get_cipher(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; - var->value= const_cast<char*>(thd->net.vio->ssl_arg ? 
- SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : ""); + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + var->value= const_cast<char*>(SSL_get_cipher((SSL*) thd->net.vio->ssl_arg)); + else + var->value= (char *)""; return 0; } @@ -6626,7 +6807,7 @@ static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; var->value= buff; - if (thd->net.vio->ssl_arg) + if (thd->vio_ok() && thd->net.vio->ssl_arg) { int i; const char *p; @@ -6964,6 +7145,7 @@ static void mysql_init_variables(void) opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0; opt_tc_log_file= (char *)"tc.log"; // no hostname in tc_log file name ! opt_secure_auth= 0; + opt_secure_file_priv= 0; opt_bootstrap= opt_myisam_log= 0; mqh_used= 0; segfaulted= kill_in_progress= 0; @@ -7015,7 +7197,7 @@ static void mysql_init_variables(void) OPTION_QUOTE_SHOW_CREATE | OPTION_SQL_NOTES); protocol_version= PROTOCOL_VERSION; what_to_log= ~ (1L << (uint) COM_TIME); - refresh_version= flush_version= 1L; /* Increments on each reload */ + refresh_version= 1L; /* Increments on each reload */ global_query_id= thread_id= 1L; strmov(server_version, MYSQL_SERVER_VERSION); myisam_recover_options_str= sql_mode_str= "OFF"; @@ -7108,9 +7290,9 @@ static void mysql_init_variables(void) have_ndbcluster=SHOW_OPTION_NO; #endif #ifdef HAVE_OPENSSL - have_openssl=SHOW_OPTION_YES; + have_ssl=SHOW_OPTION_YES; #else - have_openssl=SHOW_OPTION_NO; + have_ssl=SHOW_OPTION_NO; #endif #ifdef HAVE_BROKEN_REALPATH have_symlink=SHOW_OPTION_NO; @@ -7241,7 +7423,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), break; case 'T': test_flags= argument ? (uint) atoi(argument) : 0; - test_flags&= ~TEST_NO_THREADS; opt_endinfo=1; break; case (int) OPT_BIG_TABLES: @@ -7334,7 +7515,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), binlog_format_names[BINLOG_FORMAT_MIXED]); exit(1); } - global_system_variables.binlog_format= id-1; + global_system_variables.binlog_format= opt_binlog_format_id= id - 1; break; } case (int)OPT_BINLOG_DO_DB: @@ -7473,11 +7654,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), opt_skip_show_db=1; opt_specialflag|=SPECIAL_SKIP_SHOW_DB; break; -#ifdef ONE_THREAD - case (int) OPT_ONE_THREAD: - test_flags |= TEST_NO_THREADS; -#endif - break; case (int) OPT_WANT_CORE: test_flags |= TEST_CORE_ON_SIGNAL; break; @@ -7518,6 +7694,29 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int) OPT_STANDALONE: /* Dummy option for NT */ break; #endif + /* + The following change issues a deprecation warning if the slave + configuration is specified either in the my.cnf file or on + the command-line. See BUG#21490. 
+ */ + case OPT_MASTER_HOST: + case OPT_MASTER_USER: + case OPT_MASTER_PASSWORD: + case OPT_MASTER_PORT: + case OPT_MASTER_CONNECT_RETRY: + case OPT_MASTER_SSL: + case OPT_MASTER_SSL_KEY: + case OPT_MASTER_SSL_CERT: + case OPT_MASTER_SSL_CAPATH: + case OPT_MASTER_SSL_CIPHER: + case OPT_MASTER_SSL_CA: + if (!slave_warning_issued) //only show the warning once + { + slave_warning_issued = true; + WARN_DEPRECATED(NULL, "5.2", "for replication startup options", + "'CHANGE MASTER'"); + } + break; case OPT_CONSOLE: if (opt_console) opt_error_log= 0; // Force logs to stdout @@ -7723,6 +7922,23 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), sql_mode); break; } + case OPT_ONE_THREAD: + global_system_variables.thread_handling= 2; + break; + case OPT_THREAD_HANDLING: + { + if ((global_system_variables.thread_handling= + find_type(argument, &thread_handling_typelib, 2)) <= 0 || + (global_system_variables.thread_handling == SCHEDULER_POOL_OF_THREADS + && !HAVE_POOL_OF_THREADS)) + { + /* purecov: begin tested */ + fprintf(stderr,"Unknown/unsupported thread-handling: %s\n",argument); + exit(1); + /* purecov: end */ + } + break; + } case OPT_FT_BOOLEAN_SYNTAX: if (ft_boolean_check_syntax_string((byte*) argument)) { @@ -7843,6 +8059,7 @@ static void get_options(int argc,char **argv) if (mysqld_chroot) set_root(mysqld_chroot); #else + global_system_variables.thread_handling = SCHEDULER_NO_THREADS; max_allowed_packet= global_system_variables.max_allowed_packet; net_buffer_length= global_system_variables.net_buffer_length; #endif @@ -7873,6 +8090,17 @@ static void get_options(int argc,char **argv) &global_system_variables.datetime_format)) exit(1); +#ifdef EMBEDDED_LIBRARY + one_thread_scheduler(&thread_scheduler); +#else + if (global_system_variables.thread_handling <= + SCHEDULER_ONE_THREAD_PER_CONNECTION) + one_thread_per_connection_scheduler(&thread_scheduler); + else if (global_system_variables.thread_handling == SCHEDULER_NO_THREADS) + one_thread_scheduler(&thread_scheduler); + else + pool_of_threads_scheduler(&thread_scheduler); /* purecov: tested */ +#endif } @@ -7984,6 +8212,16 @@ static void fix_paths(void) exit(1); } #endif /* HAVE_REPLICATION */ + /* + Convert the secure-file-priv option to system format, allowing + a quick strcmp to check if read or write is in an allowed dir + */ + if (opt_secure_file_priv) + { + convert_dirname(buff, opt_secure_file_priv, NullS); + my_free(opt_secure_file_priv, MYF(0)); + opt_secure_file_priv= my_strdup(buff, MYF(MY_FAE)); + } } @@ -8179,7 +8417,8 @@ my_bool innobase_log_archive, innobase_use_checksums, innobase_file_per_table, innobase_locks_unsafe_for_binlog, - innobase_rollback_on_timeout; + innobase_rollback_on_timeout, + innobase_stats_on_metadata; extern "C" { ulong srv_max_buf_pool_modified_pct; @@ -8213,5 +8452,3 @@ template class I_List<NAMED_LIST>; template class I_List<Statement>; template class I_List_iterator<Statement>; #endif - - diff --git a/sql/mysqld.cc.rej b/sql/mysqld.cc.rej deleted file mode 100644 index 62f0357622d..00000000000 --- a/sql/mysqld.cc.rej +++ /dev/null @@ -1,17 +0,0 @@ -*************** -*** 5316,5322 **** - (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \ - --skip-merge.", -! 
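/*
  Aside (illustrative): the fix_paths() hunk above normalizes
  --secure-file-priv once so later code can use a cheap prefix comparison to
  decide whether LOAD DATA / SELECT ... INTO OUTFILE / LOAD_FILE() may touch
  a given path.  Sketch of that idea only; a real check must also
  canonicalize the target path (symlinks, "..", case) before comparing.
*/
#include <cstring>
#include <cstdio>

/* Is 'path' inside the allowed directory 'secure_dir' (normalized, trailing '/')? */
static bool under_secure_dir(const char *secure_dir, const char *path)
{
  size_t len= std::strlen(secure_dir);
  return std::strncmp(path, secure_dir, len) == 0;
}

int main()
{
  const char *secure_file_priv= "/var/lib/mysql-files/";   // already run through convert_dirname()
  std::printf("%d\n", under_secure_dir(secure_file_priv, "/var/lib/mysql-files/out.csv")); // 1
  std::printf("%d\n", under_secure_dir(secure_file_priv, "/etc/passwd"));                  // 0
  return 0;
}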
(gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0}, - {"myisam-recover", OPT_MYISAM_RECOVER, - "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", - (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, ---- 5336,5342 ---- - (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"merge", OPT_MERGE, "Enable Merge storage engine. Disable with \ - --skip-merge.", -! (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, - {"myisam-recover", OPT_MYISAM_RECOVER, - "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", - (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, diff --git a/sql/net_serv.cc b/sql/net_serv.cc index e9533495650..2156888b8cf 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -47,9 +47,6 @@ #include <violite.h> #include <signal.h> #include <errno.h> -#ifdef __WIN__ -#include <winsock.h> -#endif #ifdef __NETWARE__ #include <sys/select.h> #endif diff --git a/sql/opt_range.cc b/sql/opt_range.cc index f613b1b9f02..f5cf79bd609 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -84,7 +84,7 @@ static int sel_cmp(Field *f,char *a,char *b,uint8 a_flag,uint8 b_flag); static char is_null_string[2]= {1,0}; - +class RANGE_OPT_PARAM; /* A construction block of the SEL_ARG-graph. @@ -170,6 +170,89 @@ static char is_null_string[2]= {1,0}; - get_quick_select() - Walk the SEL_ARG, materialize the key intervals, and create QUICK_RANGE_SELECT object that will read records within these intervals. + + 4. SPACE COMPLEXITY NOTES + + SEL_ARG graph is a representation of an ordered disjoint sequence of + intervals over the ordered set of index tuple values. + + For multi-part keys, one can construct a WHERE expression such that its + list of intervals will be of combinatorial size. Here is an example: + + (keypart1 IN (1,2, ..., n1)) AND + (keypart2 IN (1,2, ..., n2)) AND + (keypart3 IN (1,2, ..., n3)) + + For this WHERE clause the list of intervals will have n1*n2*n3 intervals + of form + + (keypart1, keypart2, keypart3) = (k1, k2, k3), where 1 <= k{i} <= n{i} + + SEL_ARG graph structure aims to reduce the amount of required space by + "sharing" the elementary intervals when possible (the pic at the + beginning of this comment has examples of such sharing). The sharing may + prevent combinatorial blowup: + + There are WHERE clauses that have combinatorial-size interval lists but + will be represented by a compact SEL_ARG graph. + Example: + (keypartN IN (1,2, ..., n1)) AND + ... + (keypart2 IN (1,2, ..., n2)) AND + (keypart1 IN (1,2, ..., n3)) + + but not in all cases: + + - There are WHERE clauses that do have a compact SEL_ARG-graph + representation but get_mm_tree() and its callees will construct a + graph of combinatorial size. + Example: + (keypart1 IN (1,2, ..., n1)) AND + (keypart2 IN (1,2, ..., n2)) AND + ... + (keypartN IN (1,2, ..., n3)) + + - There are WHERE clauses for which the minimal possible SEL_ARG graph + representation will have combinatorial size. 
+ Example: + By induction: Let's take any interval on some keypart in the middle: + + kp15=c0 + + Then let's AND it with this interval 'structure' from preceding and + following keyparts: + + (kp14=c1 AND kp16=c3) OR keypart14=c2) (*) + + We will obtain this SEL_ARG graph: + + kp14 $ kp15 $ kp16 + $ $ + +---------+ $ +---------+ $ +---------+ + | kp14=c1 |--$-->| kp15=c0 |--$-->| kp16=c3 | + +---------+ $ +---------+ $ +---------+ + | $ $ + +---------+ $ +---------+ $ + | kp14=c2 |--$-->| kp15=c0 | $ + +---------+ $ +---------+ $ + $ $ + + Note that we had to duplicate "kp15=c0" and there was no way to avoid + that. + The induction step: AND the obtained expression with another "wrapping" + expression like (*). + When the process ends because of the limit on max. number of keyparts + we'll have: + + WHERE clause length is O(3*#max_keyparts) + SEL_ARG graph size is O(2^(#max_keyparts/2)) + + (it is also possible to construct a case where instead of 2 in 2^n we + have a bigger constant, e.g. 4, and get a graph with 4^(31/2)= 2^31 + nodes) + + We avoid consuming too much memory by setting a limit on the number of + SEL_ARG object we can construct during one range analysis invocation. */ class SEL_ARG :public Sql_alloc @@ -200,6 +283,8 @@ public: enum leaf_color { BLACK,RED } color; enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type; + enum { MAX_SEL_ARGS = 16000 }; + SEL_ARG() {} SEL_ARG(SEL_ARG &); SEL_ARG(Field *,const char *,const char *); @@ -271,7 +356,7 @@ public: return new SEL_ARG(field, part, min_value, arg->max_value, min_flag, arg->max_flag, maybe_flag | arg->maybe_flag); } - SEL_ARG *clone(SEL_ARG *new_parent,SEL_ARG **next); + SEL_ARG *clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, SEL_ARG **next); bool copy_min(SEL_ARG* arg) { // Get overlapping range @@ -312,7 +397,8 @@ public: min_value=arg->max_value; min_flag=arg->max_flag & NEAR_MAX ? 
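/*
  Aside (not the SEL_ARG code): the note above explains why ANDed IN-lists
  over several key parts multiply into n1*n2*... point intervals.  A tiny
  sketch of the bail-out idea behind MAX_SEL_ARGS: stop range analysis once
  the interval count would exceed a fixed budget.
*/
#include <cstdio>

static const unsigned long MAX_INTERVALS= 16000;    // mirrors SEL_ARG::MAX_SEL_ARGS

/* Returns 0 if keypart1 IN (n[0] values) AND keypart2 IN (n[1] values) AND ...
   would exceed the budget. */
static unsigned long interval_count(const unsigned long *in_list_sizes, int keyparts)
{
  unsigned long total= 1;
  for (int i= 0; i < keyparts; i++)
  {
    total*= in_list_sizes[i];
    if (total > MAX_INTERVALS)
      return 0;                                     // give up, fall back to other access methods
  }
  return total;
}

int main()
{
  unsigned long small_lists[]= { 10, 10, 10 };      // 1000 intervals: within budget
  unsigned long big_lists[]=   { 100, 100, 100 };   // would be 1000000 intervals: over budget
  std::printf("%lu %lu\n",
              interval_count(small_lists, 3),       // prints 1000
              interval_count(big_lists, 3));        // prints 0
  return 0;
}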
0 : NEAR_MIN; } - void store_min(uint length,char **min_key,uint min_key_flag) + /* returns a number of keypart values (0 or 1) appended to the key buffer */ + int store_min(uint length,char **min_key,uint min_key_flag) { if ((min_flag & GEOM_FLAG) || (!(min_flag & NO_MIN_RANGE) && @@ -326,12 +412,13 @@ public: else memcpy(*min_key,min_value,length); (*min_key)+= length; + return 1; } + return 0; } - void store(uint length,char **min_key,uint min_key_flag, - char **max_key, uint max_key_flag) + /* returns a number of keypart values (0 or 1) appended to the key buffer */ + int store_max(uint length,char **max_key, uint max_key_flag) { - store_min(length, min_key, min_key_flag); if (!(max_flag & NO_MAX_RANGE) && !(max_key_flag & (NO_MAX_RANGE | NEAR_MAX))) { @@ -343,33 +430,41 @@ public: else memcpy(*max_key,max_value,length); (*max_key)+= length; + return 1; } + return 0; } - void store_min_key(KEY_PART *key,char **range_key, uint *range_key_flag) + /* returns a number of keypart values appended to the key buffer */ + int store_min_key(KEY_PART *key,char **range_key, uint *range_key_flag) { SEL_ARG *key_tree= first(); - key_tree->store(key[key_tree->part].store_length, - range_key,*range_key_flag,range_key,NO_MAX_RANGE); + uint res= key_tree->store_min(key[key_tree->part].store_length, + range_key, *range_key_flag); *range_key_flag|= key_tree->min_flag; if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && !(*range_key_flag & (NO_MIN_RANGE | NEAR_MIN)) && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) - key_tree->next_key_part->store_min_key(key,range_key, range_key_flag); + res+= key_tree->next_key_part->store_min_key(key, range_key, + range_key_flag); + return res; } - void store_max_key(KEY_PART *key,char **range_key, uint *range_key_flag) + /* returns a number of keypart values appended to the key buffer */ + int store_max_key(KEY_PART *key,char **range_key, uint *range_key_flag) { SEL_ARG *key_tree= last(); - key_tree->store(key[key_tree->part].store_length, - range_key, NO_MIN_RANGE, range_key,*range_key_flag); + uint res=key_tree->store_max(key[key_tree->part].store_length, + range_key, *range_key_flag); (*range_key_flag)|= key_tree->max_flag; if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && !(*range_key_flag & (NO_MAX_RANGE | NEAR_MAX)) && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) - key_tree->next_key_part->store_max_key(key,range_key, range_key_flag); + res+= key_tree->next_key_part->store_max_key(key, range_key, + range_key_flag); + return res; } SEL_ARG *insert(SEL_ARG *key); @@ -413,7 +508,6 @@ public: { return parent->left == this ? 
&parent->left : &parent->right; } - SEL_ARG *clone_tree(); /* @@ -456,6 +550,7 @@ public: } return !field->key_cmp(min_val, max_val); } + SEL_ARG *clone_tree(RANGE_OPT_PARAM *param); }; class SEL_IMERGE; @@ -535,6 +630,8 @@ public: using_real_indexes==TRUE */ uint real_keynr[MAX_KEY]; + /* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */ + uint alloced_sel_args; }; class PARAM : public RANGE_OPT_PARAM @@ -583,11 +680,11 @@ static SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param,COND *cond_func,Field *field, static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond); static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts); -static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree, +static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree, bool update_tbl_stats); static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, - char *min_key,uint min_key_flag, - char *max_key, uint max_key_flag); + char *min_key, uint min_key_flag, int, + char *max_key, uint max_key_flag, int); QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index, SEL_ARG *key_tree, @@ -624,8 +721,9 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg); static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2); -static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2); -static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag); +static SEL_ARG *key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2); +static SEL_ARG *key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, + uint clone_flag); static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1); bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, @@ -850,6 +948,7 @@ int imerge_list_or_tree(RANGE_OPT_PARAM *param, return im1->is_empty(); } + /*************************************************************************** ** Basic functions for SQL_SELECT and QUICK_RANGE_SELECT ***************************************************************************/ @@ -1453,6 +1552,7 @@ QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT() QUICK_RANGE::QUICK_RANGE() :min_key(0),max_key(0),min_length(0),max_length(0), + min_keypart_map(0), max_keypart_map(0), flag(NO_MIN_RANGE | NO_MAX_RANGE) {} @@ -1499,12 +1599,18 @@ SEL_ARG::SEL_ARG(Field *field_,uint8 part_,char *min_value_,char *max_value_, left=right= &null_element; } -SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) +SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, + SEL_ARG **next_arg) { SEL_ARG *tmp; + + /* Bail out if we have already generated too many SEL_ARGs */ + if (++param->alloced_sel_args > MAX_SEL_ARGS) + return 0; + if (type != KEY_RANGE) { - if (!(tmp= new SEL_ARG(type))) + if (!(tmp= new (param->mem_root) SEL_ARG(type))) return 0; // out of memory tmp->prev= *next_arg; // Link into next/prev chain (*next_arg)->next=tmp; @@ -1512,20 +1618,21 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) } else { - if (!(tmp= new SEL_ARG(field,part, min_value,max_value, - min_flag, max_flag, maybe_flag))) + if (!(tmp= new (param->mem_root) SEL_ARG(field,part, min_value,max_value, + min_flag, max_flag, maybe_flag))) return 0; // OOM tmp->parent=new_parent; tmp->next_key_part=next_key_part; if (left != &null_element) - 
tmp->left=left->clone(tmp,next_arg); + if (!(tmp->left=left->clone(param, tmp, next_arg))) + return 0; // OOM tmp->prev= *next_arg; // Link into next/prev chain (*next_arg)->next=tmp; (*next_arg)= tmp; if (right != &null_element) - if (!(tmp->right= right->clone(tmp,next_arg))) + if (!(tmp->right= right->clone(param, tmp, next_arg))) return 0; // OOM } increment_use_count(1); @@ -1603,11 +1710,12 @@ static int sel_cmp(Field *field, char *a,char *b,uint8 a_flag,uint8 b_flag) } -SEL_ARG *SEL_ARG::clone_tree() +SEL_ARG *SEL_ARG::clone_tree(RANGE_OPT_PARAM *param) { SEL_ARG tmp_link,*next_arg,*root; next_arg= &tmp_link; - root= clone((SEL_ARG *) 0, &next_arg); + if (!(root= clone(param, (SEL_ARG *) 0, &next_arg))) + return 0; next_arg->next=0; // Fix last link tmp_link.next->prev=0; // Fix first link if (root) // If not OOM @@ -2109,11 +2217,12 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.real_keynr[param.keys++]=idx; } param.key_parts_end=key_parts; + param.alloced_sel_args= 0; /* Calculate cost of full index read for the shortest covering index */ - if (!head->used_keys.is_clear_all()) + if (!head->covering_keys.is_clear_all()) { - int key_for_use= find_shortest_key(head, &head->used_keys); + int key_for_use= find_shortest_key(head, &head->covering_keys); double key_read_time= (get_index_only_read_time(¶m, records, key_for_use) + (double) records / TIME_FOR_COMPARE); @@ -2501,6 +2610,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) range_par->using_real_indexes= FALSE; range_par->remove_jump_scans= FALSE; range_par->real_keynr[0]= 0; + range_par->alloced_sel_args= 0; thd->no_errors=1; // Don't warn about NULL thd->mem_root=&alloc; @@ -4031,9 +4141,9 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) The calculation is conducted as follows: Lets denote #records(keypart1, ... keypartK) as n_k. We need to calculate - n_{k1} n_{k_2} + n_{k1} n_{k2} --------- * --------- * .... (3) - n_{k1-1} n_{k2_1} + n_{k1-1} n_{k2-1} where k1,k2,... are key parts which fields were not yet marked as fixed ( this is result of application of option b) of the recursion step for @@ -4041,9 +4151,9 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) Since it is reasonable to expect that most of the fields are not marked as fixed, we calculate (3) as - n_{i1} n_{i_2} + n_{i1} n_{i2} (3) = n_{max_key_part} / ( --------- * --------- * .... ) - n_{i1-1} n_{i2_1} + n_{i1-1} n_{i2-1} where i1,i2, .. are key parts that were already marked as fixed. @@ -4052,7 +4162,6 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) RETURN Selectivity of given ROR scan. 
- */ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, @@ -4063,6 +4172,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, byte key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */ char *key_ptr= (char*) key_val; SEL_ARG *sel_arg, *tuple_arg= NULL; + key_part_map keypart_map= 0; bool cur_covered; bool prev_covered= test(bitmap_is_set(&info->covered_fields, key_part->fieldnr-1)); @@ -4073,7 +4183,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, max_range.key= (byte*) key_val; max_range.flag= HA_READ_AFTER_KEY; ha_rows prev_records= info->param->table->file->stats.records; - DBUG_ENTER("ror_intersect_selectivity"); + DBUG_ENTER("ror_scan_selectivity"); for (sel_arg= scan->sel_arg; sel_arg; sel_arg= sel_arg->next_key_part) @@ -4090,13 +4200,17 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, tuple_arg= scan->sel_arg; /* Here we use the length of the first key part */ tuple_arg->store_min(key_part->store_length, &key_ptr, 0); + keypart_map= 1; } while (tuple_arg->next_key_part != sel_arg) { tuple_arg= tuple_arg->next_key_part; - tuple_arg->store_min(key_part[tuple_arg->part].store_length, &key_ptr, 0); + tuple_arg->store_min(key_part[tuple_arg->part].store_length, + &key_ptr, 0); + keypart_map= (keypart_map << 1) | 1; } min_range.length= max_range.length= ((char*) key_ptr - (char*) key_val); + min_range.keypart_map= max_range.keypart_map= keypart_map; records= (info->param->table->file-> records_in_range(scan->keynr, &min_range, &max_range)); if (cur_covered) @@ -4646,7 +4760,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, param->needed_reg->set_bit(keynr); bool read_index_only= index_read_must_be_used ? TRUE : - (bool) param->table->used_keys.is_set(keynr); + (bool) param->table->covering_keys.is_set(keynr); found_records= check_quick_select(param, idx, *key, update_tbl_stats); if (param->is_ror_scan) @@ -4923,8 +5037,8 @@ static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func, type. Tree won't be built for values with different result types, so we check it here to avoid unnecessary work. */ - if (!func->array) - break; + if (!func->arg_types_compatible) + break; if (inv) { @@ -5227,7 +5341,8 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond) while ((item=li++)) { SEL_TREE *new_tree=get_mm_tree(param,item); - if (param->thd->is_fatal_error) + if (param->thd->is_fatal_error || + param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS) DBUG_RETURN(0); // out of memory tree=tree_and(param,tree,new_tree); if (tree && tree->type == SEL_TREE::IMPOSSIBLE) @@ -5305,12 +5420,11 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond) */ for (uint i= 1 ; i < cond_func->arg_count ; i++) { - if (cond_func->arguments()[i]->real_item()->type() == Item::FIELD_ITEM) { field_item= (Item_field*) (cond_func->arguments()[i]->real_item()); SEL_TREE *tmp= get_full_func_mm_tree(param, cond_func, - field_item, (Item*) i, inv); + field_item, (Item*)(intptr)i, inv); if (inv) tree= !tree ? 
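/*
  Aside (numeric illustration of the ratio formula above, with made-up
  numbers): for an index (kp1,kp2,kp3), suppose the per-prefix row estimates
  are n_1=1000 rows matching kp1, n_2=100 rows matching (kp1,kp2) and
  n_3=20 rows matching (kp1,kp2,kp3).  Roughly, each additionally fixed key
  part contributes a factor n_k / n_{k-1}, so fixing kp2 and kp3 on top of
  kp1 gives 0.1 * 0.2 = 0.02.
*/
#include <cstdio>

int main()
{
  double n[]= { 1000, 100, 20 };        // n_k = #records(kp1..kpk), hypothetical estimates
  double selectivity= 1.0;
  for (int k= 1; k < 3; k++)
    selectivity*= n[k] / n[k - 1];      // product of n_k / n_{k-1}
  std::printf("selectivity = %.3f\n", selectivity);   // 0.020
  return 0;
}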
tmp : tree_or(param, tree, tmp); else @@ -5595,7 +5709,22 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field, err= value->save_in_field_no_warnings(field, 1); if (err > 0 && field->cmp_type() != value->result_type()) { - tree= 0; + if ((type == Item_func::EQ_FUNC || type == Item_func::EQUAL_FUNC) && + value->result_type() == item_cmp_type(field->result_type(), + value->result_type())) + + { + tree= new (alloc) SEL_ARG(field, 0, 0); + tree->type= SEL_ARG::IMPOSSIBLE; + } + else + { + /* + TODO: We should return trees of the type SEL_ARG::IMPOSSIBLE + for the cases like int_field > 999999999999999999999999 as well. + */ + tree= 0; + } goto end; } if (err < 0) @@ -5789,9 +5918,9 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) tree1->type=SEL_TREE::KEY_SMALLER; DBUG_RETURN(tree1); } - key_map result_keys; result_keys.clear_all(); + /* Join the trees key per key */ SEL_ARG **key1,**key2,**end; for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; @@ -5804,7 +5933,7 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) flag|=CLONE_KEY1_MAYBE; if (*key2 && !(*key2)->simple_key()) flag|=CLONE_KEY2_MAYBE; - *key1=key_and(*key1,*key2,flag); + *key1=key_and(param, *key1, *key2, flag); if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; @@ -5812,8 +5941,8 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) } result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - if (*key1) - (*key1)->test_use_count(*key1); + if (*key1 && param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif } } @@ -5965,13 +6094,14 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ; key1 != end ; key1++,key2++) { - *key1=key_or(*key1,*key2); + *key1=key_or(param, *key1, *key2); if (*key1) { result=tree1; // Added to tree1 result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - (*key1)->test_use_count(*key1); + if (param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif } } @@ -6029,14 +6159,15 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) /* And key trees where key1->part < key2 -> part */ static SEL_ARG * -and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) +and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, + uint clone_flag) { SEL_ARG *next; ulong use_count=key1->use_count; if (key1->elements != 1) { - key2->use_count+=key1->elements-1; + key2->use_count+=key1->elements-1; //psergey: why we don't count that key1 has n-k-p? 
key2->increment_use_count((int) key1->elements-1); } if (key1->type == SEL_ARG::MAYBE_KEY) @@ -6048,7 +6179,7 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) { if (next->next_key_part) { - SEL_ARG *tmp=key_and(next->next_key_part,key2,clone_flag); + SEL_ARG *tmp= key_and(param, next->next_key_part, key2, clone_flag); if (tmp && tmp->type == SEL_ARG::IMPOSSIBLE) { key1=key1->tree_delete(next); @@ -6057,6 +6188,8 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) next->next_key_part=tmp; if (use_count) next->increment_use_count(use_count); + if (param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS) + break; } else next->next_key_part=key2; @@ -6073,8 +6206,10 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) SYNOPSIS key_and() - key1 First argument, root of its RB-tree - key2 Second argument, root of its RB-tree + param Range analysis context (needed to track if we have allocated + too many SEL_ARGs) + key1 First argument, root of its RB-tree + key2 Second argument, root of its RB-tree RETURN RB-tree root of the resulting SEL_ARG graph. @@ -6082,7 +6217,7 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) */ static SEL_ARG * -key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) +key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) { if (!key1) return key2; @@ -6098,9 +6233,9 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) // key1->part < key2->part key1->use_count--; if (key1->use_count > 0) - if (!(key1= key1->clone_tree())) + if (!(key1= key1->clone_tree(param))) return 0; // OOM - return and_all_keys(key1,key2,clone_flag); + return and_all_keys(param, key1, key2, clone_flag); } if (((clone_flag & CLONE_KEY2_MAYBE) && @@ -6118,14 +6253,14 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) if (key1->use_count > 1) { key1->use_count--; - if (!(key1=key1->clone_tree())) + if (!(key1=key1->clone_tree(param))) return 0; // OOM key1->use_count++; } if (key1->type == SEL_ARG::MAYBE_KEY) { // Both are maybe key - key1->next_key_part=key_and(key1->next_key_part,key2->next_key_part, - clone_flag); + key1->next_key_part=key_and(param, key1->next_key_part, + key2->next_key_part, clone_flag); if (key1->next_key_part && key1->next_key_part->type == SEL_ARG::IMPOSSIBLE) return key1; @@ -6136,7 +6271,7 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) if (key2->next_key_part) { key1->use_count--; // Incremented in and_all_keys - return and_all_keys(key1,key2,clone_flag); + return and_all_keys(param, key1, key2, clone_flag); } key2->use_count--; // Key2 doesn't have a tree } @@ -6172,7 +6307,8 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) } else if (get_range(&e2,&e1,key2)) continue; - SEL_ARG *next=key_and(e1->next_key_part,e2->next_key_part,clone_flag); + SEL_ARG *next=key_and(param, e1->next_key_part, e2->next_key_part, + clone_flag); e1->increment_use_count(1); e2->increment_use_count(1); if (!next || next->type != SEL_ARG::IMPOSSIBLE) @@ -6220,7 +6356,7 @@ get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1) static SEL_ARG * -key_or(SEL_ARG *key1,SEL_ARG *key2) +key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) { if (!key1) { @@ -6268,7 +6404,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { swap_variables(SEL_ARG *,key1,key2); } - if (key1->use_count > 0 || !(key1=key1->clone_tree())) + if (key1->use_count > 0 || !(key1=key1->clone_tree(param))) return 0; // OOM } @@ -6412,7 +6548,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { // tmp.min. 
<= x <= tmp.max tmp->maybe_flag|= key.maybe_flag; key.increment_use_count(key1->use_count+1); - tmp->next_key_part=key_or(tmp->next_key_part,key.next_key_part); + tmp->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part); if (!cmp) // Key2 is ready break; key.copy_max_to_min(tmp); @@ -6443,7 +6579,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) tmp->increment_use_count(key1->use_count+1); /* Increment key count as it may be used for next loop */ key.increment_use_count(1); - new_arg->next_key_part=key_or(tmp->next_key_part,key.next_key_part); + new_arg->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part); key1=key1->insert(new_arg); break; } @@ -7042,7 +7178,9 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats) } param->n_ranges= 0; - records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0); + records= check_quick_keys(param, idx, tree, + param->min_key, 0, -1, + param->max_key, 0, -1); if (records != HA_POS_ERROR) { if (update_tbl_stats) @@ -7105,12 +7243,13 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats) */ static ha_rows -check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, - char *min_key,uint min_key_flag, char *max_key, - uint max_key_flag) +check_quick_keys(PARAM *param, uint idx, SEL_ARG *key_tree, + char *min_key, uint min_key_flag, int min_keypart, + char *max_key, uint max_key_flag, int max_keypart) { ha_rows records=0, tmp; uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length; + uint tmp_min_keypart= min_keypart, tmp_max_keypart= max_keypart; char *tmp_min_key, *tmp_max_key; uint8 save_first_null_comp= param->first_null_comp; @@ -7124,18 +7263,21 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, This is not a ROR scan if the key is not Clustered Primary Key. 
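/*
  Aside (generic sketch, not the SEL_ARG red-black tree code): conceptually
  key_or() forms the union of two ordered, disjoint interval lists over the
  same key part.  Below, closed integer intervals are merged the same way.
*/
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

typedef std::pair<int,int> Interval;                // [first, second], inclusive

static std::vector<Interval> interval_union(std::vector<Interval> a,
                                            const std::vector<Interval> &b)
{
  a.insert(a.end(), b.begin(), b.end());
  std::sort(a.begin(), a.end());
  std::vector<Interval> out;
  for (size_t i= 0; i < a.size(); i++)
  {
    if (!out.empty() && a[i].first <= out.back().second + 1)
      out.back().second= std::max(out.back().second, a[i].second);  // overlapping/adjacent: extend
    else
      out.push_back(a[i]);                                          // disjoint: start a new interval
  }
  return out;
}

int main()
{
  std::vector<Interval> a, b;
  a.push_back(Interval(1, 5));                      // key1: 1 <= x <= 5
  b.push_back(Interval(3, 10));                     // key2: 3 <= x <= 10
  b.push_back(Interval(20, 25));
  std::vector<Interval> u= interval_union(a, b);
  for (size_t i= 0; i < u.size(); i++)
    std::printf("[%d, %d] ", u[i].first, u[i].second);              // [1, 10] [20, 25]
  std::printf("\n");
  return 0;
}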
*/ param->is_ror_scan= FALSE; - records=check_quick_keys(param,idx,key_tree->left,min_key,min_key_flag, - max_key,max_key_flag); + records=check_quick_keys(param, idx, key_tree->left, + min_key, min_key_flag, min_keypart, + max_key, max_key_flag, max_keypart); if (records == HA_POS_ERROR) // Impossible return records; } tmp_min_key= min_key; tmp_max_key= max_key; - key_tree->store(param->key[idx][key_tree->part].store_length, - &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); - min_key_length= (uint) (tmp_min_key- param->min_key); - max_key_length= (uint) (tmp_max_key- param->max_key); + tmp_min_keypart+= key_tree->store_min(param->key[idx][key_tree->part].store_length, + &tmp_min_key, min_key_flag); + tmp_max_keypart+= key_tree->store_max(param->key[idx][key_tree->part].store_length, + &tmp_max_key, max_key_flag); + min_key_length= (uint) (tmp_min_key - param->min_key); + max_key_length= (uint) (tmp_max_key - param->max_key); if (param->is_ror_scan) { @@ -7158,12 +7300,13 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) { // const key as prefix if (min_key_length == max_key_length && - !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !memcmp(min_key, max_key, (uint) (tmp_max_key - max_key)) && !key_tree->min_flag && !key_tree->max_flag) { - tmp=check_quick_keys(param,idx,key_tree->next_key_part, - tmp_min_key, min_key_flag | key_tree->min_flag, - tmp_max_key, max_key_flag | key_tree->max_flag); + tmp=check_quick_keys(param,idx,key_tree->next_key_part, tmp_min_key, + min_key_flag | key_tree->min_flag, tmp_min_keypart, + tmp_max_key, max_key_flag | key_tree->max_flag, + tmp_max_keypart); goto end; // Ugly, but efficient } else @@ -7175,18 +7318,20 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp_min_flag=key_tree->min_flag; tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) + tmp_min_keypart+= key_tree->next_key_part->store_min_key(param->key[idx], &tmp_min_key, &tmp_min_flag); if (!tmp_max_flag) + tmp_max_keypart+= key_tree->next_key_part->store_max_key(param->key[idx], &tmp_max_key, &tmp_max_flag); - min_key_length= (uint) (tmp_min_key- param->min_key); - max_key_length= (uint) (tmp_max_key- param->max_key); + min_key_length= (uint) (tmp_min_key - param->min_key); + max_key_length= (uint) (tmp_max_key - param->max_key); } else { - tmp_min_flag=min_key_flag | key_tree->min_flag; - tmp_max_flag=max_key_flag | key_tree->max_flag; + tmp_min_flag= min_key_flag | key_tree->min_flag; + tmp_max_flag= max_key_flag | key_tree->max_flag; } keynr=param->real_keynr[idx]; @@ -7194,9 +7339,8 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, if (!tmp_min_flag && ! tmp_max_flag && (uint) key_tree->part+1 == param->table->key_info[keynr].key_parts && (param->table->key_info[keynr].flags & (HA_NOSAME | HA_END_SPACE_KEY)) == - HA_NOSAME && - min_key_length == max_key_length && - !memcmp(param->min_key,param->max_key,min_key_length) && + HA_NOSAME && min_key_length == max_key_length && + !memcmp(param->min_key, param->max_key, min_key_length) && !param->first_null_comp) { tmp=1; // Max one record @@ -7216,7 +7360,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, first members of clustered primary key. 
*/ if (!(min_key_length == max_key_length && - !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !memcmp(min_key, max_key, (uint) (tmp_max_key - max_key)) && !key_tree->min_flag && !key_tree->max_flag && is_key_scan_ror(param, keynr, key_tree->part + 1))) param->is_ror_scan= FALSE; @@ -7228,11 +7372,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, key_range min_range; min_range.key= (byte*) param->min_key; min_range.length= min_key_length; + min_range.keypart_map= make_keypart_map(tmp_min_keypart); /* In this case tmp_min_flag contains the handler-read-function */ min_range.flag= (ha_rkey_function) (tmp_min_flag ^ GEOM_FLAG); - tmp= param->table->file->records_in_range(keynr, &min_range, - (key_range*) 0); + tmp= param->table->file->records_in_range(keynr, + &min_range, (key_range*) 0); } else { @@ -7242,10 +7387,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, min_range.length= min_key_length; min_range.flag= (tmp_min_flag & NEAR_MIN ? HA_READ_AFTER_KEY : HA_READ_KEY_EXACT); + min_range.keypart_map= make_keypart_map(tmp_min_keypart); max_range.key= (byte*) param->max_key; max_range.length= max_key_length; max_range.flag= (tmp_max_flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); + max_range.keypart_map= make_keypart_map(tmp_max_keypart); tmp=param->table->file->records_in_range(keynr, (min_key_length ? &min_range : (key_range*) 0), @@ -7266,8 +7413,9 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, This is not a ROR scan if the key is not Clustered Primary Key. */ param->is_ror_scan= FALSE; - tmp=check_quick_keys(param,idx,key_tree->right,min_key,min_key_flag, - max_key,max_key_flag); + tmp=check_quick_keys(param, idx, key_tree->right, + min_key, min_key_flag, min_keypart, + max_key, max_key_flag, max_keypart); if (tmp == HA_POS_ERROR) return tmp; records+=tmp; @@ -7414,6 +7562,8 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { QUICK_RANGE *range; uint flag; + int min_part= key_tree->part-1, // # of keypart values in min_key buffer + max_part= key_tree->part-1; // # of keypart values in max_key buffer if (key_tree->left != &null_element) { @@ -7422,16 +7572,18 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, return 1; } char *tmp_min_key=min_key,*tmp_max_key=max_key; - key_tree->store(key[key_tree->part].store_length, - &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); + min_part+= key_tree->store_min(key[key_tree->part].store_length, + &tmp_min_key,min_key_flag); + max_part+= key_tree->store_max(key[key_tree->part].store_length, + &tmp_max_key,max_key_flag); if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) { // const key as prefix - if (!((tmp_min_key - min_key) != (tmp_max_key - max_key) || - memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) || - key_tree->min_flag || key_tree->max_flag)) + if ((tmp_min_key - min_key) == (tmp_max_key - max_key) && + memcmp(min_key, max_key, (uint)(tmp_max_key - max_key))==0 && + key_tree->min_flag==0 && key_tree->max_flag==0) { if (get_quick_keys(param,quick,key,key_tree->next_key_part, tmp_min_key, min_key_flag | key_tree->min_flag, @@ -7442,10 +7594,10 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { uint tmp_min_flag=key_tree->min_flag,tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) - key_tree->next_key_part->store_min_key(key, &tmp_min_key, + min_part+= key_tree->next_key_part->store_min_key(key, &tmp_min_key, 
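/*
  Aside: the hunks above thread a key_part_map through records_in_range()
  and index_read() so the handler knows how many leading key parts the key
  buffer really contains; a map with the lowest N bits set means "key parts
  1..N are present".  keypart_bitmap() below is an illustrative stand-in;
  the real make_keypart_map()/make_prev_keypart_map() macros are defined
  elsewhere and may differ in their off-by-one conventions.
*/
#include <cstdio>

typedef unsigned long key_part_map_sketch;

static key_part_map_sketch keypart_bitmap(unsigned n_parts)
{
  return (n_parts >= sizeof(key_part_map_sketch) * 8)
         ? ~0UL
         : ((key_part_map_sketch) 1 << n_parts) - 1;   // lowest n_parts bits set
}

int main()
{
  /* A range over (kp1, kp2) of a three-part index uses only two key parts: */
  std::printf("0x%lx\n", keypart_bitmap(2));           // 0x3
  /* A full unique lookup on all three parts: */
  std::printf("0x%lx\n", keypart_bitmap(3));           // 0x7
  return 0;
}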
&tmp_min_flag); if (!tmp_max_flag) - key_tree->next_key_part->store_max_key(key, &tmp_max_key, + max_part+= key_tree->next_key_part->store_max_key(key, &tmp_max_key, &tmp_max_flag); flag=tmp_min_flag | tmp_max_flag; } @@ -7496,13 +7648,15 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, /* Get range for retrieving rows in QUICK_SELECT::get_next */ if (!(range= new QUICK_RANGE((const char *) param->min_key, (uint) (tmp_min_key - param->min_key), + min_part >=0 ? make_keypart_map(min_part) : 0, (const char *) param->max_key, (uint) (tmp_max_key - param->max_key), + max_part >=0 ? make_keypart_map(max_part) : 0, flag))) return 1; // out of memory - set_if_bigger(quick->max_used_key_length,range->min_length); - set_if_bigger(quick->max_used_key_length,range->max_length); + set_if_bigger(quick->max_used_key_length, range->min_length); + set_if_bigger(quick->max_used_key_length, range->max_length); set_if_bigger(quick->used_key_parts, (uint) key_tree->part+1); if (insert_dynamic(&quick->ranges, (gptr)&range)) return 1; @@ -7600,13 +7754,13 @@ bool QUICK_ROR_UNION_SELECT::is_keys_used(const MY_BITMAP *fields) thd Thread handle table Table to access ref ref[_or_null] scan parameters - records Estimate of number of records (needed only to construct + records Estimate of number of records (needed only to construct quick select) NOTES This allocates things in a new memory root, as this may be called many times during a query. - - RETURN + + RETURN Quick select that retrieves the same rows as passed ref scan NULL on error. */ @@ -7642,8 +7796,10 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, !(range= new(alloc) QUICK_RANGE())) goto err; // out of memory - range->min_key=range->max_key=(char*) ref->key_buff; - range->min_length=range->max_length=ref->key_length; + range->min_key= range->max_key= (char*) ref->key_buff; + range->min_length= range->max_length= ref->key_length; + range->min_keypart_map= range->max_keypart_map= + make_prev_keypart_map(ref->key_parts); range->flag= ((ref->key_length == key_info->key_length && (key_info->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME) ? EQ_RANGE : 0); @@ -7656,7 +7812,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, { key_part->part=part; key_part->field= key_info->key_part[part].field; - key_part->length= key_info->key_part[part].length; + key_part->length= key_info->key_part[part].length; key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; key_part->flag= (uint8) key_info->key_part[part].key_part_flag; @@ -7675,11 +7831,11 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, QUICK_RANGE *null_range; *ref->null_ref_key= 1; // Set null byte then create a range - if (!(null_range= new (alloc) QUICK_RANGE((char*)ref->key_buff, - ref->key_length, - (char*)ref->key_buff, - ref->key_length, - EQ_RANGE))) + if (!(null_range= new (alloc) + QUICK_RANGE((char*)ref->key_buff, ref->key_length, + make_prev_keypart_map(ref->key_parts), + (char*)ref->key_buff, ref->key_length, + make_prev_keypart_map(ref->key_parts), EQ_RANGE))) goto err; *ref->null_ref_key= 0; // Clear null byte if (insert_dynamic(&quick->ranges,(gptr)&null_range)) @@ -8131,6 +8287,7 @@ int QUICK_RANGE_SELECT::get_next() start_key->flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : (last_range->flag & EQ_RANGE) ? 
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + start_key->keypart_map= last_range->min_keypart_map; end_key->key= (const byte*) last_range->max_key; end_key->length= last_range->max_length; /* @@ -8139,6 +8296,7 @@ int QUICK_RANGE_SELECT::get_next() */ end_key->flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); + end_key->keypart_map= last_range->max_keypart_map; mrange_slot->range_flag= last_range->flag; } @@ -8188,7 +8346,9 @@ end: other if some error occurred */ -int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) +int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, + key_part_map keypart_map, + byte *cur_prefix) { DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix"); @@ -8200,8 +8360,7 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) { /* Read the next record in the same range with prefix after cur_prefix. */ DBUG_ASSERT(cur_prefix != 0); - result= file->index_read(record, cur_prefix, prefix_length, - HA_READ_AFTER_KEY); + result= file->index_read(record, cur_prefix, keypart_map, HA_READ_AFTER_KEY); if (result || (file->compare_key(file->end_range) <= 0)) DBUG_RETURN(result); } @@ -8217,11 +8376,13 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) start_key.key= (const byte*) last_range->min_key; start_key.length= min(last_range->min_length, prefix_length); + start_key.keypart_map= last_range->min_keypart_map & keypart_map; start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : (last_range->flag & EQ_RANGE) ? HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); end_key.key= (const byte*) last_range->max_key; end_key.length= min(last_range->max_length, prefix_length); + end_key.keypart_map= last_range->max_keypart_map & keypart_map; /* We use READ_AFTER_KEY here because if we are reading on a key prefix we want to find all keys with this prefix @@ -8229,8 +8390,8 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) end_key.flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); - result= file->read_range_first(last_range->min_length ? &start_key : 0, - last_range->max_length ? &end_key : 0, + result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0, + last_range->max_keypart_map ? &end_key : 0, test(last_range->flag & EQ_RANGE), sorted); if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE)) @@ -8270,9 +8431,8 @@ int QUICK_RANGE_SELECT_GEOM::get_next() } last_range= *(cur_range++); - result= file->index_read(record, - (byte*) last_range->min_key, - last_range->min_length, + result= file->index_read(record, (byte*) last_range->min_key, + last_range->min_keypart_map, (ha_rkey_function)(last_range->flag ^ GEOM_FLAG)); if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE) DBUG_RETURN(result); @@ -8404,15 +8564,15 @@ int QUICK_SELECT_DESC::get_next() if (last_range->flag & EQ_RANGE) { - result= file->index_read(record, (byte*) last_range->max_key, - last_range->max_length, HA_READ_KEY_EXACT); + result = file->index_read(record, (byte*) last_range->max_key, + last_range->max_keypart_map, HA_READ_KEY_EXACT); } else { DBUG_ASSERT(last_range->flag & NEAR_MAX || range_reads_after_key(last_range)); result=file->index_read(record, (byte*) last_range->max_key, - last_range->max_length, + last_range->max_keypart_map, ((last_range->flag & NEAR_MAX) ? 
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV)); @@ -8732,8 +8892,7 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, static inline uint get_field_keypart(KEY *index, Field *field); static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, uint *param_idx); -static bool -get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, +static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO *first_non_group_part, KEY_PART_INFO *min_max_arg_part, KEY_PART_INFO *last_part, THD *thd, @@ -8909,7 +9068,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) if ((join->tables != 1) || /* The query must reference one table. */ ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */ (!join->select_distinct)) || - (thd->lex->select_lex.olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */ + (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */ DBUG_RETURN(NULL); if (table->s->keys == 0) /* There are no indexes to use. */ DBUG_RETURN(NULL); @@ -8933,7 +9092,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) else DBUG_RETURN(NULL); - Item *expr= min_max_item->args[0]; /* The argument of MIN/MAX. */ + /* The argument of MIN/MAX. */ + Item *expr= min_max_item->args[0]->real_item(); if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */ { if (! min_max_arg_item) @@ -8997,7 +9157,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) cur_index_info++, cur_index++) { /* Check (B1) - if current index is covering. */ - if (!table->used_keys.is_set(cur_index)) + if (!table->covering_keys.is_set(cur_index)) goto next_index; /* @@ -9131,7 +9291,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) NULL; first_non_infix_part= min_max_arg_part ? (min_max_arg_part < last_part) ? - min_max_arg_part + 1 : + min_max_arg_part : NULL : NULL; if (first_non_group_part && @@ -9188,7 +9348,9 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) */ if (first_non_infix_part) { - for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++) + cur_part= first_non_infix_part + + (min_max_arg_part && (min_max_arg_part < last_part)); + for (; cur_part != last_part; cur_part++) { if (bitmap_is_set(table->read_set, cur_part->field->field_index)) goto next_index; @@ -9302,6 +9464,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, DBUG_ENTER("check_group_min_max_predicates"); DBUG_ASSERT(cond && min_max_arg_item); + cond= cond->real_item(); Item::Type cond_type= cond->type(); if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */ { @@ -9339,7 +9502,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) { - cur_arg= arguments[arg_idx]; + cur_arg= arguments[arg_idx]->real_item(); DBUG_PRINT("info", ("cur_arg: %s", cur_arg->full_name())); if (cur_arg->type() == Item::FIELD_ITEM) { @@ -9734,7 +9897,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, RETURN New QUICK_GROUP_MIN_MAX_SELECT object if successfully created, - NULL o/w. + NULL otherwise. 
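Several hunks above insert Item::real_item() calls before the FIELD_ITEM checks in get_best_group_min_max() and check_group_min_max_predicates(). The reason is that a column referenced through a view or an alias arrives wrapped in an Item_ref, whose type() reports the wrapper rather than the underlying field, so the loose index scan would be rejected. A server-context sketch of the unwrapping pattern; unwrap_field() is a hypothetical name, only real_item(), type() and Item_field come from the source.

/* Server-context sketch (would live next to the code above; needs item.h). */
static Item_field *unwrap_field(Item *arg)
{
  Item *real= arg->real_item();           /* strips Item_ref wrappers        */
  if (real->type() == Item::FIELD_ITEM)   /* the wrapper itself would report
                                             REF_ITEM and fail this check    */
    return (Item_field*) real;
  return NULL;                            /* constant, function call, etc.   */
}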
*/ QUICK_SELECT_I * @@ -9747,10 +9910,10 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows, quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table, param->thd->lex->current_select->join, have_min, have_max, min_max_arg_part, - group_prefix_len, used_key_parts, - index_info, index, read_cost, records, - key_infix_len, key_infix, - parent_alloc); + group_prefix_len, group_key_parts, + used_key_parts, index_info, index, + read_cost, records, key_infix_len, + key_infix, parent_alloc); if (!quick) DBUG_RETURN(NULL); @@ -9839,7 +10002,7 @@ QUICK_GROUP_MIN_MAX_SELECT:: QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, bool have_max_arg, KEY_PART_INFO *min_max_arg_part_arg, - uint group_prefix_len_arg, + uint group_prefix_len_arg, uint group_key_parts_arg, uint used_key_parts_arg, KEY *index_info_arg, uint use_index, double read_cost_arg, ha_rows records_arg, uint key_infix_len_arg, @@ -9849,7 +10012,7 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, have_max(have_max_arg), seen_first_key(FALSE), min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg), key_infix_len(key_infix_len_arg), min_functions_it(NULL), - max_functions_it(NULL) + max_functions_it(NULL), group_key_parts(group_key_parts_arg) { head= table; file= head->file; @@ -9859,6 +10022,7 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, read_time= read_cost_arg; records= records_arg; used_key_parts= used_key_parts_arg; + real_key_parts= used_key_parts_arg; real_prefix_len= group_prefix_len + key_infix_len; group_prefix= NULL; min_max_arg_len= min_max_arg_part ? min_max_arg_part->store_length : 0; @@ -10025,7 +10189,9 @@ bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range) range_flag|= EQ_RANGE; /* equality condition */ } range= new QUICK_RANGE(sel_range->min_value, min_max_arg_len, + make_keypart_map(sel_range->part), sel_range->max_value, min_max_arg_len, + make_keypart_map(sel_range->part), range_flag); if (!range) return TRUE; @@ -10158,14 +10324,13 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void) DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset"); file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */ - result= file->ha_index_init(index, 1); - result= file->index_last(record); - if (result == HA_ERR_END_OF_FILE) - DBUG_RETURN(0); - if (result) + if ((result= file->ha_index_init(index,1))) DBUG_RETURN(result); if (quick_prefix_select && quick_prefix_select->reset()) DBUG_RETURN(1); + result= file->index_last(record); + if (result == HA_ERR_END_OF_FILE) + DBUG_RETURN(0); /* Save the prefix of the last group. */ key_copy(last_prefix, record, index_info, group_prefix_len); @@ -10214,7 +10379,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next() #else int result; #endif - int is_last_prefix; + int is_last_prefix= 0; DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::get_next"); @@ -10229,13 +10394,18 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next() Check if this is the last group prefix. Notice that at this point this->record contains the current prefix in record format. 
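The reset() hunk above reorders the handler calls so that every return code is inspected before the row buffer is used: an ha_index_init() failure is now propagated instead of being overwritten by index_last(), and the prefix quick select is reset before the first group is read; the same idea drives the get_next() change below, where is_last_prefix is only computed after a successful read. A condensed server-context sketch of the resulting control flow (member accesses turned into parameters; the real method continues with key_copy() of the last prefix):

static int reset_sketch(handler *file, uint index, byte *record,
                        QUICK_RANGE_SELECT *quick_prefix_select)
{
  int result;
  if ((result= file->ha_index_init(index, 1)))   /* propagate init errors    */
    return result;
  if (quick_prefix_select && quick_prefix_select->reset())
    return 1;                                    /* prefix scan must be ready
                                                    before the first group   */
  result= file->index_last(record);              /* only now read the index  */
  if (result == HA_ERR_END_OF_FILE)              /* empty index: success,
                                                    get_next() will just EOF */
    return 0;
  return result;
}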
*/ - is_last_prefix= key_cmp(index_info->key_part, last_prefix, - group_prefix_len); - DBUG_ASSERT(is_last_prefix <= 0); - if (result == HA_ERR_KEY_NOT_FOUND) - continue; - if (result) + if (!result) + { + is_last_prefix= key_cmp(index_info->key_part, last_prefix, + group_prefix_len); + DBUG_ASSERT(is_last_prefix <= 0); + } + else + { + if (result == HA_ERR_KEY_NOT_FOUND) + continue; break; + } if (have_min) { @@ -10260,7 +10430,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next() first sub-group with the extended prefix. */ if (!have_min && !have_max && key_infix_len > 0) - result= file->index_read(record, group_prefix, real_prefix_len, + result= file->index_read(record, group_prefix, + make_prev_keypart_map(real_key_parts), HA_READ_KEY_EXACT); result= have_min ? min_res : have_max ? max_res : result; @@ -10323,7 +10494,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min() /* Apply the constant equality conditions to the non-group select fields */ if (key_infix_len > 0) { - if ((result= file->index_read(record, group_prefix, real_prefix_len, + if ((result= file->index_read(record, group_prefix, + make_prev_keypart_map(real_key_parts), HA_READ_KEY_EXACT))) DBUG_RETURN(result); } @@ -10340,7 +10512,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min() /* Find the first subsequent record without NULL in the MIN/MAX field. */ key_copy(tmp_record, record, index_info, 0); result= file->index_read(record, tmp_record, - real_prefix_len + min_max_arg_len, + make_keypart_map(real_key_parts), HA_READ_AFTER_KEY); /* Check if the new record belongs to the current group by comparing its @@ -10396,7 +10568,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max() if (min_max_ranges.elements > 0) result= next_max_in_range(); else - result= file->index_read(record, group_prefix, real_prefix_len, + result= file->index_read(record, group_prefix, + make_prev_keypart_map(real_key_parts), HA_READ_PREFIX_LAST); DBUG_RETURN(result); } @@ -10432,7 +10605,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() { byte *cur_prefix= seen_first_key ? group_prefix : NULL; if ((result= quick_prefix_select->get_next_prefix(group_prefix_len, - cur_prefix))) + make_prev_keypart_map(group_key_parts), cur_prefix))) DBUG_RETURN(result); seen_first_key= TRUE; } @@ -10448,7 +10621,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() else { /* Load the first key in this group into record. */ - result= file->index_read(record, group_prefix, group_prefix_len, + result= file->index_read(record, group_prefix, + make_prev_keypart_map(group_key_parts), HA_READ_AFTER_KEY); if (result) DBUG_RETURN(result); @@ -10490,7 +10664,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() { ha_rkey_function find_flag; - uint search_prefix_len; + key_part_map keypart_map; QUICK_RANGE *cur_range; bool found_null= FALSE; int result= HA_ERR_KEY_NOT_FOUND; @@ -10512,22 +10686,21 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() if (cur_range->flag & NO_MIN_RANGE) { + keypart_map= make_prev_keypart_map(real_key_parts); find_flag= HA_READ_KEY_EXACT; - search_prefix_len= real_prefix_len; } else { /* Extend the search key with the lower boundary for this range. */ memcpy(group_prefix + real_prefix_len, cur_range->min_key, cur_range->min_length); - search_prefix_len= real_prefix_len + min_max_arg_len; + keypart_map= make_keypart_map(real_key_parts); find_flag= (cur_range->flag & (EQ_RANGE | NULL_RANGE)) ? HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MIN) ? 
HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT; } - result= file->index_read(record, group_prefix, search_prefix_len, - find_flag); + result= file->index_read(record, group_prefix, keypart_map, find_flag); if (result) { if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) && @@ -10624,7 +10797,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() { ha_rkey_function find_flag; - uint search_prefix_len; + key_part_map keypart_map; QUICK_RANGE *cur_range; int result; @@ -10646,22 +10819,21 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() if (cur_range->flag & NO_MAX_RANGE) { + keypart_map= make_prev_keypart_map(real_key_parts); find_flag= HA_READ_PREFIX_LAST; - search_prefix_len= real_prefix_len; } else { /* Extend the search key with the upper boundary for this range. */ memcpy(group_prefix + real_prefix_len, cur_range->max_key, cur_range->max_length); - search_prefix_len= real_prefix_len + min_max_arg_len; + keypart_map= make_keypart_map(real_key_parts); find_flag= (cur_range->flag & EQ_RANGE) ? HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MAX) ? HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV; } - result= file->index_read(record, group_prefix, search_prefix_len, - find_flag); + result= file->index_read(record, group_prefix, keypart_map, find_flag); if (result) { diff --git a/sql/opt_range.h b/sql/opt_range.h index d82e1dc459e..1ad9567cddd 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -37,17 +37,23 @@ class QUICK_RANGE :public Sql_alloc { public: char *min_key,*max_key; uint16 min_length,max_length,flag; + key_part_map min_keypart_map, // bitmap of used keyparts in min_key + max_keypart_map; // bitmap of used keyparts in max_key #ifdef HAVE_purify uint16 dummy; /* Avoid warnings on 'flag' */ #endif QUICK_RANGE(); /* Full range */ - QUICK_RANGE(const char *min_key_arg,uint min_length_arg, - const char *max_key_arg,uint max_length_arg, + QUICK_RANGE(const char *min_key_arg, uint min_length_arg, + key_part_map min_keypart_map_arg, + const char *max_key_arg, uint max_length_arg, + key_part_map max_keypart_map_arg, uint flag_arg) : min_key((char*) sql_memdup(min_key_arg,min_length_arg+1)), max_key((char*) sql_memdup(max_key_arg,max_length_arg+1)), min_length((uint16) min_length_arg), max_length((uint16) max_length_arg), + min_keypart_map(min_keypart_map_arg), + max_keypart_map(max_keypart_map_arg), flag((uint16) flag_arg) { #ifdef HAVE_purify @@ -60,11 +66,11 @@ class QUICK_RANGE :public Sql_alloc { /* Quick select interface. This class is a parent for all QUICK_*_SELECT and FT_SELECT classes. - + The usage scenario is as follows: 1. Create quick select quick= new QUICK_XXX_SELECT(...); - + 2. Perform lightweight initialization. This can be done in 2 ways: 2.a: Regular initialization if (quick->init()) @@ -75,29 +81,29 @@ class QUICK_RANGE :public Sql_alloc { 2.b: Special initialization for quick selects merged by QUICK_ROR_*_SELECT if (quick->init_ror_merged_scan()) delete quick; - + 3. Perform zero, one, or more scans. while (...) { // initialize quick select for scan. This may allocate - // buffers and/or prefetch rows. + // buffers and/or prefetch rows. if (quick->reset()) { //the only valid action after failed reset() call is delete delete quick; //abort query } - + // perform the scan do { res= quick->get_next(); } while (res && ...) } - + 4. Delete the select: delete quick; - + */ class QUICK_SELECT_I @@ -123,6 +129,8 @@ public: Max. number of (first) key parts this quick select uses for retrieval. eg. 
for "(key1p1=c1 AND key1p2=c2) OR key1p1=c2" used_key_parts == 2. Applicable if index!= MAX_KEY. + + For QUICK_GROUP_MIN_MAX_SELECT it includes MIN/MAX argument keyparts. */ uint used_key_parts; @@ -318,7 +326,8 @@ public: int reset(void); int get_next(); void range_end(); - int get_next_prefix(uint prefix_length, byte *cur_prefix); + int get_next_prefix(uint prefix_length, key_part_map keypart_map, + byte *cur_prefix); bool reverse_sorted() { return 0; } bool unique_key_range(); int init_ror_merged_scan(bool reuse_handler); @@ -605,6 +614,7 @@ private: byte *tmp_record; /* Temporary storage for next_min(), next_max(). */ byte *group_prefix; /* Key prefix consisting of the GROUP fields. */ uint group_prefix_len; /* Length of the group prefix. */ + uint group_key_parts; /* A number of keyparts in the group prefix */ byte *last_prefix; /* Prefix of the last group for detecting EOF. */ bool have_min; /* Specify whether we are computing */ bool have_max; /* a MIN, a MAX, or both. */ @@ -616,6 +626,7 @@ private: uint key_infix_len; DYNAMIC_ARRAY min_max_ranges; /* Array of range ptrs for the MIN/MAX field. */ uint real_prefix_len; /* Length of key prefix extended with key_infix. */ + uint real_key_parts; /* A number of keyparts in the above value. */ List<Item_sum> *min_functions; List<Item_sum> *max_functions; List_iterator<Item_sum> *min_functions_it; @@ -638,10 +649,11 @@ private: public: QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min, bool have_max, KEY_PART_INFO *min_max_arg_part, - uint group_prefix_len, uint used_key_parts, - KEY *index_info, uint use_index, double read_cost, - ha_rows records, uint key_infix_len, - byte *key_infix, MEM_ROOT *parent_alloc); + uint group_prefix_len, uint group_key_parts, + uint used_key_parts, KEY *index_info, uint + use_index, double read_cost, ha_rows records, uint + key_infix_len, byte *key_infix, MEM_ROOT + *parent_alloc); ~QUICK_GROUP_MIN_MAX_SELECT(); bool add_range(SEL_ARG *sel_range); void update_key_stat(); diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 5bd5ec4b42d..f9a06f3fb6e 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -251,7 +251,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) error= table->file->index_first(table->record[0]); else error= table->file->index_read(table->record[0],key_buff, - ref.key_length, + make_prev_keypart_map(ref.key_parts), range_fl & NEAR_MIN ? HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT); @@ -338,11 +338,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) error= table->file->index_last(table->record[0]); else error= table->file->index_read(table->record[0], key_buff, - ref.key_length, + make_prev_keypart_map(ref.key_parts), range_fl & NEAR_MAX ? 
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV); - if (!error && reckey_in_range(1, &ref, item_field->field, + if (!error && reckey_in_range(1, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; if (table->key_read) @@ -605,15 +605,13 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, /* Check if field is part of the tested partial key */ byte *key_ptr= ref->key_buff; KEY_PART_INFO *part; - for (part= keyinfo->key_part; - ; - key_ptr+= part++->store_length) + for (part= keyinfo->key_part; ; key_ptr+= part++->store_length) { if (part > field_part) return 0; // Field is beyond the tested parts if (part->field->eq(((Item_field*) args[0])->field)) - break; // Found a part od the key for the field + break; // Found a part of the key for the field } bool is_field_part= part == field_part; @@ -625,8 +623,11 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, { uint length= (key_ptr-ref->key_buff)+part->store_length; if (ref->key_length < length) + { /* Ultimately ref->key_length will contain the length of the search key */ ref->key_length= length; + ref->key_parts= (part - keyinfo->key_part) + 1; + } if (!*prefix_len && part+1 == field_part) *prefix_len= length; if (is_field_part && eq_type) @@ -773,6 +774,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, { ref->key= idx; ref->key_length= 0; + ref->key_parts= 0; key_part_map key_part_used= 0; *range_fl= NO_MIN_RANGE | NO_MAX_RANGE; if (matching_cond(max_fl, ref, keyinfo, part, cond, @@ -788,6 +790,8 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ ref->key_buff[ref->key_length]= 1; ref->key_length+= part->store_length; + ref->key_parts++; + DBUG_ASSERT(ref->key_parts == jdx+1); *range_fl&= ~NO_MIN_RANGE; *range_fl|= NEAR_MIN; // > NULL } diff --git a/sql/parse_file.cc b/sql/parse_file.cc index c36e16e0553..f5b62e3afe2 100644 --- a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -228,7 +228,7 @@ sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, if (dir) { - fn_format(path, file_name->str, dir->str, 0, MY_UNPACK_FILENAME); + fn_format(path, file_name->str, dir->str, "", MY_UNPACK_FILENAME); path_end= strlen(path); } else diff --git a/sql/partition_info.h b/sql/partition_info.h index 8bcc769054f..6c21002c184 100644 --- a/sql/partition_info.h +++ b/sql/partition_info.h @@ -156,7 +156,7 @@ public: char *part_func_string; char *subpart_func_string; - uchar *part_state; + const char *part_state; partition_element *curr_part_elem; partition_element *current_partition; diff --git a/sql/protocol.cc b/sql/protocol.cc index 05e98c68e4e..5aa3b7b5055 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -35,7 +35,7 @@ static void write_eof_packet(THD *thd, NET *net); #ifndef EMBEDDED_LIBRARY bool Protocol::net_store_data(const char *from, uint length) #else -bool Protocol_prep::net_store_data(const char *from, uint length) +bool Protocol_binary::net_store_data(const char *from, uint length) #endif { ulong packet_length=packet->length(); @@ -557,7 +557,7 @@ bool Protocol::send_fields(List<Item> *list, uint flags) Item *item; char buff[80]; String tmp((char*) buff,sizeof(buff),&my_charset_bin); - Protocol_simple prot(thd); + Protocol_text prot(thd); String *local_packet= prot.storage_packet(); CHARSET_INFO *thd_charset= thd->variables.character_set_results; DBUG_ENTER("send_fields"); @@ -760,7 +760,7 @@ bool Protocol::store(I_List<i_string>* str_list) ****************************************************************************/ 
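From this point on the patch renames Protocol_simple to Protocol_text and Protocol_prep to Protocol_binary, and protocol.h (further down) gains a type() method with PROTOCOL_TEXT/PROTOCOL_BINARY values so callers can ask a protocol object which row encoding it produces. A tiny sketch of the intended use; row_is_binary_encoded() is a hypothetical helper, not part of the patch.

/* Hypothetical helper: shows the intent of the new Protocol::type(). */
static bool row_is_binary_encoded(Protocol *protocol)
{
  return protocol->type() == Protocol::PROTOCOL_BINARY;
}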
#ifndef EMBEDDED_LIBRARY -void Protocol_simple::prepare_for_resend() +void Protocol_text::prepare_for_resend() { packet->length(0); #ifndef DBUG_OFF @@ -768,7 +768,7 @@ void Protocol_simple::prepare_for_resend() #endif } -bool Protocol_simple::store_null() +bool Protocol_text::store_null() { #ifndef DBUG_OFF field_pos++; @@ -801,7 +801,7 @@ bool Protocol::store_string_aux(const char *from, uint length, } -bool Protocol_simple::store(const char *from, uint length, +bool Protocol_text::store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { #ifndef DBUG_OFF @@ -817,8 +817,8 @@ bool Protocol_simple::store(const char *from, uint length, } -bool Protocol_simple::store(const char *from, uint length, - CHARSET_INFO *fromcs) +bool Protocol_text::store(const char *from, uint length, + CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= this->thd->variables.character_set_results; #ifndef DBUG_OFF @@ -834,7 +834,7 @@ bool Protocol_simple::store(const char *from, uint length, } -bool Protocol_simple::store_tiny(longlong from) +bool Protocol_text::store_tiny(longlong from) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TINY); @@ -846,7 +846,7 @@ bool Protocol_simple::store_tiny(longlong from) } -bool Protocol_simple::store_short(longlong from) +bool Protocol_text::store_short(longlong from) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -860,7 +860,7 @@ bool Protocol_simple::store_short(longlong from) } -bool Protocol_simple::store_long(longlong from) +bool Protocol_text::store_long(longlong from) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -874,7 +874,7 @@ bool Protocol_simple::store_long(longlong from) } -bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) +bool Protocol_text::store_longlong(longlong from, bool unsigned_flag) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -889,7 +889,7 @@ bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) } -bool Protocol_simple::store_decimal(const my_decimal *d) +bool Protocol_text::store_decimal(const my_decimal *d) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -903,7 +903,7 @@ bool Protocol_simple::store_decimal(const my_decimal *d) } -bool Protocol_simple::store(float from, uint32 decimals, String *buffer) +bool Protocol_text::store(float from, uint32 decimals, String *buffer) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -915,7 +915,7 @@ bool Protocol_simple::store(float from, uint32 decimals, String *buffer) } -bool Protocol_simple::store(double from, uint32 decimals, String *buffer) +bool Protocol_text::store(double from, uint32 decimals, String *buffer) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -927,7 +927,7 @@ bool Protocol_simple::store(double from, uint32 decimals, String *buffer) } -bool Protocol_simple::store(Field *field) +bool Protocol_text::store(Field *field) { if (field->is_null()) return store_null(); @@ -961,7 +961,7 @@ bool Protocol_simple::store(Field *field) */ -bool Protocol_simple::store(TIME *tm) +bool Protocol_text::store(TIME *tm) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -984,7 +984,7 @@ bool Protocol_simple::store(TIME *tm) } -bool Protocol_simple::store_date(TIME *tm) +bool Protocol_text::store_date(TIME *tm) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -1003,7 +1003,7 @@ bool Protocol_simple::store_date(TIME *tm) we support 0-6 decimals for time. 
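The Protocol_text methods above render every value as a length-prefixed string, while the Protocol_binary methods that follow append fixed-width little-endian fields after a leading NULL bitmap. A standalone sketch contrasting the two encodings for a 32-bit integer; int4store_sketch() is a stand-in for the server's int4store macro, and the length-prefix handling is simplified.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the server's int4store(): little-endian 4-byte store. */
static void int4store_sketch(unsigned char *to, uint32_t v)
{
  to[0]= (unsigned char) v;         to[1]= (unsigned char) (v >> 8);
  to[2]= (unsigned char) (v >> 16); to[3]= (unsigned char) (v >> 24);
}

int main(void)
{
  uint32_t value= 1234;

  /* Text protocol: decimal digits, preceded by a length. */
  char text[16];
  int len= snprintf(text, sizeof(text), "%u", value);
  printf("text  : [len=%d] \"%s\"\n", len, text);

  /* Binary protocol: exactly four bytes, least significant first. */
  unsigned char binary[4];
  int4store_sketch(binary, value);
  printf("binary: %02x %02x %02x %02x\n",
         binary[0], binary[1], binary[2], binary[3]);
  return 0;
}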
*/ -bool Protocol_simple::store_time(TIME *tm) +bool Protocol_text::store_time(TIME *tm) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -1043,7 +1043,7 @@ bool Protocol_simple::store_time(TIME *tm) [..]..[[length]data] data ****************************************************************************/ -bool Protocol_prep::prepare_for_send(List<Item> *item_list) +bool Protocol_binary::prepare_for_send(List<Item> *item_list) { Protocol::prepare_for_send(item_list); bit_fields= (field_count+9)/8; @@ -1054,7 +1054,7 @@ bool Protocol_prep::prepare_for_send(List<Item> *item_list) } -void Protocol_prep::prepare_for_resend() +void Protocol_binary::prepare_for_resend() { packet->length(bit_fields+1); bzero((char*) packet->ptr(), 1+bit_fields); @@ -1062,21 +1062,22 @@ void Protocol_prep::prepare_for_resend() } -bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) +bool Protocol_binary::store(const char *from, uint length, + CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= thd->variables.character_set_results; field_pos++; return store_string_aux(from, length, fromcs, tocs); } -bool Protocol_prep::store(const char *from,uint length, - CHARSET_INFO *fromcs, CHARSET_INFO *tocs) +bool Protocol_binary::store(const char *from,uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { field_pos++; return store_string_aux(from, length, fromcs, tocs); } -bool Protocol_prep::store_null() +bool Protocol_binary::store_null() { uint offset= (field_pos+2)/8+1, bit= (1 << ((field_pos+2) & 7)); /* Room for this as it's allocated in prepare_for_send */ @@ -1087,7 +1088,7 @@ bool Protocol_prep::store_null() } -bool Protocol_prep::store_tiny(longlong from) +bool Protocol_binary::store_tiny(longlong from) { char buff[1]; field_pos++; @@ -1096,7 +1097,7 @@ bool Protocol_prep::store_tiny(longlong from) } -bool Protocol_prep::store_short(longlong from) +bool Protocol_binary::store_short(longlong from) { field_pos++; char *to= packet->prep_append(2, PACKET_BUFFER_EXTRA_ALLOC); @@ -1107,7 +1108,7 @@ bool Protocol_prep::store_short(longlong from) } -bool Protocol_prep::store_long(longlong from) +bool Protocol_binary::store_long(longlong from) { field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); @@ -1118,7 +1119,7 @@ bool Protocol_prep::store_long(longlong from) } -bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) +bool Protocol_binary::store_longlong(longlong from, bool unsigned_flag) { field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); @@ -1128,7 +1129,7 @@ bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) return 0; } -bool Protocol_prep::store_decimal(const my_decimal *d) +bool Protocol_binary::store_decimal(const my_decimal *d) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -1141,7 +1142,7 @@ bool Protocol_prep::store_decimal(const my_decimal *d) return store(str.ptr(), str.length(), str.charset()); } -bool Protocol_prep::store(float from, uint32 decimals, String *buffer) +bool Protocol_binary::store(float from, uint32 decimals, String *buffer) { field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); @@ -1152,7 +1153,7 @@ bool Protocol_prep::store(float from, uint32 decimals, String *buffer) } -bool Protocol_prep::store(double from, uint32 decimals, String *buffer) +bool Protocol_binary::store(double from, uint32 decimals, String *buffer) { field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); @@ -1163,7 +1164,7 @@ bool Protocol_prep::store(double from, uint32 
decimals, String *buffer) } -bool Protocol_prep::store(Field *field) +bool Protocol_binary::store(Field *field) { /* We should not increment field_pos here as send_binary() will call another @@ -1175,7 +1176,7 @@ bool Protocol_prep::store(Field *field) } -bool Protocol_prep::store(TIME *tm) +bool Protocol_binary::store(TIME *tm) { char buff[12],*pos; uint length; @@ -1201,15 +1202,15 @@ bool Protocol_prep::store(TIME *tm) return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); } -bool Protocol_prep::store_date(TIME *tm) +bool Protocol_binary::store_date(TIME *tm) { tm->hour= tm->minute= tm->second=0; tm->second_part= 0; - return Protocol_prep::store(tm); + return Protocol_binary::store(tm); } -bool Protocol_prep::store_time(TIME *tm) +bool Protocol_binary::store_time(TIME *tm) { char buff[13], *pos; uint length; diff --git a/sql/protocol.h b/sql/protocol.h index 6c4c7414ea5..da49cf769ae 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -98,16 +98,25 @@ public: #else void remove_last_row() {} #endif + enum enum_protocol_type + { + PROTOCOL_TEXT= 0, PROTOCOL_BINARY= 1 + /* + before adding here or change the values, consider that it is cast to a + bit in sql_cache.cc. + */ + }; + virtual enum enum_protocol_type type()= 0; }; /* Class used for the old (MySQL 4.0 protocol) */ -class Protocol_simple :public Protocol +class Protocol_text :public Protocol { public: - Protocol_simple() {} - Protocol_simple(THD *thd_arg) :Protocol(thd_arg) {} + Protocol_text() {} + Protocol_text(THD *thd_arg) :Protocol(thd_arg) {} virtual void prepare_for_resend(); virtual bool store_null(); virtual bool store_tiny(longlong from); @@ -127,16 +136,17 @@ public: #ifdef EMBEDDED_LIBRARY void remove_last_row(); #endif + virtual enum enum_protocol_type type() { return PROTOCOL_TEXT; }; }; -class Protocol_prep :public Protocol +class Protocol_binary :public Protocol { private: uint bit_fields; public: - Protocol_prep() {} - Protocol_prep(THD *thd_arg) :Protocol(thd_arg) {} + Protocol_binary() {} + Protocol_binary(THD *thd_arg) :Protocol(thd_arg) {} virtual bool prepare_for_send(List<Item> *item_list); virtual void prepare_for_resend(); #ifdef EMBEDDED_LIBRARY @@ -158,6 +168,7 @@ public: virtual bool store(float nr, uint32 decimals, String *buffer); virtual bool store(double from, uint32 decimals, String *buffer); virtual bool store(Field *field); + virtual enum enum_protocol_type type() { return PROTOCOL_BINARY; }; }; void send_warning(THD *thd, uint sql_errno, const char *err=0); diff --git a/sql/records.cc b/sql/records.cc index 0923ab1d75e..0fb9f4f9650 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -150,7 +150,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, info->file= table->file; info->forms= &info->table; /* Only one table */ - if (table->s->tmp_table == TMP_TABLE && !table->sort.addon_field) + if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE && + !table->sort.addon_field) VOID(table->file->extra(HA_EXTRA_MMAP)); if (table->sort.addon_field) diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 16b00cab516..934a6821514 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -73,23 +73,19 @@ static int init_failsafe_rpl_thread(THD* thd) thd->net.read_timeout = slave_net_timeout; thd->max_client_packet_length=thd->net.max_packet; pthread_mutex_lock(&LOCK_thread_count); - thd->thread_id = thread_id++; + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; pthread_mutex_unlock(&LOCK_thread_count); if (init_thr_lock() || thd->store_globals()) { + /* 
purecov: begin inspected */ close_connection(thd, ER_OUT_OF_RESOURCES, 1); // is this needed? statistic_increment(aborted_connects,&LOCK_status); - end_thread(thd,0); + one_thread_per_connection_end(thd,0); DBUG_RETURN(-1); + /* purecov: end */ } -#if !defined(__WIN__) && !defined(__NETWARE__) - sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use - VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); -#endif - thd->mem_root->free= thd->mem_root->used= 0; if (thd->variables.max_join_size == HA_POS_ERROR) thd->options|= OPTION_BIG_SELECTS; diff --git a/sql/rpl_constants.h b/sql/rpl_constants.h new file mode 100644 index 00000000000..426e80a328d --- /dev/null +++ b/sql/rpl_constants.h @@ -0,0 +1,18 @@ +#ifndef RPL_CONSTANTS_H +#define RPL_CONSTANTS_H + +/** + Enumeration of the incidents that can occur for the server. + */ +enum Incident { + /** No incident */ + INCIDENT_NONE, + + /** There are possibly lost events in the replication stream */ + INCIDENT_LOST_EVENTS, + + /** Shall be last event of the enumeration */ + INCIDENT_COUNT +}; + +#endif /* RPL_CONSTANTS_H */ diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index 95b5ecba895..b22b052a105 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -188,3 +188,21 @@ void injector::new_trans(THD *thd, injector::transaction *ptr) DBUG_VOID_RETURN; } + +int injector::record_incident(THD *thd, Incident incident) +{ + Incident_log_event ev(thd, incident); + if (int error= mysql_bin_log.write(&ev)) + return error; + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + return 0; +} + +int injector::record_incident(THD *thd, Incident incident, LEX_STRING message) +{ + Incident_log_event ev(thd, incident, message); + if (int error= mysql_bin_log.write(&ev)) + return error; + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + return 0; +} diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h index 8b08c0672c9..eabf374857a 100644 --- a/sql/rpl_injector.h +++ b/sql/rpl_injector.h @@ -18,9 +18,10 @@ /* Pull in 'byte', 'my_off_t', and 'uint32' */ #include <my_global.h> - #include <my_bitmap.h> +#include "rpl_constants.h" + /* Forward declarations */ class handler; class MYSQL_BIN_LOG; @@ -284,12 +285,14 @@ public: */ int check_state(enum_state const target_state) { +#ifndef DBUG_OFF static char const *state_name[] = { "START_STATE", "TABLE_STATE", "ROW_STATE", "STATE_COUNT" }; DBUG_ASSERT(0 <= target_state && target_state <= STATE_COUNT); DBUG_PRINT("info", ("In state %s", state_name[m_state])); +#endif if (m_state <= target_state && target_state <= m_state + 1 && m_state < STATE_COUNT) @@ -320,6 +323,9 @@ public: transaction new_trans(THD *); void new_trans(THD *, transaction *); + int record_incident(THD*, Incident incident); + int record_incident(THD*, Incident incident, LEX_STRING message); + private: explicit injector(); ~injector() { } /* Nothing needs to be done */ diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 6a7b22bf23d..b0db355154e 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -18,6 +18,7 @@ #include "rpl_rli.h" #include <my_dir.h> // For MY_STAT #include "sql_repl.h" // For check_binlog_magic +#include "rpl_utility.h" static int count_relay_log_space(RELAY_LOG_INFO* rli); @@ -28,14 +29,15 @@ int init_strvar_from_file(char *var, int max_size, IO_CACHE *f, st_relay_log_info::st_relay_log_info() - :no_storage(FALSE), info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), + :no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id), + info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), 
cur_log_old_open_count(0), group_master_log_pos(0), log_space_total(0), ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0), abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0), inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE), until_log_pos(0), retried_trans(0), tables_to_lock(0), tables_to_lock_count(0), - unsafe_to_stop_at(0) + last_event_start_time(0) { DBUG_ENTER("st_relay_log_info::st_relay_log_info"); @@ -967,7 +969,7 @@ err: strtol() conversions needed for log names comparison. We don't need to compare them each time this function is called, we only need to do this when current log name changes. If we have UNTIL_MASTER_POS condition we - need to do this only after Rotate_log_event::exec_event() (which is + need to do this only after Rotate_log_event::do_apply_event() (which is rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS condition then we should invalidate cached comarison value after inc_group_relay_log_pos() which called for each group of events (so we @@ -1000,6 +1002,22 @@ bool st_relay_log_info::is_until_satisfied() log_pos= group_relay_log_pos; } +#ifndef DBUG_OFF + { + char buf[32]; + DBUG_PRINT("info", ("group_master_log_name='%s', group_master_log_pos=%s", + group_master_log_name, llstr(group_master_log_pos, buf))); + DBUG_PRINT("info", ("group_relay_log_name='%s', group_relay_log_pos=%s", + group_relay_log_name, llstr(group_relay_log_pos, buf))); + DBUG_PRINT("info", ("(%s) log_name='%s', log_pos=%s", + until_condition == UNTIL_MASTER_POS ? "master" : "relay", + log_name, llstr(log_pos, buf))); + DBUG_PRINT("info", ("(%s) until_log_name='%s', until_log_pos=%s", + until_condition == UNTIL_MASTER_POS ? "master" : "relay", + until_log_name, llstr(until_log_pos, buf))); + } +#endif + if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { /* @@ -1055,30 +1073,19 @@ void st_relay_log_info::cached_charset_invalidate() } -bool st_relay_log_info::cached_charset_compare(char *charset) +bool st_relay_log_info::cached_charset_compare(char *charset) const { DBUG_ENTER("st_relay_log_info::cached_charset_compare"); if (bcmp(cached_charset, charset, sizeof(cached_charset))) { - memcpy(cached_charset, charset, sizeof(cached_charset)); + memcpy(const_cast<char*>(cached_charset), charset, sizeof(cached_charset)); DBUG_RETURN(1); } DBUG_RETURN(0); } -void st_relay_log_info::transaction_end(THD* thd) -{ - DBUG_ENTER("st_relay_log_info::transaction_end"); - - /* - Nothing to do here right now. - */ - - DBUG_VOID_RETURN; -} - #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) void st_relay_log_info::cleanup_context(THD *thd, bool error) { @@ -1086,12 +1093,12 @@ void st_relay_log_info::cleanup_context(THD *thd, bool error) DBUG_ASSERT(sql_thd == thd); /* - 1) Instances of Table_map_log_event, if ::exec_event() was called on them, + 1) Instances of Table_map_log_event, if ::do_apply_event() was called on them, may have opened tables, which we cannot be sure have been closed (because maybe the Rows_log_event have not been found or will not be, because slave SQL thread is stopping, or relay log has a missing tail etc). So we close all thread's tables. And so the table mappings have to be cancelled. - 2) Rows_log_event::exec_event() may even have started statements or + 2) Rows_log_event::do_apply_event() may even have started statements or transactions on them, which we need to rollback in case of error. 
3) If finding a Format_description_log_event after a BEGIN, we also need to rollback before continuing with the next events. @@ -1105,7 +1112,26 @@ void st_relay_log_info::cleanup_context(THD *thd, bool error) m_table_map.clear_tables(); close_thread_tables(thd); clear_tables_to_lock(); - unsafe_to_stop_at= 0; + last_event_start_time= 0; DBUG_VOID_RETURN; } + +void st_relay_log_info::clear_tables_to_lock() +{ + while (tables_to_lock) + { + gptr to_free= reinterpret_cast<gptr>(tables_to_lock); + if (tables_to_lock->m_tabledef_valid) + { + tables_to_lock->m_tabledef.table_def::~table_def(); + tables_to_lock->m_tabledef_valid= FALSE; + } + tables_to_lock= + static_cast<RPL_TABLE_LIST*>(tables_to_lock->next_global); + tables_to_lock_count--; + my_free(to_free, MYF(MY_WME)); + } + DBUG_ASSERT(tables_to_lock == NULL && tables_to_lock_count == 0); +} + #endif diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index cb9894a2125..3f06e108f6d 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -20,6 +20,8 @@ #include "rpl_tblmap.h" +struct RPL_TABLE_LIST; + /**************************************************************************** @@ -56,6 +58,15 @@ typedef struct st_relay_log_info */ bool no_storage; + /* + If true, events with the same server id should be replicated. This + field is set on creation of a relay log info structure by copying + the value of ::replicate_same_server_id and can be overridden if + necessary. For example of when this is done, check sql_binlog.cc, + where the BINLOG statement can be used to execute "raw" events. + */ + bool replicate_same_server_id; + /*** The following variables can only be read when protect by data lock ****/ /* @@ -279,7 +290,7 @@ typedef struct st_relay_log_info group_relay_log_pos); } - TABLE_LIST *tables_to_lock; /* RBR: Tables to lock */ + RPL_TABLE_LIST *tables_to_lock; /* RBR: Tables to lock */ uint tables_to_lock_count; /* RBR: Count of tables to lock */ table_mapping m_table_map; /* RBR: Mapping table-id to table */ @@ -290,23 +301,19 @@ typedef struct st_relay_log_info When the 6 bytes are equal to 0 is used to mean "cache is invalidated". */ void cached_charset_invalidate(); - bool cached_charset_compare(char *charset); - - void transaction_end(THD*); + bool cached_charset_compare(char *charset) const; void cleanup_context(THD *, bool); - void clear_tables_to_lock() { - while (tables_to_lock) - { - char *to_free= reinterpret_cast<gptr>(tables_to_lock); - tables_to_lock= tables_to_lock->next_global; - tables_to_lock_count--; - my_free(to_free, MYF(MY_WME)); - } - DBUG_ASSERT(tables_to_lock == NULL && tables_to_lock_count == 0); - } + void clear_tables_to_lock(); - time_t unsafe_to_stop_at; + /* + Used by row-based replication to detect that it should not stop at + this event, but give it a chance to send more events. The time + where the last event inside a group started is stored here. If the + variable is zero, we are not in a group (but may be in a + transaction). 
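st_relay_log_info::clear_tables_to_lock(), moved out of line above, frees each list node with my_free() but first runs the embedded table_def destructor explicitly when m_tabledef_valid is set. That pairing is needed because the RPL_TABLE_LIST nodes are raw-allocated and only the m_tabledef member is constructed with placement new; the placement new itself happens outside this section, so that detail is an assumption here. A minimal standalone sketch of the pattern, with illustrative names:

#include <cstdlib>
#include <cstring>
#include <new>        // placement new

struct table_def_sketch
{
  unsigned char *m_type;
  explicit table_def_sketch(unsigned n) : m_type(new unsigned char[n]) {}
  ~table_def_sketch() { delete [] m_type; }
};

struct node_sketch              // stands in for RPL_TABLE_LIST
{
  bool m_tabledef_valid;
  table_def_sketch m_tabledef;  // embedded by value, as in rpl_utility.h below
};

int main()
{
  // Raw allocation stands in for my_malloc(): no constructor runs here.
  node_sketch *node= static_cast<node_sketch*>(std::malloc(sizeof(node_sketch)));
  std::memset(node, 0, sizeof(*node));

  // Only the member that needs construction gets it (placement new) ...
  new (&node->m_tabledef) table_def_sketch(4);
  node->m_tabledef_valid= true;

  // ... so teardown must run the destructor by hand before freeing the node.
  if (node->m_tabledef_valid)
    node->m_tabledef.~table_def_sketch();
  std::free(node);
  return 0;
}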
+ */ + time_t last_event_start_time; } RELAY_LOG_INFO; diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 65a44a4947b..1d7cc808f0c 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -108,7 +108,7 @@ field_length_from_packed(enum_field_types const field_type, */ int -table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) +table_def::compatible_with(RELAY_LOG_INFO const *rli_arg, TABLE *table) const { /* @@ -116,6 +116,7 @@ table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) */ uint const cols_to_check= min(table->s->fields, size()); int error= 0; + RELAY_LOG_INFO const *rli= const_cast<RELAY_LOG_INFO*>(rli_arg); TABLE_SHARE const *const tsh= table->s; diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index 34cebf93ddb..17879a9ecfc 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -22,104 +22,117 @@ #include "mysql_priv.h" +struct st_relay_log_info; +typedef st_relay_log_info RELAY_LOG_INFO; + uint32 -field_length_from_packed(enum_field_types const field_type, - byte const *const data); +field_length_from_packed(enum_field_types field_type, byte const *data); -/* +/** A table definition from the master. - RESPONSIBILITIES - + The responsibilities of this class is: - Extract and decode table definition data from the table map event - Check if table definition in table map is compatible with table definition on slave - DESCRIPTION - - Currently, the only field type data available is an array of the - type operators that are present in the table map event. + Currently, the only field type data available is an array of the + type operators that are present in the table map event. - TODO - - Add type operands to this structure to allow detection of - difference between, e.g., BIT(5) and BIT(10). + @todo Add type operands to this structure to allow detection of + difference between, e.g., BIT(5) and BIT(10). */ class table_def { public: - /* + /** Convenience declaration of the type of the field type data in a table map event. */ typedef unsigned char field_type; - /* + /** Constructor. - SYNOPSIS - table_def() - types Array of types - size Number of elements in array 'types' + @param types Array of types + @param size Number of elements in array 'types' */ table_def(field_type *types, my_size_t size) - : m_type(types), m_size(size) + : m_type(new unsigned char [size]), m_size(size) { + if (m_type) + memcpy(m_type, types, size); + else + m_size= 0; } - /* - Return the number of fields there is type data for. + ~table_def() { + if (m_type) + delete [] m_type; +#ifndef DBUG_OFF + m_type= 0; + m_size= 0; +#endif + } - SYNOPSIS - size() + /** + Return the number of fields there is type data for. - RETURN VALUE - The number of fields that there is type data for. + @return The number of fields that there is type data for. */ my_size_t size() const { return m_size; } + /* Return a representation of the type data for one field. - SYNOPSIS - type() - i Field index to return data for + @param index Field index to return data for - RETURN VALUE - - Will return a representation of the type data for field - 'i'. Currently, only the type identifier is returned. + @return Will return a representation of the type data for field + <code>index</code>. Currently, only the type identifier is + returned. 
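The constructor above now deep-copies the type array into memory owned by the table_def, and the new destructor releases it, so the definition no longer aliases the table map event's buffer; that motivation is inferred from the change rather than stated in the patch. A condensed standalone sketch of the own-your-copy idiom, with illustrative names:

#include <cstring>
#include <new>

class owned_types_sketch
{
public:
  typedef unsigned char field_type;

  owned_types_sketch(const field_type *types, unsigned size)
    : m_type(new (std::nothrow) field_type[size]), m_size(size)
  {
    if (m_type)
      std::memcpy(m_type, types, size);   // deep copy: caller's buffer may die
    else
      m_size= 0;                          // mirror the patch: degrade, don't crash
  }
  ~owned_types_sketch() { delete [] m_type; }

  unsigned size() const { return m_size; }
  field_type type(unsigned i) const { return i < m_size ? m_type[i] : 0; }

private:
  field_type *m_type;
  unsigned m_size;

  // Copying is not addressed by the patch; forbid it here to stay safe.
  owned_types_sketch(const owned_types_sketch&);
  owned_types_sketch& operator=(const owned_types_sketch&);
};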
*/ - field_type type(my_ptrdiff_t i) const { return m_type[i]; } + field_type type(my_ptrdiff_t index) const + { + DBUG_ASSERT(0 <= index); + DBUG_ASSERT(static_cast<my_size_t>(index) < m_size); + return m_type[index]; + } - /* + /** Decide if the table definition is compatible with a table. - SYNOPSIS - compatible_with() - rli Pointer to relay log info - table Pointer to table to compare with. - - DESCRIPTION - - Compare the definition with a table to see if it is compatible - with it. A table definition is compatible with a table if: + Compare the definition with a table to see if it is compatible + with it. + A table definition is compatible with a table if: - the columns types of the table definition is a (not necessarily proper) prefix of the column type of the table, or - - the other way around - RETURN VALUE - 1 if the table definition is not compatible with 'table' - 0 if the table definition is compatible with 'table' + @param rli Pointer to relay log info + @param table Pointer to table to compare with. + + @retval 1 if the table definition is not compatible with @c table + @retval 0 if the table definition is compatible with @c table */ - int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const; + int compatible_with(RELAY_LOG_INFO const *rli, TABLE *table) const; private: my_size_t m_size; // Number of elements in the types array field_type *m_type; // Array of type descriptors }; +/** + Extend the normal table list with a few new fields needed by the + slave thread, but nowhere else. + */ +struct RPL_TABLE_LIST + : public st_table_list +{ + bool m_tabledef_valid; + table_def m_tabledef; +}; + #endif /* RPL_UTILITY_H */ diff --git a/sql/scheduler.cc b/sql/scheduler.cc new file mode 100644 index 00000000000..b05bdf4756f --- /dev/null +++ b/sql/scheduler.cc @@ -0,0 +1,88 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Implementation for the thread scheduler +*/ + +#ifdef USE_PRAGMA_INTERFACE +#pragma implementation +#endif + +#include <mysql_priv.h> + +/* + 'Dummy' functions to be used when we don't need any handling for a scheduler + event + */ + +static bool init_dummy(void) {return 0;} +static void post_kill_dummy(THD* thd) {} +static void end_dummy(void) {} +static bool end_thread_dummy(THD *thd, bool cache_thread) { return 0; } + +/* + Initialize default scheduler with dummy functions so that setup functions + only need to declare those that are relvant for their usage +*/ + +scheduler_functions::scheduler_functions() + :init(init_dummy), + init_new_connection_thread(init_new_connection_handler_thread), + add_connection(0), // Must be defined + post_kill_notification(post_kill_dummy), + end_thread(end_thread_dummy), end(end_dummy) +{} + + +/* + End connection, in case when we are using 'no-threads' +*/ + +static bool no_threads_end(THD *thd, bool put_in_cache) +{ + unlink_thd(thd); + pthread_mutex_unlock(&LOCK_thread_count); + return 1; // Abort handle_one_connection +} + + +/* + Initailize scheduler for --thread-handling=no-threads +*/ + +void one_thread_scheduler(scheduler_functions* func) +{ + func->max_threads= 1; +#ifndef EMBEDDED_LIBRARY + func->add_connection= handle_connection_in_main_thread; +#endif + func->init_new_connection_thread= init_dummy; + func->end_thread= no_threads_end; +} + + +/* + Initialize scheduler for --thread-handling=one-thread-per-connection +*/ + +#ifndef EMBEDDED_LIBRARY +void one_thread_per_connection_scheduler(scheduler_functions* func) +{ + func->max_threads= max_connections; + func->add_connection= create_thread_to_handle_connection; + func->end_thread= one_thread_per_connection_end; +} +#endif /* EMBEDDED_LIBRARY */ diff --git a/sql/scheduler.h b/sql/scheduler.h new file mode 100644 index 00000000000..8351cefda4c --- /dev/null +++ b/sql/scheduler.h @@ -0,0 +1,60 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Classes for the thread scheduler +*/ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface +#endif + +class THD; + +/* Functions used when manipulating threads */ + +class scheduler_functions +{ +public: + uint max_threads; + bool (*init)(void); + bool (*init_new_connection_thread)(void); + void (*add_connection)(THD *thd); + void (*post_kill_notification)(THD *thd); + bool (*end_thread)(THD *thd, bool cache_thread); + void (*end)(void); + scheduler_functions(); +}; + +enum scheduler_types +{ + SCHEDULER_ONE_THREAD_PER_CONNECTION=1, + SCHEDULER_NO_THREADS, + SCHEDULER_POOL_OF_THREADS +}; + +void one_thread_per_connection_scheduler(scheduler_functions* func); +void one_thread_scheduler(scheduler_functions* func); + +enum pool_command_op +{ + NOT_IN_USE_OP= 0, NORMAL_OP= 1, CONNECT_OP, KILL_OP, DIE_OP +}; + +#define HAVE_POOL_OF_THREADS 0 /* For easyer tests */ +#define pool_of_threads_scheduler(A) one_thread_per_connection_scheduler(A) + +class thd_scheduler +{}; diff --git a/sql/set_var.cc b/sql/set_var.cc index 55fbeac5622..7f55556c134 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -78,7 +78,8 @@ extern my_bool innobase_log_archive, innobase_use_checksums, innobase_file_per_table, innobase_locks_unsafe_for_binlog, - innobase_rollback_on_timeout; + innobase_rollback_on_timeout, + innobase_stats_on_metadata; extern "C" { extern ulong srv_max_buf_pool_modified_pct; @@ -355,6 +356,8 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count", &SV::net_retry_count, 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); +sys_var_bool_ptr_readonly sys_old_mode("old", + &global_system_variables.old_mode); sys_var_thd_bool sys_old_alter_table("old_alter_table", &SV::old_alter_table); sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); @@ -396,6 +399,10 @@ sys_var_thd_ulong sys_trans_alloc_block_size("transaction_alloc_block_size", sys_var_thd_ulong sys_trans_prealloc_size("transaction_prealloc_size", &SV::trans_prealloc_size, 0, fix_trans_mem_root); +sys_var_thd_enum sys_thread_handling("thread_handling", + &SV::thread_handling, + &thread_handling_typelib, + NULL); #ifdef HAVE_QUERY_CACHE sys_var_long_ptr sys_query_cache_limit("query_cache_limit", @@ -411,6 +418,8 @@ sys_query_cache_wlock_invalidate("query_cache_wlock_invalidate", &SV::query_cache_wlock_invalidate); #endif /* HAVE_QUERY_CACHE */ sys_var_bool_ptr sys_secure_auth("secure_auth", &opt_secure_auth); +sys_var_const_str_ptr sys_secure_file_priv("secure_file_priv", + &opt_secure_file_priv); sys_var_long_ptr sys_server_id("server_id", &server_id, fix_server_id); sys_var_bool_ptr sys_slave_compressed_protocol("slave_compressed_protocol", &opt_slave_compressed_protocol); @@ -464,6 +473,10 @@ sys_var_long_ptr sys_table_lock_wait_timeout("table_lock_wait_timeout", &table_lock_wait_timeout); sys_var_long_ptr sys_thread_cache_size("thread_cache_size", &thread_cache_size); +#if HAVE_POOL_OF_THREADS == 1 +sys_var_long_ptr sys_thread_pool_size("thread_pool_size", + &thread_pool_size); +#endif sys_var_thd_enum sys_tx_isolation("tx_isolation", &SV::tx_isolation, &tx_isolation_typelib, @@ -653,6 +666,10 @@ sys_var_thd_time_zone sys_time_zone("time_zone"); /* Read only variables */ +/* Global read-only variable containing hostname */ +sys_var_const_str sys_hostname("hostname", glob_hostname); + + 
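The scheduler_functions interface above is selected according to the new thread_handling option registered in set_var.cc below. The actual wiring lives in mysqld.cc, outside this diff, so the dispatch here is only a sketch of how the option values map onto the setup functions declared in scheduler.h; setup_thread_scheduler() is a hypothetical name.

/* Sketch: map --thread-handling onto the setup functions above.     */
static void setup_thread_scheduler(scheduler_types handling,
                                   scheduler_functions *func)
{
  switch (handling) {
  case SCHEDULER_NO_THREADS:
    one_thread_scheduler(func);                 /* everything in main thread */
    break;
  case SCHEDULER_POOL_OF_THREADS:
    pool_of_threads_scheduler(func);            /* see note below            */
    break;
  case SCHEDULER_ONE_THREAD_PER_CONNECTION:
  default:
    one_thread_per_connection_scheduler(func);  /* classic model             */
    break;
  }
}

Note that pool_of_threads_scheduler() is, at this point, a macro alias for the one-thread-per-connection setup, consistent with HAVE_POOL_OF_THREADS being defined as 0 in scheduler.h above.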
sys_var_have_variable sys_have_compress("have_compress", &have_compress); sys_var_have_variable sys_have_crypt("have_crypt", &have_crypt); sys_var_have_variable sys_have_csv_db("have_csv", &have_csv_db); @@ -660,7 +677,8 @@ sys_var_have_variable sys_have_dlopen("have_dynamic_loading", &have_dlopen); sys_var_have_variable sys_have_geometry("have_geometry", &have_geometry); sys_var_have_variable sys_have_innodb("have_innodb", &have_innodb); sys_var_have_variable sys_have_ndbcluster("have_ndbcluster", &have_ndbcluster); -sys_var_have_variable sys_have_openssl("have_openssl", &have_openssl); +sys_var_have_variable sys_have_openssl("have_openssl", &have_ssl); +sys_var_have_variable sys_have_ssl("have_ssl", &have_ssl); sys_var_have_variable sys_have_partition_db("have_partitioning", &have_partition_db); sys_var_have_variable sys_have_query_cache("have_query_cache", @@ -669,7 +687,6 @@ sys_var_have_variable sys_have_rtree_keys("have_rtree_keys", &have_rtree_keys); sys_var_have_variable sys_have_symlink("have_symlink", &have_symlink); /* Global read-only variable describing server license */ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE)); - /* Global variables which enable|disable logging */ sys_var_log_state sys_var_general_log("general_log", &opt_log, QUERY_LOG_GENERAL); @@ -778,6 +795,7 @@ SHOW_VAR init_vars[]= { {sys_var_general_log.name, (char*) &opt_log, SHOW_MY_BOOL}, {sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS}, {sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS}, + {sys_hostname.name, (char*) &sys_hostname, SHOW_SYS}, {sys_have_compress.name, (char*) &have_compress, SHOW_HAVE}, {sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE}, {sys_have_csv_db.name, (char*) &have_csv_db, SHOW_HAVE}, @@ -785,7 +803,8 @@ SHOW_VAR init_vars[]= { {sys_have_geometry.name, (char*) &have_geometry, SHOW_HAVE}, {sys_have_innodb.name, (char*) &have_innodb, SHOW_HAVE}, {sys_have_ndbcluster.name, (char*) &have_ndbcluster, SHOW_HAVE}, - {sys_have_openssl.name, (char*) &have_openssl, SHOW_HAVE}, + {sys_have_openssl.name, (char*) &have_ssl, SHOW_HAVE}, + {sys_have_ssl.name, (char*) &have_ssl, SHOW_HAVE}, {sys_have_partition_db.name,(char*) &have_partition_db, SHOW_HAVE}, {sys_have_query_cache.name, (char*) &have_query_cache, SHOW_HAVE}, {sys_have_rtree_keys.name, (char*) &have_rtree_keys, SHOW_HAVE}, @@ -821,6 +840,7 @@ SHOW_VAR init_vars[]= { {"innodb_mirrored_log_groups", (char*) &innobase_mirrored_log_groups, SHOW_LONG}, {"innodb_open_files", (char*) &innobase_open_files, SHOW_LONG }, {"innodb_rollback_on_timeout", (char*) &innobase_rollback_on_timeout, SHOW_MY_BOOL}, + {"innodb_stats_on_metadata", (char*) &innobase_stats_on_metadata, SHOW_MY_BOOL}, {sys_innodb_support_xa.name, (char*) &sys_innodb_support_xa, SHOW_SYS}, {sys_innodb_sync_spin_loops.name, (char*) &sys_innodb_sync_spin_loops, SHOW_SYS}, {sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS}, @@ -924,6 +944,7 @@ SHOW_VAR init_vars[]= { {sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS}, {sys_net_write_timeout.name,(char*) &sys_net_write_timeout, SHOW_SYS}, {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, + {sys_old_mode.name, (char*) &sys_old_mode, SHOW_SYS}, {sys_old_alter_table.name, (char*) &sys_old_alter_table, SHOW_SYS}, {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, @@ -959,6 +980,7 @@ SHOW_VAR init_vars[]= { #endif {sys_rpl_recovery_rank.name,(char*) 
&sys_rpl_recovery_rank, SHOW_SYS}, {"secure_auth", (char*) &sys_secure_auth, SHOW_SYS}, + {"secure_file_priv", (char*) &sys_secure_file_priv, SHOW_SYS}, #ifdef HAVE_SMEM {"shared_memory", (char*) &opt_enable_shared_memory, SHOW_MY_BOOL}, {"shared_memory_base_name", (char*) &shared_memory_base_name, SHOW_CHAR_PTR}, @@ -1007,6 +1029,10 @@ SHOW_VAR init_vars[]= { #ifdef HAVE_THR_SETCONCURRENCY {"thread_concurrency", (char*) &concurrency, SHOW_LONG}, #endif + {sys_thread_handling.name, (char*) &sys_thread_handling, SHOW_SYS}, +#if HAVE_POOL_OF_THREADS == 1 + {sys_thread_pool_size.name, (char*) &sys_thread_pool_size, SHOW_SYS}, +#endif {"thread_stack", (char*) &thread_stack, SHOW_LONG}, {sys_time_format.name, (char*) &sys_time_format, SHOW_SYS}, {"time_zone", (char*) &sys_time_zone, SHOW_SYS}, @@ -2587,7 +2613,7 @@ bool update_sys_var_str_path(THD *thd, sys_var_str *var_str, file_log= logger.get_log_file_handler(); break; default: - DBUG_ASSERT(0); + assert(0); // Impossible } if (!old_value) @@ -2614,7 +2640,7 @@ bool update_sys_var_str_path(THD *thd, sys_var_str *var_str, { switch (log_type) { case QUERY_LOG_SLOW: - file_log->open_slow_log(sys_var_general_log_path.value); + file_log->open_slow_log(sys_var_slow_log_path.value); break; case QUERY_LOG_GENERAL: file_log->open_query_log(sys_var_general_log_path.value); @@ -2727,8 +2753,8 @@ int set_var_collation_client::update(THD *thd) thd->variables.character_set_results= character_set_results; thd->variables.collation_connection= collation_connection; thd->update_charset(); - thd->protocol_simple.init(thd); - thd->protocol_prep.init(thd); + thd->protocol_text.init(thd); + thd->protocol_binary.init(thd); return 0; } @@ -2857,8 +2883,7 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var) String str(buff, sizeof(buff), &my_charset_latin1); String *res= var->value->val_str(&str); - if (!(var->save_result.time_zone= - my_tz_find(res, thd->lex->time_zone_tables_used))) + if (!(var->save_result.time_zone= my_tz_find(thd, res))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL"); return 1; @@ -2919,8 +2944,7 @@ void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type) We are guaranteed to find this time zone since its existence is checked during start-up. 
*/ - global_system_variables.time_zone= - my_tz_find(&str, thd->lex->time_zone_tables_used); + global_system_variables.time_zone= my_tz_find(thd, &str); } else global_system_variables.time_zone= my_tz_SYSTEM; @@ -3626,7 +3650,7 @@ bool sys_var_thd_table_type::update(THD *thd, set_var *var) */ byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, - ulong val, + ulonglong val, ulong *len) { char buff[256]; @@ -3990,7 +4014,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var) DBUG_ENTER("sys_var_event_scheduler::update"); if (Events::opt_event_scheduler == Events::EVENTS_DISABLED) { - my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--event-scheduler=DISABLED"); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--event-scheduler=DISABLED or --skip-grant-tables"); DBUG_RETURN(TRUE); } @@ -4002,7 +4026,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var) res= Events::get_instance()->stop_execution_of_events(); else { - DBUG_ASSERT(0); + assert(0); // Impossible } if (res) my_error(ER_EVENT_SET_VAR_ERROR, MYF(0)); diff --git a/sql/set_var.h b/sql/set_var.h index 338ec5513b0..eac03fce610 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -169,6 +169,16 @@ public: }; +class sys_var_bool_ptr_readonly :public sys_var_bool_ptr +{ +public: + sys_var_bool_ptr_readonly(const char *name_arg, my_bool *value_arg) + :sys_var_bool_ptr(name_arg, value_arg) + {} + bool is_readonly() const { return 1; } +}; + + class sys_var_str :public sys_var { public: @@ -440,7 +450,7 @@ public: } void set_default(THD *thd, enum_var_type type); byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); - static byte *symbolic_mode_representation(THD *thd, ulong sql_mode, + static byte *symbolic_mode_representation(THD *thd, ulonglong sql_mode, ulong *length); }; diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 29fde49bbd6..b5357493e05 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -51,54 +51,54 @@ ER_YES spa "SI" ukr "ôáë" ER_CANT_CREATE_FILE - cze "Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)" - dan "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)" - nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)" + cze "Nemohu vytvo-Bøit soubor '%-.200s' (chybový kód: %d)" + dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %d)" + nla "Kan file '%-.200s' niet aanmaken (Errcode: %d)" eng "Can't create file '%-.200s' (errno: %d)" - est "Ei suuda luua faili '%-.64s' (veakood: %d)" - fre "Ne peut créer le fichier '%-.64s' (Errcode: %d)" - ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)" - greek "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "A '%-.64s' file nem hozhato letre (hibakod: %d)" - ita "Impossibile creare il file '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" - kor "ÈÀÏ '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke opprette fila '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)" - por "Não pode criar o arquivo '%-.64s' (erro no. 
%d)" - rum "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da kreiram file '%-.64s' (errno: %d)" - slo "Nemô¾em vytvori» súbor '%-.64s' (chybový kód: %d)" - spa "No puedo crear archivo '%-.64s' (Error: %d)" - swe "Kan inte skapa filen '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + est "Ei suuda luua faili '%-.200s' (veakood: %d)" + fre "Ne peut créer le fichier '%-.200s' (Errcode: %d)" + ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.200s' file nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il file '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏ '%-.200s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette fila '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ pliku '%-.200s' (Kod b³êdu: %d)" + por "Não pode criar o arquivo '%-.200s' (erro no. %d)" + rum "Nu pot sa creez fisierul '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram file '%-.200s' (errno: %d)" + slo "Nemô¾em vytvori» súbor '%-.200s' (chybový kód: %d)" + spa "No puedo crear archivo '%-.200s' (Error: %d)" + swe "Kan inte skapa filen '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_CREATE_TABLE - cze "Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)" - dan "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)" - nla "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)" + cze "Nemohu vytvo-Bøit tabulku '%-.200s' (chybový kód: %d)" + dan "Kan ikke oprette tabellen '%-.200s' (Fejlkode: %d)" + nla "Kan tabel '%-.200s' niet aanmaken (Errcode: %d)" eng "Can't create table '%-.200s' (errno: %d)" - jps "'%-.64s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)", - est "Ei suuda luua tabelit '%-.64s' (veakood: %d)" - fre "Ne peut créer la table '%-.64s' (Errcode: %d)" - ger "Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)" - greek "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "A '%-.64s' tabla nem hozhato letre (hibakod: %d)" - ita "Impossibile creare la tabella '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)" - kor "Å×À̺í '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)" - por "Não pode criar a tabela '%-.64s' (erro no. %d)" - rum "Nu pot sa creez tabla '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da kreiram tabelu '%-.64s' (errno: %d)" - slo "Nemô¾em vytvori» tabuµku '%-.64s' (chybový kód: %d)" - spa "No puedo crear tabla '%-.64s' (Error: %d)" - swe "Kan inte skapa tabellen '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.200s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda luua tabelit '%-.200s' (veakood: %d)" + fre "Ne peut créer la table '%-.200s' (Errcode: %d)" + ger "Kann Tabelle '%-.200s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.200s' tabla nem hozhato letre (hibakod: %d)" + ita "Impossibile creare la tabella '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)" + kor "Å×À̺í '%-.200s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. 
(¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette tabellen '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette tabellen '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ tabeli '%-.200s' (Kod b³êdu: %d)" + por "Não pode criar a tabela '%-.200s' (erro no. %d)" + rum "Nu pot sa creez tabla '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram tabelu '%-.200s' (errno: %d)" + slo "Nemô¾em vytvori» tabuµku '%-.200s' (chybový kód: %d)" + spa "No puedo crear tabla '%-.200s' (Error: %d)" + swe "Kan inte skapa tabellen '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_CREATE_DB cze "Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)" dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)" @@ -275,30 +275,30 @@ ER_CANT_FIND_SYSTEM_REC swe "Hittar inte posten i systemregistret" ukr "îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ" ER_CANT_GET_STAT - cze "Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)" - dan "Kan ikke læse status af '%-.64s' (Fejlkode: %d)" - nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)" + cze "Nemohu z-Bískat stav '%-.200s' (chybový kód: %d)" + dan "Kan ikke læse status af '%-.200s' (Fejlkode: %d)" + nla "Kan de status niet krijgen van '%-.200s' (Errcode: %d)" eng "Can't get status of '%-.200s' (errno: %d)" - jps "'%-.64s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)", - est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)" - fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)" - ger "Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)" - greek "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)" - ita "Impossibile leggere lo stato di '%-.64s' (errno: %d)" - jpn "'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)" - kor "'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)" - por "Não pode obter o status de '%-.64s' (erro no. %d)" - rum "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)" - slo "Nemô¾em zisti» stav '%-.64s' (chybový kód: %d)" - spa "No puedo obtener el estado de '%-.64s' (Error: %d)" - swe "Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.200s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)", + est "Ei suuda lugeda '%-.200s' olekut (veakood: %d)" + fre "Ne peut obtenir le status de '%-.200s' (Errcode: %d)" + ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %d)" + greek "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere lo stato di '%-.200s' (errno: %d)" + jpn "'%-.200s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)" + kor "'%-.200s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na otrzymaæ statusu '%-.200s' (Kod b³êdu: %d)" + por "Não pode obter o status de '%-.200s' (erro no. 
%d)" + rum "Nu pot sa obtin statusul lui '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da dobijem stanje file-a '%-.200s' (errno: %d)" + slo "Nemô¾em zisti» stav '%-.200s' (chybový kód: %d)" + spa "No puedo obtener el estado de '%-.200s' (Error: %d)" + swe "Kan inte läsa filinformationen (stat) från '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_GET_WD cze "Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)" dan "Kan ikke læse aktive folder (Fejlkode: %d)" @@ -350,55 +350,55 @@ ER_CANT_LOCK swe "Kan inte låsa filen. (Felkod: %d)" ukr "îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)" ER_CANT_OPEN_FILE - cze "Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)" - dan "Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)" - nla "Kan de file '%-.64s' niet openen (Errcode: %d)" + cze "Nemohu otev-Bøít soubor '%-.200s' (chybový kód: %d)" + dan "Kan ikke åbne fil: '%-.200s' (Fejlkode: %d)" + nla "Kan de file '%-.200s' niet openen (Errcode: %d)" eng "Can't open file: '%-.200s' (errno: %d)" - jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", - est "Ei suuda avada faili '%-.64s' (veakood: %d)" - fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)" - ger "Kann Datei '%-.64s' nicht öffnen (Fehler: %d)" - greek "Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "A '%-.64s' file nem nyithato meg (hibakod: %d)" - ita "Impossibile aprire il file: '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" - kor "ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke åpne fila: '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)" - por "Não pode abrir o arquivo '%-.64s' (erro no. %d)" - rum "Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da otvorim file: '%-.64s' (errno: %d)" - slo "Nemô¾em otvori» súbor: '%-.64s' (chybový kód: %d)" - spa "No puedo abrir archivo: '%-.64s' (Error: %d)" - swe "Kan inte använda '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Ei suuda avada faili '%-.200s' (veakood: %d)" + fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %d)" + ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %d)" + greek "Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.200s' file nem nyithato meg (hibakod: %d)" + ita "Impossibile aprire il file: '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.200s' (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke åpne fila: '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje åpne fila: '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na otworzyæ pliku: '%-.200s' (Kod b³êdu: %d)" + por "Não pode abrir o arquivo '%-.200s' (erro no. 
%d)" + rum "Nu pot sa deschid fisierul: '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da otvorim file: '%-.200s' (errno: %d)" + slo "Nemô¾em otvori» súbor: '%-.200s' (chybový kód: %d)" + spa "No puedo abrir archivo: '%-.200s' (Error: %d)" + swe "Kan inte använda '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_FILE_NOT_FOUND - cze "Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)" - dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)" - nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)" + cze "Nemohu naj-Bít soubor '%-.200s' (chybový kód: %d)" + dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %d)" + nla "Kan de file: '%-.200s' niet vinden (Errcode: %d)" eng "Can't find file: '%-.200s' (errno: %d)" - jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", - est "Ei suuda leida faili '%-.64s' (veakood: %d)" - fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)" - ger "Kann Datei '%-.64s' nicht finden (Fehler: %d)" - greek "Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "A(z) '%-.64s' file nem talalhato (hibakod: %d)" - ita "Impossibile trovare il file: '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)" - kor "ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke finne fila: '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)" - por "Não pode encontrar o arquivo '%-.64s' (erro no. %d)" - rum "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da pronaðem file: '%-.64s' (errno: %d)" - slo "Nemô¾em nájs» súbor: '%-.64s' (chybový kód: %d)" - spa "No puedo encontrar archivo: '%-.64s' (Error: %d)" - swe "Hittar inte filen '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda leida faili '%-.200s' (veakood: %d)" + fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %d)" + ger "Kann Datei '%-.200s' nicht finden (Fehler: %d)" + greek "Äåí âñÝèçêå ôï áñ÷åßï: '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.200s' file nem talalhato (hibakod: %d)" + ita "Impossibile trovare il file: '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)" + kor "ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.200s' (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke finne fila: '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na znale¥æ pliku: '%-.200s' (Kod b³êdu: %d)" + por "Não pode encontrar o arquivo '%-.200s' (erro no. %d)" + rum "Nu pot sa gasesc fisierul: '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da pronaðem file: '%-.200s' (errno: %d)" + slo "Nemô¾em nájs» súbor: '%-.200s' (chybový kód: %d)" + spa "No puedo encontrar archivo: '%-.200s' (Error: %d)" + swe "Hittar inte filen '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_READ_DIR cze "Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)" dan "Kan ikke læse folder '%-.64s' (Fejlkode: %d)" @@ -480,7 +480,7 @@ ER_DISK_FULL jps "Disk full (%s). ’N‚©‚ª‰½‚©‚ðŒ¸‚ç‚·‚Ü‚Å‚Ü‚Á‚Ä‚‚¾‚³‚¢...", est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..." fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..." - ger "Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ..." + ger "Festplatte voll (%s). 
Warte, bis jemand Platz schafft ..." greek "Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò..." hun "A lemez megtelt (%s)." ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..." @@ -546,80 +546,80 @@ ER_ERROR_ON_CLOSE swe "Fick fel vid stängning av '%-.64s' (Felkod: %d)" ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" ER_ERROR_ON_READ - cze "Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)" - dan "Fejl ved læsning af '%-.64s' (Fejlkode: %d)" - nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)" + cze "Chyba p-Bøi ètení souboru '%-.200s' (chybový kód: %d)" + dan "Fejl ved læsning af '%-.200s' (Fejlkode: %d)" + nla "Fout bij het lezen van file '%-.200s' (Errcode: %d)" eng "Error reading file '%-.200s' (errno: %d)" - jps "'%-.64s' ƒtƒ@ƒCƒ‹‚Ì“Ç‚Ýž‚݃Gƒ‰[ (errno: %d)", - est "Viga faili '%-.64s' lugemisel (veakood: %d)" - fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)" - ger "Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)" - greek "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Hiba a '%-.64s'file olvasasakor. (hibakod: %d)" - ita "Errore durante la lettura del file '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ÎÆɤ߹þ¤ß¥¨¥é¡¼ (errno: %d)" - kor "'%-.64s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)" - nor "Feil ved lesing av '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved lesing av '%-.64s' (Feilkode: %d)" - pol "B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)" - por "Erro ao ler arquivo '%-.64s' (erro no. %d)" - rum "Eroare citind fisierul '%-.64s' (errno: %d)" - rus "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Greška pri èitanju file-a '%-.64s' (errno: %d)" - slo "Chyba pri èítaní súboru '%-.64s' (chybový kód: %d)" - spa "Error leyendo el fichero '%-.64s' (Error: %d)" - swe "Fick fel vid läsning av '%-.64s' (Felkod %d)" - ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚Ì“Ç‚Ýž‚݃Gƒ‰[ (errno: %d)", + est "Viga faili '%-.200s' lugemisel (veakood: %d)" + fre "Erreur en lecture du fichier '%-.200s' (Errcode: %d)" + ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %d)" + ita "Errore durante la lettura del file '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ÎÆɤ߹þ¤ß¥¨¥é¡¼ (errno: %d)" + kor "'%-.200s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved lesing av '%-.200s' (Feilkode: %d)" + norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %d)" + pol "B³?d podczas odczytu pliku '%-.200s' (Kod b³êdu: %d)" + por "Erro ao ler arquivo '%-.200s' (erro no. 
%d)" + rum "Eroare citind fisierul '%-.200s' (errno: %d)" + rus "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri èitanju file-a '%-.200s' (errno: %d)" + slo "Chyba pri èítaní súboru '%-.200s' (chybový kód: %d)" + spa "Error leyendo el fichero '%-.200s' (Error: %d)" + swe "Fick fel vid läsning av '%-.200s' (Felkod %d)" + ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_ERROR_ON_RENAME - cze "Chyba p-Bøi pøejmenování '%-.64s' na '%-.64s' (chybový kód: %d)" - dan "Fejl ved omdøbning af '%-.64s' til '%-.64s' (Fejlkode: %d)" - nla "Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)" + cze "Chyba p-Bøi pøejmenování '%-.150s' na '%-.150s' (chybový kód: %d)" + dan "Fejl ved omdøbning af '%-.150s' til '%-.150s' (Fejlkode: %d)" + nla "Fout bij het hernoemen van '%-.150s' naar '%-.150s' (Errcode: %d)" eng "Error on rename of '%-.150s' to '%-.150s' (errno: %d)" - jps "'%-.64s' ‚ð '%-.64s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)", - est "Viga faili '%-.64s' ümbernimetamisel '%-.64s'-ks (veakood: %d)" - fre "Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)" - ger "Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)" - greek "Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.64s' to '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)" - ita "Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)" - jpn "'%-.64s' ¤ò '%-.64s' ¤Ë rename ¤Ç¤¤Þ¤»¤ó (errno: %d)" - kor "'%-.64s'¸¦ '%-.64s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" - nor "Feil ved omdøping av '%-.64s' til '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved omdøyping av '%-.64s' til '%-.64s' (Feilkode: %d)" - pol "B³?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod b³êdu: %d)" - por "Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)" - rum "Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)" - rus "ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.64s' × '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)" - slo "Chyba pri premenovávaní '%-.64s' na '%-.64s' (chybový kód: %d)" - spa "Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)" - swe "Kan inte byta namn från '%-.64s' till '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.64s' Õ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.150s' ‚ð '%-.150s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Viga faili '%-.150s' ümbernimetamisel '%-.150s'-ks (veakood: %d)" + fre "Erreur en renommant '%-.150s' en '%-.150s' (Errcode: %d)" + ger "Fehler beim Umbenennen von '%-.150s' in '%-.150s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.150s' to '%-.150s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.150s' file atnevezesekor '%-.150s'. (hibakod: %d)" + ita "Errore durante la rinominazione da '%-.150s' a '%-.150s' (errno: %d)" + jpn "'%-.150s' ¤ò '%-.150s' ¤Ë rename ¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "'%-.150s'¸¦ '%-.150s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved omdøping av '%-.150s' til '%-.150s' (Feilkode: %d)" + norwegian-ny "Feil ved omdøyping av '%-.150s' til '%-.150s' (Feilkode: %d)" + pol "B³?d podczas zmieniania nazwy '%-.150s' na '%-.150s' (Kod b³êdu: %d)" + por "Erro ao renomear '%-.150s' para '%-.150s' (erro no. 
%d)" + rum "Eroare incercind sa renumesc '%-.150s' in '%-.150s' (errno: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.150s' × '%-.150s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri promeni imena '%-.150s' na '%-.150s' (errno: %d)" + slo "Chyba pri premenovávaní '%-.150s' na '%-.150s' (chybový kód: %d)" + spa "Error en el renombrado de '%-.150s' a '%-.150s' (Error: %d)" + swe "Kan inte byta namn från '%-.150s' till '%-.150s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.150s' Õ '%-.150s' (ÐÏÍÉÌËÁ: %d)" ER_ERROR_ON_WRITE - cze "Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)" - dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)" - nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)" + cze "Chyba p-Bøi zápisu do souboru '%-.200s' (chybový kód: %d)" + dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %d)" + nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %d)" eng "Error writing file '%-.200s' (errno: %d)" - jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð‘‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", - est "Viga faili '%-.64s' kirjutamisel (veakood: %d)" - fre "Erreur d'écriture du fichier '%-.64s' (Errcode: %d)" - ger "Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)" - greek "Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Hiba a '%-.64s' file irasakor. (hibakod: %d)" - ita "Errore durante la scrittura del file '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" - kor "'%-.64s'ÈÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" - nor "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" - pol "B³?d podczas zapisywania pliku '%-.64s' (Kod b³êdu: %d)" - por "Erro ao gravar arquivo '%-.64s' (erro no. %d)" - rum "Eroare scriind fisierul '%-.64s' (errno: %d)" - rus "ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Greška pri upisu '%-.64s' (errno: %d)" - slo "Chyba pri zápise do súboru '%-.64s' (chybový kód: %d)" - spa "Error escribiendo el archivo '%-.64s' (Error: %d)" - swe "Fick fel vid skrivning till '%-.64s' (Felkod %d)" - ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚ð‘‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Viga faili '%-.200s' kirjutamisel (veakood: %d)" + fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %d)" + ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.200s' file irasakor. (hibakod: %d)" + ita "Errore durante la scrittura del file '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "'%-.200s'ÈÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved skriving av fila '%-.200s' (Feilkode: %d)" + norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %d)" + pol "B³?d podczas zapisywania pliku '%-.200s' (Kod b³êdu: %d)" + por "Erro ao gravar arquivo '%-.200s' (erro no. 
%d)" + rum "Eroare scriind fisierul '%-.200s' (errno: %d)" + rus "ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri upisu '%-.200s' (errno: %d)" + slo "Chyba pri zápise do súboru '%-.200s' (chybový kód: %d)" + spa "Error escribiendo el archivo '%-.200s' (Error: %d)" + swe "Fick fel vid skrivning till '%-.200s' (Felkod %d)" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_FILE_USED cze "'%-.64s' je zam-Bèen proti zmìnám" dan "'%-.64s' er låst mod opdateringer" @@ -741,7 +741,7 @@ ER_ILLEGAL_HA serbian "Handler tabela za '%-.64s' nema ovu opciju" slo "Obsluha tabuµky '%-.64s' nemá tento parameter" spa "El manejador de la tabla de '%-.64s' no tiene esta opcion" - swe "Registrets databas har inte denna facilitet" + swe "Tabellhanteraren for tabell '%-.64s' stödjer ej detta" ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦" ER_KEY_NOT_FOUND cze "Nemohu naj-Bít záznam v '%-.64s'" @@ -766,58 +766,58 @@ ER_KEY_NOT_FOUND serbian "Ne mogu da pronaðem slog u '%-.64s'" slo "Nemô¾em nájs» záznam v '%-.64s'" spa "No puedo encontrar el registro en '%-.64s'" - swe "Hittar inte posten" + swe "Hittar inte posten '%-.64s'" ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'" ER_NOT_FORM_FILE - cze "Nespr-Bávná informace v souboru '%-.64s'" - dan "Forkert indhold i: '%-.64s'" - nla "Verkeerde info in file: '%-.64s'" + cze "Nespr-Bávná informace v souboru '%-.200s'" + dan "Forkert indhold i: '%-.200s'" + nla "Verkeerde info in file: '%-.200s'" eng "Incorrect information in file: '%-.200s'" - jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·", - est "Vigane informatsioon failis '%-.64s'" - fre "Information erronnée dans le fichier: '%-.64s'" - ger "Falsche Information in Datei '%-.64s'" - greek "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'" - hun "Ervenytelen info a file-ban: '%-.64s'" - ita "Informazione errata nel file: '%-.64s'" - jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹" - kor "ÈÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.64s'" - nor "Feil informasjon i filen: '%-.64s'" - norwegian-ny "Feil informasjon i fila: '%-.64s'" - pol "Niew³a?ciwa informacja w pliku: '%-.64s'" - por "Informação incorreta no arquivo '%-.64s'" - rum "Informatie incorecta in fisierul: '%-.64s'" - rus "îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.64s'" - serbian "Pogrešna informacija u file-u: '%-.64s'" - slo "Nesprávna informácia v súbore: '%-.64s'" - spa "Informacion erronea en el archivo: '%-.64s'" - swe "Felaktig fil: '%-.64s'" - ukr "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'" + jps "ƒtƒ@ƒCƒ‹ '%-.200s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·", + est "Vigane informatsioon failis '%-.200s'" + fre "Information erronnée dans le fichier: '%-.200s'" + ger "Falsche Information in Datei '%-.200s'" + greek "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.200s'" + hun "Ervenytelen info a file-ban: '%-.200s'" + ita "Informazione errata nel file: '%-.200s'" + jpn "¥Õ¥¡¥¤¥ë '%-.200s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹" + kor "ÈÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.200s'" + nor "Feil informasjon i filen: '%-.200s'" + norwegian-ny "Feil informasjon i fila: '%-.200s'" + pol "Niew³a?ciwa informacja w pliku: '%-.200s'" + por "Informação incorreta no arquivo '%-.200s'" + rum "Informatie incorecta in fisierul: '%-.200s'" + rus "îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.200s'" + serbian "Pogrešna informacija u file-u: '%-.200s'" + slo "Nesprávna informácia v súbore: '%-.200s'" + spa "Informacion erronea en el archivo: '%-.200s'" + swe "Felaktig fil: '%-.200s'" + ukr "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.200s'" ER_NOT_KEYFILE - cze "Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit" - dan "Fejl i indeksfilen til 
tabellen '%-.64s'; prøv at reparere den" - nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren" + cze "Nespr-Bávný klíè pro tabulku '%-.200s'; pokuste se ho opravit" + dan "Fejl i indeksfilen til tabellen '%-.200s'; prøv at reparere den" + nla "Verkeerde zoeksleutel file voor tabel: '%-.200s'; probeer het te repareren" eng "Incorrect key file for table '%-.200s'; try to repair it" - jps "'%-.64s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·. C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", - est "Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada" - fre "Index corrompu dans la table: '%-.64s'; essayez de le réparer" - ger "Fehlerhafte Index-Datei für Tabelle '%-.64s'; versuche zu reparieren" - greek "ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!" - hun "Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!" - ita "File chiave errato per la tabella : '%-.64s'; prova a riparalo" - jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" - kor "'%-.64s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" - nor "Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den" - norwegian-ny "Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den" - pol "Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ" - por "Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo" - rum "Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari" - rus "îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.64s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ" - serbian "Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite" - slo "Nesprávny kµúè pre tabuµku '%-.64s'; pokúste sa ho opravi»" - spa "Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo" - swe "Fatalt fel vid hantering av register '%-.64s'; kör en reparation" - ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ" + jps "'%-.200s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚é‚悤‚Å‚·. C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", + est "Tabeli '%-.200s' võtmefail on vigane; proovi seda parandada" + fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer" + ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren" + greek "ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.200s'; Ðáñáêáëþ, äéïñèþóôå ôï!" + hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!" + ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo" + jpn "'%-.200s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" + kor "'%-.200s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" + nor "Tabellen '%-.200s' har feil i nøkkelfilen; forsøk å reparer den" + norwegian-ny "Tabellen '%-.200s' har feil i nykkelfila; prøv å reparere den" + pol "Niew³a?ciwy plik kluczy dla tabeli: '%-.200s'; spróbuj go naprawiæ" + por "Arquivo de índice incorreto para tabela '%-.200s'; tente repará-lo" + rum "Cheia fisierului incorecta pentru tabela: '%-.200s'; incearca s-o repari" + rus "îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.200s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ" + serbian "Pogrešan key file za tabelu: '%-.200s'; probajte da ga ispravite" + slo "Nesprávny kµúè pre tabuµku '%-.200s'; pokúste sa ho opravi»" + spa "Clave de archivo erronea para la tabla: '%-.200s'; intente repararlo" + swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation" + ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.200s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ" ER_OLD_KEYFILE cze "Star-Bý klíèový soubor pro '%-.64s'; opravte ho." 
dan "Gammel indeksfil for tabellen '%-.64s'; reparer den" @@ -1072,7 +1072,7 @@ ER_ACCESS_DENIED_ERROR 28000 jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)", est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)" fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)" - ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)" greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)" hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)" ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)" @@ -1239,28 +1239,28 @@ ER_BAD_TABLE_ERROR 42S02 swe "Okänd tabell '%-.100s'" ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.100s'" ER_NON_UNIQ_ERROR 23000 - cze "Sloupec '%-.64s' v %s nen-Bí zcela jasný" - dan "Felt: '%-.64s' i tabel %s er ikke entydigt" - nla "Kolom: '%-.64s' in %s is niet eenduidig" + cze "Sloupec '%-.64s' v %-.64s nen-Bí zcela jasný" + dan "Felt: '%-.64s' i tabel %-.64s er ikke entydigt" + nla "Kolom: '%-.64s' in %-.64s is niet eenduidig" eng "Column '%-.64s' in %-.64s is ambiguous" est "Väli '%-.64s' %-.64s-s ei ole ühene" - fre "Champ: '%-.64s' dans %s est ambigu" + fre "Champ: '%-.64s' dans %-.64s est ambigu" ger "Feld '%-.64s' in %-.64s ist nicht eindeutig" greek "Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß" hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu" ita "Colonna: '%-.64s' di %-.64s e` ambigua" jpn "Column: '%-.64s' in %-.64s is ambiguous" kor "Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ" - nor "Felt: '%-.64s' i tabell %s er ikke entydig" - norwegian-ny "Kolonne: '%-.64s' i tabell %s er ikkje eintydig" - pol "Kolumna: '%-.64s' w %s jest dwuznaczna" + nor "Felt: '%-.64s' i tabell %-.64s er ikke entydig" + norwegian-ny "Kolonne: '%-.64s' i tabell %-.64s er ikkje eintydig" + pol "Kolumna: '%-.64s' w %-.64s jest dwuznaczna" por "Coluna '%-.64s' em '%-.64s' é ambígua" rum "Coloana: '%-.64s' in %-.64s este ambigua" rus "óÔÏÌÂÅà '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ" serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu" slo "Pole: '%-.64s' v %-.64s je nejasné" - spa "La columna: '%-.64s' en %s es ambigua" - swe "Kolumn '%-.64s' i %s är inte unik" + spa "La columna: '%-.64s' en %-.64s es ambigua" + swe "Kolumn '%-.64s' i %-.64s är inte unik" ukr "óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ" ER_SERVER_SHUTDOWN 08S01 cze "Prob-Bíhá ukonèování práce serveru" @@ -1288,29 +1288,29 @@ ER_SERVER_SHUTDOWN 08S01 swe "Servern går nu ned" ukr "úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ" ER_BAD_FIELD_ERROR 42S22 S0022 - cze "Nezn-Bámý sloupec '%-.64s' v %s" - dan "Ukendt kolonne '%-.64s' i tabel %s" - nla "Onbekende kolom '%-.64s' in %s" + cze "Nezn-Bámý sloupec '%-.64s' v %-.64s" + dan "Ukendt kolonne '%-.64s' i tabel %-.64s" + nla "Onbekende kolom '%-.64s' in %-.64s" eng "Unknown column '%-.64s' in '%-.64s'" jps "'%-.64s' column ‚Í '%-.64s' ‚É‚Í‚ ‚è‚Ü‚¹‚ñ.", est "Tundmatu tulp '%-.64s' '%-.64s'-s" - fre "Champ '%-.64s' inconnu dans %s" + fre "Champ '%-.64s' inconnu dans %-.64s" ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s" greek "Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'" hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben" ita "Colonna sconosciuta '%-.64s' in '%-.64s'" jpn "'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó." 
kor "Unknown Ä®·³ '%-.64s' in '%-.64s'" - nor "Ukjent kolonne '%-.64s' i tabell %s" - norwegian-ny "Ukjent felt '%-.64s' i tabell %s" - pol "Nieznana kolumna '%-.64s' w %s" + nor "Ukjent kolonne '%-.64s' i tabell %-.64s" + norwegian-ny "Ukjent felt '%-.64s' i tabell %-.64s" + pol "Nieznana kolumna '%-.64s' w %-.64s" por "Coluna '%-.64s' desconhecida em '%-.64s'" rum "Coloana invalida '%-.64s' in '%-.64s'" rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'" serbian "Nepoznata kolona '%-.64s' u '%-.64s'" slo "Neznáme pole '%-.64s' v '%-.64s'" - spa "La columna '%-.64s' en %s es desconocida" - swe "Okänd kolumn '%-.64s' i %s" + spa "La columna '%-.64s' en %-.64s es desconocida" + swe "Okänd kolumn '%-.64s' i %-.64s" ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'" ER_WRONG_FIELD_WITH_GROUP 42000 S1009 cze "Pou-B¾ité '%-.64s' nebylo v group by" @@ -1404,29 +1404,29 @@ ER_WRONG_VALUE_COUNT 21S01 swe "Antalet kolumner motsvarar inte antalet värden" ukr "ë¦ÌØ˦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØ˦ÓÔÀ ÚÎÁÞÅÎØ" ER_TOO_LONG_IDENT 42000 S1009 - cze "Jm-Béno identifikátoru '%-.64s' je pøíli¹ dlouhé" - dan "Navnet '%-.64s' er for langt" - nla "Naam voor herkenning '%-.64s' is te lang" + cze "Jm-Béno identifikátoru '%-.100s' je pøíli¹ dlouhé" + dan "Navnet '%-.100s' er for langt" + nla "Naam voor herkenning '%-.100s' is te lang" eng "Identifier name '%-.100s' is too long" jps "Identifier name '%-.100s' ‚Í’·‚·‚¬‚Ü‚·", est "Identifikaatori '%-.100s' nimi on liiga pikk" - fre "Le nom de l'identificateur '%-.64s' est trop long" + fre "Le nom de l'identificateur '%-.100s' est trop long" ger "Name des Bezeichners '%-.100s' ist zu lang" greek "Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï" hun "A(z) '%-.100s' azonositonev tul hosszu." ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" jpn "Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹" kor "Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä." 
- nor "Identifikator '%-.64s' er for lang" - norwegian-ny "Identifikator '%-.64s' er for lang" - pol "Nazwa identyfikatora '%-.64s' jest zbyt d³uga" + nor "Identifikator '%-.100s' er for lang" + norwegian-ny "Identifikator '%-.100s' er for lang" + pol "Nazwa identyfikatora '%-.100s' jest zbyt d³uga" por "Nome identificador '%-.100s' é longo demais" rum "Numele indentificatorului '%-.100s' este prea lung" rus "óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'" serbian "Ime '%-.100s' je predugaèko" slo "Meno identifikátora '%-.100s' je príli¹ dlhé" - spa "El nombre del identificador '%-.64s' es demasiado grande" - swe "Kolumnnamn '%-.64s' är för långt" + spa "El nombre del identificador '%-.100s' es demasiado grande" + swe "Kolumnnamn '%-.100s' är för långt" ukr "¶Í'Ñ ¦ÄÅÎÔÉƦËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ" ER_DUP_FIELDNAME 42S21 S1009 cze "Zdvojen-Bé jméno sloupce '%-.64s'" @@ -1479,7 +1479,7 @@ ER_DUP_KEYNAME 42000 S1009 swe "Nyckelnamn '%-.64s' finns flera gånger" ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'" ER_DUP_ENTRY 23000 S1009 - cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe %d)" + cze "Zdvojen-Bý klíè '%-.64s' (èíslo klíèe %d)" dan "Ens værdier '%-.64s' for indeks %d" nla "Dubbele ingang '%-.64s' voor zoeksleutel %d" eng "Duplicate entry '%-.64s' for key %d" @@ -1526,30 +1526,30 @@ ER_WRONG_FIELD_SPEC 42000 S1009 spa "Especificador de columna erroneo para la columna '%-.64s'" swe "Felaktigt kolumntyp för kolumn '%-.64s'" ukr "îÅצÒÎÉÊ ÓÐÅÃÉƦËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'" -ER_PARSE_ERROR 42000 - cze "%s bl-Bízko '%-.64s' na øádku %d" - dan "%s nær '%-.64s' på linje %d" - nla "%s bij '%-.64s' in regel %d" +ER_PARSE_ERROR 42000 s1009 + cze "%s bl-Bízko '%-.80s' na øádku %d" + dan "%s nær '%-.80s' på linje %d" + nla "%s bij '%-.80s' in regel %d" eng "%s near '%-.80s' at line %d" jps "%s : '%-.80s' •t‹ß : %d s–Ú", est "%s '%-.80s' ligidal real %d" - fre "%s près de '%-.64s' à la ligne %d" + fre "%s près de '%-.80s' à la ligne %d" ger "%s bei '%-.80s' in Zeile %d" greek "%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d" hun "A %s a '%-.80s'-hez kozeli a %d sorban" ita "%s vicino a '%-.80s' linea %d" jpn "%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ" - kor "'%-.64s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)" - nor "%s nær '%-.64s' på linje %d" - norwegian-ny "%s attmed '%-.64s' på line %d" - pol "%s obok '%-.64s' w linii %d" + kor "'%s' ¿¡·¯ °°À¾´Ï´Ù. 
('%-.80s' ¸í·É¾î ¶óÀÎ %d)" + nor "%s nær '%-.80s' på linje %d" + norwegian-ny "%s attmed '%-.80s' på line %d" + pol "%s obok '%-.80s' w linii %d" por "%s próximo a '%-.80s' na linha %d" rum "%s linga '%-.80s' pe linia %d" rus "%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d" serbian "'%s' u iskazu '%-.80s' na liniji %d" slo "%s blízko '%-.80s' na riadku %d" - spa "%s cerca '%-.64s' en la linea %d" - swe "%s nära '%-.64s' på rad %d" + spa "%s cerca '%-.80s' en la linea %d" + swe "%s nära '%-.80s' på rad %d" ukr "%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d" ER_EMPTY_QUERY 42000 cze "V-Býsledek dotazu je prázdný" @@ -1821,30 +1821,30 @@ ER_WRONG_AUTO_KEY 42000 S1009 swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel" ukr "îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ" ER_READY - cze "%s: p-Bøipraven na spojení" - dan "%s: klar til tilslutninger" - nla "%s: klaar voor verbindingen" + cze "%s: p-Bøipraven na spojení\nVersion: '%s' socket: '%s' port: %d"" + dan "%s: klar til tilslutninger\nVersion: '%s' socket: '%s' port: %d"" + nla "%s: klaar voor verbindingen\nVersion: '%s' socket: '%s' port: %d"" eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d" - jps "%s: €”õŠ®—¹", - est "%s: ootab ühendusi" - fre "%s: Prêt pour des connexions" - ger "%-.64s: Bereit für Verbindungen.\nVersion: '%2' Socket: '%s' Port: %d" - greek "%s: óå áíáìïíÞ óõíäÝóåùí" - hun "%s: kapcsolatra kesz" - ita "%s: Pronto per le connessioni\n" - jpn "%s: ½àÈ÷´°Î»" - kor "%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù" - nor "%s: klar for tilkoblinger" - norwegian-ny "%s: klar for tilkoblingar" - pol "%s: gotowe do po³?czenia" - por "%s: Pronto para conexões" - rum "%s: sint gata pentru conectii" - rus "%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d %s" - serbian "%s: Spreman za konekcije\n" - slo "%s: pripravený na spojenie" - spa "%s: preparado para conexiones" - swe "%s: klar att ta emot klienter" - ukr "%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!" 
+ jps "%s: €”õŠ®—¹\nVersion: '%s' socket: '%s' port: %d"", + est "%s: ootab ühendusi\nVersion: '%s' socket: '%s' port: %d"" + fre "%s: Prêt pour des connexions\nVersion: '%s' socket: '%s' port: %d"" + ger "%s: Bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d" + greek "%s: óå áíáìïíÞ óõíäÝóåùí\nVersion: '%s' socket: '%s' port: %d"" + hun "%s: kapcsolatra kesz\nVersion: '%s' socket: '%s' port: %d"" + ita "%s: Pronto per le connessioni\nVersion: '%s' socket: '%s' port: %d"" + jpn "%s: ½àÈ÷´°Î»\nVersion: '%s' socket: '%s' port: %d"" + kor "%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù\nVersion: '%s' socket: '%s' port: %d"" + nor "%s: klar for tilkoblinger\nVersion: '%s' socket: '%s' port: %d"" + norwegian-ny "%s: klar for tilkoblingar\nVersion: '%s' socket: '%s' port: %d"" + pol "%s: gotowe do po³?czenia\nVersion: '%s' socket: '%s' port: %d"" + por "%s: Pronto para conexões\nVersion: '%s' socket: '%s' port: %d"" + rum "%s: sint gata pentru conectii\nVersion: '%s' socket: '%s' port: %d"" + rus "%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d" + serbian "%s: Spreman za konekcije\nVersion: '%s' socket: '%s' port: %d"" + slo "%s: pripravený na spojenie\nVersion: '%s' socket: '%s' port: %d"" + spa "%s: preparado para conexiones\nVersion: '%s' socket: '%s' port: %d"" + swe "%s: klar att ta emot klienter\nVersion: '%s' socket: '%s' port: %d"" + ukr "%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!\nVersion: '%s' socket: '%s' port: %d"" ER_NORMAL_SHUTDOWN cze "%s: norm-Bální ukonèení\n" dan "%s: Normal nedlukning\n" @@ -1852,7 +1852,7 @@ ER_NORMAL_SHUTDOWN eng "%s: Normal shutdown\n" est "%s: MySQL lõpetas\n" fre "%s: Arrêt normal du serveur\n" - ger "%-.64s: Normal heruntergefahren\n" + ger "%s: Normal heruntergefahren\n" greek "%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n" hun "%s: Normal leallitas\n" ita "%s: Shutdown normale\n" @@ -1876,7 +1876,7 @@ ER_GOT_SIGNAL jps "%s: Got signal %d. ’†’f!\n", est "%s: sain signaali %d. Lõpetan!\n" fre "%s: Reçu le signal %d. Abandonne!\n" - ger "%-.64s: Signal %d erhalten. Abbruch!\n" + ger "%s: Signal %d erhalten. Abbruch!\n" greek "%s: ÅëÞöèç ôï ìÞíõìá %d. Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n" hun "%s: %d jelzes. Megszakitva!\n" ita "%s: Ricevuto segnale %d. Interruzione!\n" @@ -1901,7 +1901,7 @@ ER_SHUTDOWN_COMPLETE jps "%s: Shutdown Š®—¹\n", est "%s: Lõpp\n" fre "%s: Arrêt du serveur terminé\n" - ger "%-.64s: Herunterfahren beendet\n" + ger "%s: Herunterfahren beendet\n" greek "%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n" hun "%s: A leallitas kesz\n" ita "%s: Shutdown completato\n" @@ -1919,29 +1919,29 @@ ER_SHUTDOWN_COMPLETE swe "%s: Avslutning klar\n" ukr "%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n" ER_FORCING_CLOSE 08S01 - cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.64s'\n" - dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.64s'\n" - nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n" + cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.32s'\n" + dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.32s'\n" + nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.32s'\n" eng "%s: Forcing close of thread %ld user: '%-.32s'\n" - jps "%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.64s'\n", + jps "%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.32s'\n", est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n" - fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.64s'\n" + fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.32s'\n" ger "%s: Thread %ld zwangsweise beendet. 
Benutzer: '%-.32s'\n" - greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.64s'\n" - hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n" - ita "%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n" - jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n" - kor "%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.64s'\n" - nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.64s'\n" - norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.64s'\n" - pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.64s'\n" + greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.32s'\n" + hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.32s'\n" + ita "%s: Forzata la chiusura del thread %ld utente: '%-.32s'\n" + jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.32s'\n" + kor "%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.32s'\n" + nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.32s'\n" + norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.32s'\n" + pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.32s'\n" por "%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n" rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n" rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n" serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n" - slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.64s'\n" - spa "%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n" - swe "%s: Stänger av tråd %ld; användare: '%-.64s'\n" + slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.32s'\n" + spa "%s: Forzando a cerrar el thread %ld usuario: '%-.32s'\n" + swe "%s: Stänger av tråd %ld; användare: '%-.32s'\n" ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n" ER_IPSOCK_ERROR 08S01 cze "Nemohu vytvo-Bøit IP socket" @@ -2041,55 +2041,55 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009 swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'" ukr "îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'" ER_TEXTFILE_NOT_READABLE - cze "Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny" - dan "Filen '%-.64s' skal være i database-folderen og kunne læses af alle" - nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." + cze "Soubor '%-.128s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny" + dan "Filen '%-.128s' skal være i database-folderen og kunne læses af alle" + nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." eng "The file '%-.128s' must be in the database directory or be readable by all" - jps "ƒtƒ@ƒCƒ‹ '%-.64s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚悤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", - est "Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav" - fre "Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous" - ger "Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein" - greek "Ôï áñ÷åßï '%-.64s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò" - hun "A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" - ita "Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti" - jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬Æɤá¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó." - kor "'%-.64s' ÈÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù." 
- nor "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle" - norwegian-ny "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle" - pol "Plik '%-.64s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich" - por "Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura possível para todos" - rum "Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)" - rus "æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ" - serbian "File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa" - slo "Súbor '%-.64s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých" - spa "El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos" - swe "Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla" - ukr "æÁÊÌ '%-.64s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È" + jps "ƒtƒ@ƒCƒ‹ '%-.128s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚悤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", + est "Fail '%-.128s' peab asuma andmebaasi kataloogis või olema kõigile loetav" + fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous" + ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein" + greek "Ôï áñ÷åßï '%-.128s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò" + hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" + ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti" + jpn "¥Õ¥¡¥¤¥ë '%-.128s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬Æɤá¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó." + kor "'%-.128s' ÈÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù." 
+ nor "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle" + norwegian-ny "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle" + pol "Plik '%-.128s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich" + por "Arquivo '%-.128s' tem que estar no diretório do banco de dados ou ter leitura possível para todos" + rum "Fisierul '%-.128s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)" + rus "æÁÊÌ '%-.128s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ" + serbian "File '%-.128s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa" + slo "Súbor '%-.128s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých" + spa "El archivo '%-.128s' debe estar en el directorio de la base de datos o ser de lectura por todos" + swe "Textfilen '%-.128s' måste finnas i databasbiblioteket eller vara läsbar för alla" + ukr "æÁÊÌ '%-.128s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È" ER_FILE_EXISTS_ERROR - cze "Soubor '%-.64s' ji-B¾ existuje" - dan "Filen '%-.64s' eksisterer allerede" - nla "Het bestand '%-.64s' bestaat reeds" + cze "Soubor '%-.200s' ji-B¾ existuje" + dan "Filen '%-.200s' eksisterer allerede" + nla "Het bestand '%-.200s' bestaat reeds" eng "File '%-.200s' already exists" - jps "File '%-.64s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·", - est "Fail '%-.80s' juba eksisteerib" - fre "Le fichier '%-.64s' existe déjà" - ger "Datei '%-.80s' bereits vorhanden" - greek "Ôï áñ÷åßï '%-.64s' õðÜñ÷åé Þäç" - hun "A '%-.64s' file mar letezik." - ita "Il file '%-.64s' esiste gia`" - jpn "File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹" - kor "'%-.64s' ÈÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." - nor "Filen '%-.64s' eksisterte allerede" - norwegian-ny "Filen '%-.64s' eksisterte allereide" - pol "Plik '%-.64s' ju¿ istnieje" - por "Arquivo '%-.80s' já existe" - rum "Fisierul '%-.80s' exista deja" - rus "æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "File '%-.80s' veæ postoji" - slo "Súbor '%-.64s' u¾ existuje" - spa "El archivo '%-.64s' ya existe" - swe "Filen '%-.64s' existerar redan" - ukr "æÁÊÌ '%-.80s' ×ÖÅ ¦ÓÎÕ¤" + jps "File '%-.200s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·", + est "Fail '%-.200s' juba eksisteerib" + fre "Le fichier '%-.200s' existe déjà" + ger "Datei '%-.200s' bereits vorhanden" + greek "Ôï áñ÷åßï '%-.200s' õðÜñ÷åé Þäç" + hun "A '%-.200s' file mar letezik." + ita "Il file '%-.200s' esiste gia`" + jpn "File '%-.200s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹" + kor "'%-.200s' ÈÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." 
+ nor "Filen '%-.200s' eksisterte allerede" + norwegian-ny "Filen '%-.200s' eksisterte allereide" + pol "Plik '%-.200s' ju¿ istnieje" + por "Arquivo '%-.200s' já existe" + rum "Fisierul '%-.200s' exista deja" + rus "æÁÊÌ '%-.200s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "File '%-.200s' veæ postoji" + slo "Súbor '%-.200s' u¾ existuje" + spa "El archivo '%-.200s' ya existe" + swe "Filen '%-.200s' existerar redan" + ukr "æÁÊÌ '%-.200s' ×ÖÅ ¦ÓÎÕ¤" ER_LOAD_INFO cze "Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld" dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld" @@ -2319,51 +2319,51 @@ ER_NO_TABLES_USED swe "Inga tabeller angivna" ukr "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ" ER_TOO_BIG_SET - cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %s a SET" + cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %-.64s a SET" dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s" - nla "Teveel strings voor kolom %s en SET" + nla "Teveel strings voor kolom %-.64s en SET" eng "Too many strings for column %-.64s and SET" est "Liiga palju string tulbale %-.64s tüübile SET" - fre "Trop de chaînes dans la colonne %s avec SET" + fre "Trop de chaînes dans la colonne %-.64s avec SET" ger "Zu viele Strings für Feld %-.64s und SET angegeben" greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET" hun "Tul sok karakter: %-.64s es SET" ita "Troppe stringhe per la colonna %-.64s e la SET" kor "Ä®·³ %-.64s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù." - nor "For mange tekststrenger kolonne %s og SET" - norwegian-ny "For mange tekststrengar felt %s og SET" - pol "Zbyt wiele ³añcuchów dla kolumny %s i polecenia SET" + nor "For mange tekststrenger kolonne %-.64s og SET" + norwegian-ny "For mange tekststrengar felt %-.64s og SET" + pol "Zbyt wiele ³añcuchów dla kolumny %-.64s i polecenia SET" por "'Strings' demais para coluna '%-.64s' e SET" rum "Prea multe siruri pentru coloana %-.64s si SET" rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET" serbian "Previše string-ova za kolonu '%-.64s' i komandu 'SET'" slo "Príli¹ mnoho re»azcov pre pole %-.64s a SET" - spa "Muchas strings para columna %s y SET" - swe "För många alternativ till kolumn %s för SET" + spa "Muchas strings para columna %-.64s y SET" + swe "För många alternativ till kolumn %-.64s för SET" ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET" ER_NO_UNIQUE_LOGFILE - cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n" - dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n" - nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n" + cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %-.200s.(1-999)\n" + dan "Kan ikke lave unikt log-filnavn %-.200s.(1-999)\n" + nla "Het is niet mogelijk een unieke naam te maken voor de logfile %-.200s.(1-999)\n" eng "Can't generate a unique log-filename %-.200s.(1-999)\n" - est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n" - fre "Ne peut générer un unique nom de journal %s.(1-999)\n" - ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s(1-999) erzeugen\n" - greek "Áäýíáôç ç äçìéïõñãßá unique log-filename %-.64s.(1-999)\n" - hun "Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n" - ita "Impossibile generare un nome del file log unico %-.64s.(1-999)\n" - kor "Unique ·Î±×ÈÀÏ '%-.64s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n" - nor "Kan ikke lage unikt loggfilnavn %s.(1-999)\n" - norwegian-ny "Kan ikkje lage unikt loggfilnavn %s.(1-999)\n" - pol "Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %s.(1-999)\n" - por "Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n" - rum 
"Nu pot sa generez un nume de log unic %-.64s.(1-999)\n" - rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.64s.(1-999)\n" - serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n" - slo "Nemô¾em vytvori» unikátne meno log-súboru %-.64s.(1-999)\n" - spa "No puede crear un unico archivo log %s.(1-999)\n" - swe "Kan inte generera ett unikt filnamn %s.(1-999)\n" - ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.64s.(1-999)\n" + est "Ei suuda luua unikaalset logifaili nime %-.200s.(1-999)\n" + fre "Ne peut générer un unique nom de journal %-.200s.(1-999)\n" + ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.200s(1-999) erzeugen\n" + greek "Áäýíáôç ç äçìéïõñãßá unique log-filename %-.200s.(1-999)\n" + hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n" + ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n" + kor "Unique ·Î±×ÈÀÏ '%-.200s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n" + nor "Kan ikke lage unikt loggfilnavn %-.200s.(1-999)\n" + norwegian-ny "Kan ikkje lage unikt loggfilnavn %-.200s.(1-999)\n" + pol "Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %-.200s.(1-999)\n" + por "Não pode gerar um nome de arquivo de 'log' único '%-.200s'.(1-999)\n" + rum "Nu pot sa generez un nume de log unic %-.200s.(1-999)\n" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.200s.(1-999)\n" + serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.200s.(1-999)'\n" + slo "Nemô¾em vytvori» unikátne meno log-súboru %-.200s.(1-999)\n" + spa "No puede crear un unico archivo log %-.200s.(1-999)\n" + swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n" + ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.200s.(1-999)\n" ER_TABLE_NOT_LOCKED_FOR_WRITE cze "Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna" dan "Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres" @@ -2439,54 +2439,54 @@ ER_BLOB_CANT_HAVE_DEFAULT 42000 swe "BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde" ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ" ER_WRONG_DB_NAME 42000 - cze "Nep-Bøípustné jméno databáze '%-.64s'" - dan "Ugyldigt database navn '%-.64s'" - nla "Databasenaam '%-.64s' is niet getoegestaan" + cze "Nep-Bøípustné jméno databáze '%-.100s'" + dan "Ugyldigt database navn '%-.100s'" + nla "Databasenaam '%-.100s' is niet getoegestaan" eng "Incorrect database name '%-.100s'" jps "Žw’肵‚½ database –¼ '%-.100s' ‚ªŠÔˆá‚Á‚Ä‚¢‚Ü‚·", est "Vigane andmebaasi nimi '%-.100s'" - fre "Nom de base de donnée illégal: '%-.64s'" + fre "Nom de base de donnée illégal: '%-.100s'" ger "Unerlaubter Datenbankname '%-.100s'" greek "ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'" hun "Hibas adatbazisnev: '%-.100s'" ita "Nome database errato '%-.100s'" jpn "»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹" kor "'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù." 
- nor "Ugyldig database navn '%-.64s'" - norwegian-ny "Ugyldig database namn '%-.64s'" - pol "Niedozwolona nazwa bazy danych '%-.64s'" + nor "Ugyldig database navn '%-.100s'" + norwegian-ny "Ugyldig database namn '%-.100s'" + pol "Niedozwolona nazwa bazy danych '%-.100s'" por "Nome de banco de dados '%-.100s' incorreto" rum "Numele bazei de date este incorect '%-.100s'" rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'" serbian "Pogrešno ime baze '%-.100s'" slo "Neprípustné meno databázy '%-.100s'" - spa "Nombre de base de datos ilegal '%-.64s'" - swe "Felaktigt databasnamn '%-.64s'" + spa "Nombre de base de datos ilegal '%-.100s'" + swe "Felaktigt databasnamn '%-.100s'" ukr "îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'" ER_WRONG_TABLE_NAME 42000 - cze "Nep-Bøípustné jméno tabulky '%-.64s'" - dan "Ugyldigt tabel navn '%-.64s'" - nla "Niet toegestane tabelnaam '%-.64s'" + cze "Nep-Bøípustné jméno tabulky '%-.100s'" + dan "Ugyldigt tabel navn '%-.100s'" + nla "Niet toegestane tabelnaam '%-.100s'" eng "Incorrect table name '%-.100s'" jps "Žw’肵‚½ table –¼ '%-.100s' ‚Í‚Ü‚¿‚ª‚Á‚Ä‚¢‚Ü‚·", est "Vigane tabeli nimi '%-.100s'" - fre "Nom de table illégal: '%-.64s'" + fre "Nom de table illégal: '%-.100s'" ger "Unerlaubter Tabellenname '%-.100s'" greek "ËÜèïò üíïìá ðßíáêá '%-.100s'" hun "Hibas tablanev: '%-.100s'" ita "Nome tabella errato '%-.100s'" jpn "»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹" kor "'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù." - nor "Ugyldig tabell navn '%-.64s'" - norwegian-ny "Ugyldig tabell namn '%-.64s'" - pol "Niedozwolona nazwa tabeli '%-.64s'..." + nor "Ugyldig tabell navn '%-.100s'" + norwegian-ny "Ugyldig tabell namn '%-.100s'" + pol "Niedozwolona nazwa tabeli '%-.100s'..." por "Nome de tabela '%-.100s' incorreto" rum "Numele tabelei este incorect '%-.100s'" rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'" serbian "Pogrešno ime tabele '%-.100s'" slo "Neprípustné meno tabuµky '%-.100s'" - spa "Nombre de tabla ilegal '%-.64s'" - swe "Felaktigt tabellnamn '%-.64s'" + spa "Nombre de tabla ilegal '%-.100s'" + swe "Felaktigt tabellnamn '%-.100s'" ukr "îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'" ER_TOO_BIG_SELECT 42000 cze "Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. 
Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1" @@ -2534,97 +2534,97 @@ ER_UNKNOWN_ERROR swe "Oidentifierat fel" ukr "îÅצÄÏÍÁ ÐÏÍÉÌËÁ" ER_UNKNOWN_PROCEDURE 42000 - cze "Nezn-Bámá procedura %s" - dan "Ukendt procedure %s" - nla "Onbekende procedure %s" + cze "Nezn-Bámá procedura %-.64s" + dan "Ukendt procedure %-.64s" + nla "Onbekende procedure %-.64s" eng "Unknown procedure '%-.64s'" est "Tundmatu protseduur '%-.64s'" - fre "Procédure %s inconnue" + fre "Procédure %-.64s inconnue" ger "Unbekannte Prozedur '%-.64s'" greek "Áãíùóôç äéáäéêáóßá '%-.64s'" hun "Ismeretlen eljaras: '%-.64s'" ita "Procedura '%-.64s' sconosciuta" kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'" - nor "Ukjent prosedyre %s" - norwegian-ny "Ukjend prosedyre %s" - pol "Unkown procedure %s" + nor "Ukjent prosedyre %-.64s" + norwegian-ny "Ukjend prosedyre %-.64s" + pol "Unkown procedure %-.64s" por "'Procedure' '%-.64s' desconhecida" rum "Procedura unknown '%-.64s'" rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'" serbian "Nepoznata procedura '%-.64s'" slo "Neznámá procedúra '%-.64s'" - spa "Procedimiento desconocido %s" - swe "Okänd procedur: %s" + spa "Procedimiento desconocido %-.64s" + swe "Okänd procedur: %-.64s" ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'" ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 - cze "Chybn-Bý poèet parametrù procedury %s" - dan "Forkert antal parametre til proceduren %s" - nla "Foutief aantal parameters doorgegeven aan procedure %s" + cze "Chybn-Bý poèet parametrù procedury %-.64s" + dan "Forkert antal parametre til proceduren %-.64s" + nla "Foutief aantal parameters doorgegeven aan procedure %-.64s" eng "Incorrect parameter count to procedure '%-.64s'" est "Vale parameetrite hulk protseduurile '%-.64s'" - fre "Mauvais nombre de paramètres pour la procedure %s" + fre "Mauvais nombre de paramètres pour la procedure %-.64s" ger "Falsche Parameterzahl für Prozedur '%-.64s'" greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'" hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal" ita "Numero di parametri errato per la procedura '%-.64s'" kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ" - nor "Feil parameter antall til prosedyren %s" - norwegian-ny "Feil parameter tal til prosedyra %s" - pol "Incorrect parameter count to procedure %s" + nor "Feil parameter antall til prosedyren %-.64s" + norwegian-ny "Feil parameter tal til prosedyra %-.64s" + pol "Incorrect parameter count to procedure %-.64s" por "Número de parâmetros incorreto para a 'procedure' '%-.64s'" rum "Procedura '%-.64s' are un numar incorect de parametri" rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" serbian "Pogrešan broj parametara za proceduru '%-.64s'" slo "Chybný poèet parametrov procedúry '%-.64s'" - spa "Equivocado parametro count para procedimiento %s" - swe "Felaktigt antal parametrar till procedur %s" + spa "Equivocado parametro count para procedimiento %-.64s" + swe "Felaktigt antal parametrar till procedur %-.64s" ukr "èÉÂÎÁ ˦ÌØ˦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'" ER_WRONG_PARAMETERS_TO_PROCEDURE - cze "Chybn-Bé parametry procedury %s" - dan "Forkert(e) parametre til proceduren %s" - nla "Foutieve parameters voor procedure %s" + cze "Chybn-Bé parametry procedury %-.64s" + dan "Forkert(e) parametre til proceduren %-.64s" + nla "Foutieve parameters voor procedure %-.64s" eng "Incorrect parameters to procedure '%-.64s'" est "Vigased parameetrid protseduurile '%-.64s'" - fre "Paramètre erroné pour la procedure %s" + fre "Paramètre erroné pour la procedure %-.64s" ger "Falsche Parameter für Prozedur '%-.64s'" greek "ËÜèïò 
ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'" hun "Rossz parameter a(z) '%-.64s' eljarasban" ita "Parametri errati per la procedura '%-.64s'" kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ" - nor "Feil parametre til prosedyren %s" - norwegian-ny "Feil parameter til prosedyra %s" - pol "Incorrect parameters to procedure %s" + nor "Feil parametre til prosedyren %-.64s" + norwegian-ny "Feil parameter til prosedyra %-.64s" + pol "Incorrect parameters to procedure %-.64s" por "Parâmetros incorretos para a 'procedure' '%-.64s'" rum "Procedura '%-.64s' are parametrii incorecti" rus "îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" serbian "Pogrešni parametri prosleðeni proceduri '%-.64s'" slo "Chybné parametre procedúry '%-.64s'" - spa "Equivocados parametros para procedimiento %s" - swe "Felaktiga parametrar till procedur %s" + spa "Equivocados parametros para procedimiento %-.64s" + swe "Felaktiga parametrar till procedur %-.64s" ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'" ER_UNKNOWN_TABLE 42S02 - cze "Nezn-Bámá tabulka '%-.64s' v %s" - dan "Ukendt tabel '%-.64s' i %s" - nla "Onbekende tabel '%-.64s' in %s" + cze "Nezn-Bámá tabulka '%-.64s' v %-.32s" + dan "Ukendt tabel '%-.64s' i %-.32s" + nla "Onbekende tabel '%-.64s' in %-.32s" eng "Unknown table '%-.64s' in %-.32s" est "Tundmatu tabel '%-.64s' %-.32s-s" - fre "Table inconnue '%-.64s' dans %s" - ger "Unbekannte Tabelle '%-.64s' in '%-.64s'" - greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %s" - hun "Ismeretlen tabla: '%-.64s' %s-ban" - ita "Tabella '%-.64s' sconosciuta in %s" - jpn "Unknown table '%-.64s' in %s" - kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %s)" - nor "Ukjent tabell '%-.64s' i %s" - norwegian-ny "Ukjend tabell '%-.64s' i %s" - pol "Unknown table '%-.64s' in %s" + fre "Table inconnue '%-.64s' dans %-.32s" + ger "Unbekannte Tabelle '%-.64s' in '%-.32s'" + greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %-.32s" + hun "Ismeretlen tabla: '%-.64s' %-.32s-ban" + ita "Tabella '%-.64s' sconosciuta in %-.32s" + jpn "Unknown table '%-.64s' in %-.32s" + kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %-.32s)" + nor "Ukjent tabell '%-.64s' i %-.32s" + norwegian-ny "Ukjend tabell '%-.64s' i %-.32s" + pol "Unknown table '%-.64s' in %-.32s" por "Tabela '%-.64s' desconhecida em '%-.32s'" rum "Tabla '%-.64s' invalida in %-.32s" rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s" serbian "Nepoznata tabela '%-.64s' u '%-.32s'" - slo "Neznáma tabuµka '%-.64s' v %s" - spa "Tabla desconocida '%-.64s' in %s" - swe "Okänd tabell '%-.64s' i '%-.64s'" + slo "Neznáma tabuµka '%-.64s' v %-.32s" + spa "Tabla desconocida '%-.64s' in %-.32s" + swe "Okänd tabell '%-.64s' i '%-.32s'" ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s" ER_FIELD_SPECIFIED_TWICE 42000 cze "Polo-B¾ka '%-.64s' je zadána dvakrát" @@ -2803,27 +2803,27 @@ ER_TOO_MANY_FIELDS swe "För många fält" ukr "úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×" ER_TOO_BIG_ROWSIZE 42000 - cze "-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %d. Musíte zmìnit nìkteré polo¾ky na blob" - dan "For store poster. Max post størrelse, uden BLOB's, er %d. Du må lave nogle felter til BLOB's" - nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen." + cze "-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %ld. Musíte zmìnit nìkteré polo¾ky na blob" + dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du må lave nogle felter til BLOB's" + nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. 
U dient sommige velden in blobs te veranderen." eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs" - jps "row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚Ü‚È‚¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %d ‚Å‚·. ‚¢‚‚‚©‚Ì field ‚ð BLOB ‚É•Ï‚¦‚Ä‚‚¾‚³‚¢.", - est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %d. Muuda mõned väljad BLOB-tüüpi väljadeks" - fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB" + jps "row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚Ü‚È‚¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %ld ‚Å‚·. ‚¢‚‚‚©‚Ì field ‚ð BLOB ‚É•Ï‚¦‚Ä‚‚¾‚³‚¢.", + est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks" + fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB" ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden" - greek "Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %d. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs" - hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia" - ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB" - jpn "row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤." - kor "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä.." - por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs" - rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri" - rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %d. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB" - serbian "Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB" - slo "Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %d. Musíte zmeni» niektoré polo¾ky na BLOB" - spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob" - swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %d. Ändra några av dina fält till BLOB" - ukr "úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁʦÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %d. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB" + greek "Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %ld. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs" + hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %ld. Nehany mezot meg kell valtoztatnia" + ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %ld. Devi cambiare alcuni campi in BLOB" + jpn "row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %ld ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤." + kor "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %ldÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä.." + por "Tamanho de linha grande demais. 
O máximo tamanho de linha, não contando BLOBs, é %ld. Você tem que mudar alguns campos para BLOBs" + rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %ld. Trebuie sa schimbati unele cimpuri in BLOB-uri" + rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %ld. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB" + serbian "Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %ld. Trebali bi da promenite tip nekih polja u BLOB" + slo "Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %ld. Musíte zmeni» niektoré polo¾ky na BLOB" + spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %ld. Tu tienes que cambiar algunos campos para blob" + swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %ld. Ändra några av dina fält till BLOB" + ukr "úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁʦÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %ld. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB" ER_STACK_OVERRUN cze "P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku" dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt" @@ -3026,14 +3026,14 @@ ER_FUNCTION_NOT_DEFINED ukr "æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ" ER_HOST_IS_BLOCKED cze "Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'" - dan "Værten er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'" + dan "Værten '%-.64s' er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'" nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'" eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'" jps "Host '%-.64s' ‚Í many connection error ‚Ì‚½‚ßA‹‘”Û‚³‚ê‚Ü‚µ‚½. 'mysqladmin flush-hosts' ‚ʼn𜂵‚Ä‚‚¾‚³‚¢", est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga" fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connexion. Débloquer le par 'mysqladmin flush-hosts'" ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" - greek "Ï õðïëïãéóôÞò Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'" + greek "Ï õðïëïãéóôÞò '%-.64s' Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'" hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'" jpn "Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 
'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤" @@ -3054,7 +3054,7 @@ ER_HOST_NOT_PRIVILEGED est "Masinal '%-.64s' puudub ligipääs sellele MySQL serverile" fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL" ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden" - greek "Ï õðïëïãéóôÞò äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server" + greek "Ï õðïëïãéóôÞò '%-.64s' äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server" hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez" ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL" jpn "Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" @@ -3368,27 +3368,27 @@ ER_GRANT_WRONG_HOST_OR_USER 42000 swe "Felaktigt maskinnamn eller användarnamn använt med GRANT" ukr "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ" ER_NO_SUCH_TABLE 42S02 - cze "Tabulka '%-.64s.%s' neexistuje" + cze "Tabulka '%-.64s.%-.64s' neexistuje" dan "Tabellen '%-.64s.%-.64s' eksisterer ikke" - nla "Tabel '%-.64s.%s' bestaat niet" + nla "Tabel '%-.64s.%-.64s' bestaat niet" eng "Table '%-.64s.%-.64s' doesn't exist" est "Tabelit '%-.64s.%-.64s' ei eksisteeri" - fre "La table '%-.64s.%s' n'existe pas" + fre "La table '%-.64s.%-.64s' n'existe pas" ger "Tabelle '%-.64s.%-.64s' existiert nicht" - hun "A '%-.64s.%s' tabla nem letezik" - ita "La tabella '%-.64s.%s' non esiste" - jpn "Table '%-.64s.%s' doesn't exist" - kor "Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." - nor "Table '%-.64s.%s' doesn't exist" - norwegian-ny "Table '%-.64s.%s' doesn't exist" - pol "Table '%-.64s.%s' doesn't exist" + hun "A '%-.64s.%-.64s' tabla nem letezik" + ita "La tabella '%-.64s.%-.64s' non esiste" + jpn "Table '%-.64s.%-.64s' doesn't exist" + kor "Å×À̺í '%-.64s.%-.64s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." + nor "Table '%-.64s.%-.64s' doesn't exist" + norwegian-ny "Table '%-.64s.%-.64s' doesn't exist" + pol "Table '%-.64s.%-.64s' doesn't exist" por "Tabela '%-.64s.%-.64s' não existe" rum "Tabela '%-.64s.%-.64s' nu exista" rus "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" serbian "Tabela '%-.64s.%-.64s' ne postoji" - slo "Table '%-.64s.%s' doesn't exist" - spa "Tabla '%-.64s.%s' no existe" - swe "Det finns ingen tabell som heter '%-.64s.%s'" + slo "Table '%-.64s.%-.64s' doesn't exist" + spa "Tabla '%-.64s.%-.64s' no existe" + swe "Det finns ingen tabell som heter '%-.64s.%-.64s'" ukr "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤" ER_NONEXISTING_TABLE_GRANT 42000 cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'" @@ -3487,27 +3487,27 @@ ER_TOO_MANY_DELAYED_THREADS swe "Det finns redan 'max_delayed_threads' trådar i använding" ukr "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ" ER_ABORTING_CONNECTION 08S01 - cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.64s' (%s)" - dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)" - nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)" + cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' (%-.64s)" + dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.32s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' (%-.64s)" eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" est "Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)" - fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)" - ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. 
Benutzer: '%-.64s' (%-.64s)" - hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)" - ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)" - jpn "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.64s' (%s)" - nor "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - pol "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' (%-.64s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s' (%-.64s)" + hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.32s' (%-.64s)" + ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.32s' (%-.64s)" + jpn "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.32s' (%-.64s)" + nor "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + pol "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" por "Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)" rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)" rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)" serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)" - slo "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" - spa "Conexión abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)" - swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.64s' (%s)" + slo "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + spa "Conexión abortada %ld para db: '%-.64s' usuario: '%-.32s' (%-.64s)" + swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s' (%-.64s)" ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)" ER_NET_PACKET_TOO_LARGE 08S01 cze "Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'" @@ -3986,7 +3986,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000 por "O manipulador de tabela não suporta %s" rum "The handler for the table doesn't support %s" rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s" - serbian "Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande" + serbian "Handler za ovu tabelu ne dozvoljava %s komande" slo "The handler for the table doesn't support %s" spa "El manipulador de la tabla no permite soporte para %s" swe "Tabellhanteraren för denna tabell kan inte göra %s" @@ -4654,14 +4654,14 @@ ER_NO_DEFAULT 42000 spa "Variable '%-.64s' no tiene un valor patrón" swe "Variabel '%-.64s' har inte ett DEFAULT-värde" ER_WRONG_VALUE_FOR_VAR 42000 - nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'" - eng "Variable '%-.64s' can't be set to the value of '%-.64s'" - ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden" - ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'" - por "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'" - rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'" - spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'" - swe "Variabel '%-.64s' kan inte sättas till '%-.64s'" + nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde 
'%-.200s'" + eng "Variable '%-.64s' can't be set to the value of '%-.200s'" + ger "Variable '%-.64s' kann nicht auf '%-.200s' gesetzt werden" + ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.200s'" + por "Variável '%-.64s' não pode ser configurada para o valor de '%-.200s'" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.200s'" + spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.200s'" + swe "Variabel '%-.64s' kan inte sättas till '%-.200s'" ER_WRONG_TYPE_FOR_VAR 42000 nla "Foutief argumenttype voor variabele '%-.64s'" eng "Incorrect argument type to variable '%-.64s'" @@ -5220,9 +5220,9 @@ ER_FPARSER_BAD_HEADER ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'" ER_FPARSER_EOF_IN_COMMENT eng "Unexpected end of file while parsing comment '%-.200s'" - ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.64s'" - rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.64s'" - ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'" + ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.200s'" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.200s'" + ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.200s'" ER_FPARSER_ERROR_IN_PARAMETER eng "Error while parsing parameter '%-.64s' (line: '%-.64s')" ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')" @@ -5307,7 +5307,7 @@ ER_TRG_ON_VIEW_OR_TEMP_TABLE ger "'%-.64s' des Triggers ist View oder temporäre Tabelle" ER_TRG_CANT_CHANGE_ROW eng "Updating of %s row is not allowed in %strigger" - ger "Aktualisieren einer %s-Zeile ist in einem %-Trigger nicht erlaubt" + ger "Aktualisieren einer %s-Zeile ist in einem %s-Trigger nicht erlaubt" ER_TRG_NO_SUCH_ROW_IN_TRG eng "There is no %s row in %s trigger" ger "Es gibt keine %s-Zeile im %s-Trigger" @@ -5389,7 +5389,7 @@ ER_LOGGING_PROHIBIT_CHANGING_OF ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s" ER_NO_FILE_MAPPING eng "Can't map file: %-.200s, errno: %d" - ger "Kann Datei nicht abbilden: %-.64s, Fehler: %d" + ger "Kann Datei nicht abbilden: %-.200s, Fehler: %d" ER_WRONG_MAGIC eng "Wrong magic in %-.64s" ger "Falsche magische Zahlen in %-.64s" @@ -5518,13 +5518,12 @@ ER_M_BIGGER_THAN_D 42000 S1009 eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.64s')." ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')" ER_WRONG_LOCK_OF_SYSTEM_TABLE - eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables" - ger "Sie können Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren" + eng "You can't combine write-locking of system tables with other tables or lock types" ER_CONNECT_TO_FOREIGN_DATA_SOURCE eng "Unable to connect to foreign data source: %.64s" ger "Kann nicht mit Fremddatenquelle verbinden: %.64s" ER_QUERY_ON_FOREIGN_DATA_SOURCE - eng "There was a problem processing the query on the foreign data source. Data source error: %-.64" + eng "There was a problem processing the query on the foreign data source. Data source error: %-.64s" ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s" ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST eng "The foreign data source you are trying to reference does not exist. Data source error: %-.64s" @@ -5606,7 +5605,7 @@ ER_SP_RECURSION_LIMIT ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten" ER_SP_PROC_TABLE_CORRUPT eng "Failed to load routine %-.64s. 
The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" - ger "Routine %-64s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)" + ger "Routine %-.64s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)" ER_FOREIGN_SERVER_EXISTS eng "The foreign server, %s, you are trying to create already exists." ER_SP_WRONG_NAME 42000 @@ -5874,8 +5873,7 @@ ER_EVENT_ENDS_BEFORE_STARTS eng "ENDS is either invalid or before STARTS" ger "ENDS ist entweder ungültig oder liegt vor STARTS" ER_EVENT_EXEC_TIME_IN_THE_PAST - eng "Activation (AT) time is in the past" - ger "Aktivierungszeit (AT) liegt in der Vergangenheit" + eng "Event execution time is in the past. Event has been disabled" ER_EVENT_OPEN_TABLE_FAILED eng "Failed to open mysql.event" ger "Öffnen von mysql.event fehlgeschlagen" @@ -5883,10 +5881,10 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT eng "No datetime expression provided" ger "Kein DATETIME-Ausdruck angegeben" ER_COL_COUNT_DOESNT_MATCH_CORRUPTED - eng "Column count of mysql.%s is wrong. Expected %d, found %d. Table probably corrupted" + eng "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted" ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt" ER_CANNOT_LOAD_FROM_TABLE - eng "Cannot load from mysql.%s. Table probably corrupted. See error log." + eng "Cannot load from mysql.%s. The table is probably corrupted. Please see the error log for details" ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt, siehe Fehlerlog" ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" @@ -5980,11 +5978,11 @@ ER_BASE64_DECODE_ERROR ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA eng "Triggers can not be created on system tables" ger "Trigger können nicht auf Systemtabellen erzeugt werden" -ER_EVENT_RECURSIVITY_FORBIDDEN - eng "Recursivity of EVENT DDL statements is forbidden when body is present" +ER_EVENT_RECURSION_FORBIDDEN + eng "Recursion of EVENT DDL statements is forbidden when body is present" ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert" ER_EVENTS_DB_ERROR - eng "Cannot proceed because the tables used by events were found damaged at server start" + eng "Cannot proceed because system tables used by Event Scheduler were found damaged at server start" ger "Kann nicht weitermachen, weil die Tabellen, die von Events verwendet werden, beim Serverstart als beschädigt markiert wurden" ER_ONLY_INTEGERS_ALLOWED eng "Only integers allowed as number here" @@ -6053,3 +6051,9 @@ ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009 ER_BINLOG_PURGE_EMFILE eng "Too many files opened, please execute the command again" ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus" +ER_EVENT_CANNOT_CREATE_IN_THE_PAST + eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created" +ER_EVENT_CANNOT_ALTER_IN_THE_PAST + eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered" +ER_SLAVE_INCIDENT + eng "The incident %s occured on the master. 
Message: %-.64s" diff --git a/sql/slave.cc b/sql/slave.cc index f2bd24bd05e..bc6cef95fc6 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -25,12 +25,15 @@ #include <thr_alarm.h> #include <my_dir.h> #include <sql_common.h> +#include <errmsg.h> #ifdef HAVE_REPLICATION #include "rpl_tblmap.h" int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len); +static Log_event* next_event(RELAY_LOG_INFO* rli); + #define FLAGSTR(V,F) ((V)&(F)?#F" ":"") @@ -53,6 +56,7 @@ ulonglong relay_log_space_limit = 0; */ int disconnect_slave_event_count = 0, abort_slave_event_count = 0; +int events_till_abort = -1; typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE; @@ -518,11 +522,11 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli) really one minute of idleness, we don't timeout if the slave SQL thread is actively working. */ - if (!rli->unsafe_to_stop_at) + if (rli->last_event_start_time == 0) DBUG_RETURN(1); DBUG_PRINT("info", ("Slave SQL thread is in an unsafe situation, giving " "it some grace period")); - if (difftime(time(0), rli->unsafe_to_stop_at) > 60) + if (difftime(time(0), rli->last_event_start_time) > 60) { slave_print_msg(ERROR_LEVEL, rli, 0, "SQL thread had to stop in an unsafe situation, in " @@ -556,7 +560,7 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli) void */ -void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, +void slave_print_msg(enum loglevel level, RELAY_LOG_INFO const *rli, int err_code, const char* msg, ...) { void (*report_function)(const char *, ...); @@ -578,9 +582,9 @@ void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, It's an error, it must be reported in Last_error and Last_errno in SHOW SLAVE STATUS. */ - pbuff= rli->last_slave_error; + pbuff= const_cast<RELAY_LOG_INFO*>(rli)->last_slave_error; pbuffsize= sizeof(rli->last_slave_error); - rli->last_slave_errno = err_code; + const_cast<RELAY_LOG_INFO*>(rli)->last_slave_errno = err_code; report_function= sql_print_error; break; case WARNING_LEVEL: @@ -812,7 +816,7 @@ do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); { if ((master_row= mysql_fetch_row(master_res)) && (::server_id == strtoul(master_row[1], 0, 10)) && - !replicate_same_server_id) + !mi->rli.replicate_same_server_id) errmsg= "The slave I/O thread stops because master and slave have equal \ MySQL server ids; these ids must be different for replication to work (or \ the --replicate-same-server-id option must be used on slave but this does \ @@ -1389,7 +1393,7 @@ void set_slave_thread_options(THD* thd) DBUG_VOID_RETURN; } -void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) +void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO const *rli) { DBUG_ENTER("set_slave_thread_default_charset"); @@ -1400,7 +1404,14 @@ void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) thd->variables.collation_server= global_system_variables.collation_server; thd->update_charset(); - rli->cached_charset_invalidate(); + + /* + We use a const cast here since the conceptual (and externally + visible) behavior of the function is to set the default charset of + the thread. That the cache has to be invalidated is a secondary + effect. 
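The comment above motivates a const_cast on the rli parameter (the call appears just below): the parameter is pointer-to-const because the function's observable job is read-only, while the cache reset is an internal side effect. A minimal stand-alone sketch of that pattern, with invented names (CharsetCache, reset_session_charset), not code from this patch:

    #include <cstddef>

    /* Invented stand-in for the cached-charset bookkeeping. */
    struct CharsetCache
    {
      const void *last_charset;                 /* NULL means "unknown" */
      void invalidate() { last_charset= NULL; }
    };

    /* Conceptually read-only for the caller, hence the const parameter;
       the cache invalidation is cast back to non-const internally. */
    void reset_session_charset(CharsetCache const *cache)
    {
      /* ... reset the thread's charset from the global defaults ... */
      const_cast<CharsetCache*>(cache)->invalidate();
    }

Where the cache lives inside the object itself, declaring that member mutable achieves the same result without a cast.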
+ */ + const_cast<RELAY_LOG_INFO*>(rli)->cached_charset_invalidate(); DBUG_VOID_RETURN; } @@ -1425,9 +1436,8 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) thd->slave_thread = 1; set_slave_thread_options(thd); thd->client_capabilities = CLIENT_LOCAL_FILES; - thd->real_id=pthread_self(); pthread_mutex_lock(&LOCK_thread_count); - thd->thread_id = thread_id++; + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; pthread_mutex_unlock(&LOCK_thread_count); if (init_thr_lock() || thd->store_globals()) @@ -1437,12 +1447,6 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) DBUG_RETURN(-1); } -#if !defined(__WIN__) && !defined(__NETWARE__) - sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use - VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); -#endif - if (thd_type == SLAVE_THD_SQL) thd->proc_info= "Waiting for the next event in relay log"; else @@ -1615,7 +1619,8 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) } -int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error) +int check_expected_error(THD* thd, RELAY_LOG_INFO const *rli, + int expected_error) { DBUG_ENTER("check_expected_error"); @@ -1720,78 +1725,43 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) } if (ev) { - int type_code = ev->get_type_code(); - int exec_res; + int const type_code= ev->get_type_code(); + int exec_res= 0; /* - Queries originating from this server must be skipped. - Low-level events (Format_desc, Rotate, Stop) from this server - must also be skipped. But for those we don't want to modify - group_master_log_pos, because these events did not exist on the master. - Format_desc is not completely skipped. - Skip queries specified by the user in slave_skip_counter. - We can't however skip events that has something to do with the - log files themselves. - Filtering on own server id is extremely important, to ignore execution of - events created by the creation/rotation of the relay log (remember that - now the relay log starts with its Format_desc, has a Rotate etc). */ - DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + DBUG_PRINT("info",("type_code=%d (%s), server_id=%d", + type_code, ev->get_type_str(), ev->server_id)); + DBUG_PRINT("info", ("thd->options={ %s%s}", + FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), + FLAGSTR(thd->options, OPTION_BEGIN))); - if ((ev->server_id == (uint32) ::server_id && - !replicate_same_server_id && - type_code != FORMAT_DESCRIPTION_EVENT) || - (rli->slave_skip_counter && - type_code != ROTATE_EVENT && type_code != STOP_EVENT && - type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT)) - { - DBUG_PRINT("info", ("event skipped")); - /* - We only skip the event here and do not increase the group log - position. In the event that we have to restart, this means - that we might have to skip the event again, but that is a - minor issue. - - If we were to increase the group log position when skipping an - event, it might be that we are restarting at the wrong - position and have events before that we should have executed, - so not increasing the group log position is a sure bet in this - case. - - In this way, we just step the group log position when we - *know* that we are at the end of a group. - */ - rli->inc_event_relay_log_pos(); - /* - Protect against common user error of setting the counter to 1 - instead of 2 while recovering from an insert which used auto_increment, - rand or user var. 
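Earlier in this hunk, init_slave_thread() now assigns the id with LOCK_thread_count held and mirrors it into pseudo_thread_id. Stripped of the server specifics, the pattern is a shared counter whose read-modify-write is serialized by a mutex; a minimal sketch (the lock and counter names below are invented):

    #include <pthread.h>

    /* Invented names; only the locking pattern mirrors the hunk above. */
    static pthread_mutex_t id_lock= PTHREAD_MUTEX_INITIALIZER;
    static unsigned long next_id= 1;

    unsigned long allocate_id()
    {
      pthread_mutex_lock(&id_lock);
      unsigned long id= next_id++;   /* increment must not be interleaved */
      pthread_mutex_unlock(&id_lock);
      return id;
    }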
- */ - if (rli->slave_skip_counter && - !((type_code == INTVAR_EVENT || - type_code == RAND_EVENT || - type_code == USER_VAR_EVENT) && - rli->slave_skip_counter == 1) && - /* - The events from ourselves which have something to do with the relay - log itself must be skipped, true, but they mustn't decrement - rli->slave_skip_counter, because the user is supposed to not see - these events (they are not in the master's binlog) and if we - decremented, START SLAVE would for example decrement when it sees - the Rotate, so the event which the user probably wanted to skip - would not be skipped. - */ - !(ev->server_id == (uint32) ::server_id && - (type_code == ROTATE_EVENT || type_code == STOP_EVENT || - type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) - --rli->slave_skip_counter; - pthread_mutex_unlock(&rli->data_lock); - delete ev; - DBUG_RETURN(0); // avoid infinite update loops - } - pthread_mutex_unlock(&rli->data_lock); + + /* + Execute the event to change the database and update the binary + log coordinates, but first we set some data that is needed for + the thread. + + The event will be executed unless it is supposed to be skipped. + + Queries originating from this server must be skipped. Low-level + events (Format_description_log_event, Rotate_log_event, + Stop_log_event) from this server must also be skipped. But for + those we don't want to modify 'group_master_log_pos', because + these events did not exist on the master. + Format_description_log_event is not completely skipped. + + Skip queries specified by the user in 'slave_skip_counter'. We + can't however skip events that has something to do with the log + files themselves. + + Filtering on own server id is extremely important, to ignore + execution of events created by the creation/rotation of the relay + log (remember that now the relay log starts with its Format_desc, + has a Rotate etc). + */ thd->server_id = ev->server_id; // use the original server id for logging thd->set_time(); // time the query @@ -1799,19 +1769,69 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) if (!ev->when) ev->when = time(NULL); ev->thd = thd; // because up to this point, ev->thd == 0 - DBUG_PRINT("info", ("thd->options={ %s%s}", - FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), - FLAGSTR(thd->options, OPTION_BEGIN))); - exec_res = ev->exec_event(rli); - DBUG_PRINT("info", ("exec_event result: %d", exec_res)); - DBUG_ASSERT(rli->sql_thd==thd); + int reason= ev->shall_skip(rli); + if (reason == Log_event::EVENT_SKIP_COUNT) + --rli->slave_skip_counter; + pthread_mutex_unlock(&rli->data_lock); + if (reason == Log_event::EVENT_SKIP_NOT) + exec_res= ev->apply_event(rli); +#ifndef DBUG_OFF + else + { + /* + This only prints information to the debug trace. + + TODO: Print an informational message to the error log? 
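The refactored executor in this hunk separates deciding from doing: the event reports whether it should be skipped, is applied only when it is not, and the position bookkeeping happens afterwards in update_pos() (shown just below). A reduced sketch of that control flow; Rli and Event here are toy stand-ins with trivial bodies, not the server's classes, and only the three-step shape follows the patch:

    /* Toy sketch of the shall_skip() / apply_event() / update_pos() split. */
    enum SkipReason { EVENT_SKIP_NOT, EVENT_SKIP_IGNORE, EVENT_SKIP_COUNT };

    struct Rli { long slave_skip_counter; };

    struct Event
    {
      /* Trivial bodies keep the sketch self-contained. */
      SkipReason shall_skip(Rli*)  { return EVENT_SKIP_NOT; }
      int        apply_event(Rli*) { return 0; }   /* 0 == success */
      int        update_pos(Rli*)  { return 0; }   /* 0 == success */
    };

    int execute_one_event(Event *ev, Rli *rli)
    {
      int exec_res= 0;
      SkipReason reason= ev->shall_skip(rli);
      if (reason == EVENT_SKIP_COUNT)
        --rli->slave_skip_counter;        /* this skip consumed one count */
      if (reason == EVENT_SKIP_NOT)
        exec_res= ev->apply_event(rli);
      if (exec_res == 0 && ev->update_pos(rli))
        return 1;                         /* positions unreliable, stop */
      return exec_res;
    }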
+ */ + static const char *const explain[] = { + "event was not skipped", // EVENT_SKIP_NOT, + "event originated from this server", // EVENT_SKIP_IGNORE, + "event skip counter was non-zero" // EVENT_SKIP_COUNT + }; + DBUG_PRINT("info", ("%s was skipped because %s", + ev->get_type_str(), explain[reason])); + } +#endif + + DBUG_PRINT("info", ("apply_event error = %d", exec_res)); + if (exec_res == 0) + { + int error= ev->update_pos(rli); + char buf[22]; + DBUG_PRINT("info", ("update_pos error = %d", error)); + DBUG_PRINT("info", ("group %s %s", + llstr(rli->group_relay_log_pos, buf), + rli->group_relay_log_name)); + DBUG_PRINT("info", ("event %s %s", + llstr(rli->event_relay_log_pos, buf), + rli->event_relay_log_name)); + /* + The update should not fail, so print an error message and + return an error code. + + TODO: Replace this with a decent error message when merged + with BUG#24954 (which adds several new error message). + */ + if (error) + { + slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, + "It was not possible to update the positions" + " of the relay log information: the slave may" + " be in an inconsistent state." + " Stopped in %s position %s", + rli->group_relay_log_name, + llstr(rli->group_relay_log_pos, buf)); + DBUG_RETURN(1); + } + } + /* Format_description_log_event should not be deleted because it will be used to read info about the relay log's format; it will be deleted when the SQL thread does not need it, i.e. when this thread terminates. */ - if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) + if (type_code != FORMAT_DESCRIPTION_EVENT) { DBUG_PRINT("info", ("Deleting the event after it has been executed")); delete ev; @@ -2072,7 +2092,7 @@ after reconnect"); if (event_len == packet_error) { uint mysql_error_number= mysql_errno(mysql); - if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) + if (mysql_error_number == CR_NET_PACKET_TOO_LARGE) { sql_print_error("\ Log entry on master is longer than max_allowed_packet (%ld) on \ @@ -2212,11 +2232,16 @@ err: THD_CHECK_SENTRY(thd); delete thd; pthread_mutex_unlock(&LOCK_thread_count); - mi->abort_slave = 0; - mi->slave_running = 0; - mi->io_thd = 0; - pthread_mutex_unlock(&mi->run_lock); + mi->abort_slave= 0; + mi->slave_running= 0; + mi->io_thd= 0; + /* + Note: the order of the two following calls (first broadcast, then unlock) + is important. Otherwise a killer_thread can execute between the calls and + delete the mi structure leading to a crash! (see BUG#25306 for details) + */ pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done + pthread_mutex_unlock(&mi->run_lock); my_thread_end(); pthread_exit(0); DBUG_RETURN(0); // Can't return anything here @@ -2367,13 +2392,17 @@ Slave SQL thread aborted. Can't execute init_slave query"); THD_CHECK_SENTRY(thd); if (exec_relay_log_event(thd,rli)) { + DBUG_PRINT("info", ("exec_relay_log_event() failed")); // do not scare the user if SQL thread was simply killed or stopped if (!sql_slave_killed(thd,rli)) { /* - retrieve as much info as possible from the thd and, error codes and warnings - and print this to the error log as to allow the user to locate the error + retrieve as much info as possible from the thd and, error + codes and warnings and print this to the error log as to + allow the user to locate the error */ + DBUG_PRINT("info", ("thd->net.last_errno=%d; rli->last_slave_errno=%d", + thd->net.last_errno, rli->last_slave_errno)); if (thd->net.last_errno != 0) { if (rli->last_slave_errno == 0) @@ -2394,10 +2423,25 @@ Slave SQL thread aborted. 
Can't execute init_slave query"); /* Print any warnings issued */ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list); MYSQL_ERROR *err; + /* + Added controlled slave thread cancel for replication + of user-defined variables. + */ + bool udf_error = false; while ((err= it++)) + { + if (err->code == ER_CANT_OPEN_LIBRARY) + udf_error = true; sql_print_warning("Slave: %s Error_code: %d",err->msg, err->code); - - sql_print_error("\ + } + if (udf_error) + sql_print_error("Error loading user-defined library, slave SQL " + "thread aborted. Install the missing library, and restart the " + "slave SQL thread with \"SLAVE START\". We stopped at log '%s' " + "position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, + llbuff)); + else + sql_print_error("\ Error running query, slave SQL thread aborted. Fix the problem, and restart \ the slave SQL thread with \"SLAVE START\". We stopped at log \ '%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff)); @@ -2462,9 +2506,14 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ THD_CHECK_SENTRY(thd); delete thd; pthread_mutex_unlock(&LOCK_thread_count); + /* + Note: the order of the broadcast and unlock calls below (first broadcast, then unlock) + is important. Otherwise a killer_thread can execute between the calls and + delete the mi structure leading to a crash! (see BUG#25306 for details) + */ pthread_cond_broadcast(&rli->stop_cond); - // tell the world we are done - pthread_mutex_unlock(&rli->run_lock); + pthread_mutex_unlock(&rli->run_lock); // tell the world we are done + my_thread_end(); pthread_exit(0); DBUG_RETURN(0); // Can't return anything here @@ -2695,6 +2744,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN(1); } + pthread_mutex_lock(&mi->data_lock); ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */ switch (ev->get_type_code()) { @@ -2958,7 +3008,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) pthread_mutex_lock(log_lock); if ((uint4korr(buf + SERVER_ID_OFFSET) == ::server_id) && - !replicate_same_server_id) + !mi->rli.replicate_same_server_id) { /* Do not write it to the relay log. @@ -3660,6 +3710,70 @@ end: } +/** + Detects, based on master's version (as found in the relay log), if master + has a certain bug. + @param rli RELAY_LOG_INFO which tells the master's version + @param bug_id Number of the bug as found in bugs.mysql.com + @return TRUE if master has the bug, FALSE if it does not. 
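The function body that follows pairs each bug number with the first version that has it and the first version that fixes it, so a master is flagged when its version falls in the half-open range [introduced_in, fixed_in). A hypothetical call site, assuming the slave.h declaration added by this patch is in scope; the wrapper below is invented for illustration and the real callers are outside this hunk:

    /* Assumes the usual server headers plus slave.h, which this patch
       extends with rpl_master_has_bug(). */
    static int apply_guarded(RELAY_LOG_INFO *rli)
    {
      /* 24432 is one of the bug ids registered in the table below. */
      if (rpl_master_has_bug(rli, 24432))
        return 1;       /* error already reported; SQL thread should stop */
      /* ... ev->apply_event(rli) would follow here ... */
      return 0;
    }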
+*/ +bool rpl_master_has_bug(RELAY_LOG_INFO *rli, uint bug_id) +{ + struct st_version_range_for_one_bug { + uint bug_id; + const uchar introduced_in[3]; // first version with bug + const uchar fixed_in[3]; // first version with fix + }; + static struct st_version_range_for_one_bug versions_for_all_bugs[]= + { + {24432, { 5, 0, 24 }, { 5, 0, 38 } }, + {24432, { 5, 1, 12 }, { 5, 1, 17 } } + }; + const uchar *master_ver= + rli->relay_log.description_event_for_exec->server_version_split; + + DBUG_ASSERT(sizeof(rli->relay_log.description_event_for_exec->server_version_split) == 3); + + for (uint i= 0; + i < sizeof(versions_for_all_bugs)/sizeof(*versions_for_all_bugs);i++) + { + const uchar *introduced_in= versions_for_all_bugs[i].introduced_in, + *fixed_in= versions_for_all_bugs[i].fixed_in; + if ((versions_for_all_bugs[i].bug_id == bug_id) && + (memcmp(introduced_in, master_ver, 3) <= 0) && + (memcmp(fixed_in, master_ver, 3) > 0)) + { + // a short message for SHOW SLAVE STATUS (message length constraints) + my_printf_error(ER_UNKNOWN_ERROR, "master may suffer from" + " http://bugs.mysql.com/bug.php?id=%u" + " so slave stops; check error log on slave" + " for more info", MYF(0), bug_id); + // a verbose message for the error log + slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, + "According to the master's version ('%s')," + " it is probable that master suffers from this bug:" + " http://bugs.mysql.com/bug.php?id=%u" + " and thus replicating the current binary log event" + " may make the slave's data become different from the" + " master's data." + " To take no risk, slave refuses to replicate" + " this event and stops." + " We recommend that all updates be stopped on the" + " master and slave, that the data of both be" + " manually synchronized," + " that master's binary logs be deleted," + " that master be upgraded to a version at least" + " equal to '%d.%d.%d'. Then replication can be" + " restarted.", + rli->relay_log.description_event_for_exec->server_version, + bug_id, + fixed_in[0], fixed_in[1], fixed_in[2]); + return TRUE; + } + } + return FALSE; +} + #ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class I_List_iterator<i_string>; template class I_List_iterator<i_string_pair>; diff --git a/sql/slave.h b/sql/slave.h index bc039f6eb75..107b74c09dd 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -159,11 +159,12 @@ int fetch_master_table(THD* thd, const char* db_name, const char* table_name, bool show_master_info(THD* thd, MASTER_INFO* mi); bool show_binlog_info(THD* thd); +bool rpl_master_has_bug(RELAY_LOG_INFO *rli, uint bug_id); const char *print_slave_db_safe(const char *db); -int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); +int check_expected_error(THD* thd, RELAY_LOG_INFO const *rli, int error_code); void skip_load_data_infile(NET* net); -void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, +void slave_print_msg(enum loglevel level, RELAY_LOG_INFO const *rli, int err_code, const char* msg, ...) 
ATTRIBUTE_FORMAT(printf, 4, 5); @@ -181,7 +182,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,ulonglong pos, int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, const char** errmsg); void set_slave_thread_options(THD* thd); -void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli); +void set_slave_thread_default_charset(THD *thd, RELAY_LOG_INFO const *rli); void rotate_relay_log(MASTER_INFO* mi); pthread_handler_t handle_slave_io(void *arg); diff --git a/sql/sp.cc b/sql/sp.cc index 3a7bea6a4b1..d8dd9b3d67e 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -69,24 +69,6 @@ enum /* - Close mysql.proc, opened with open_proc_table_for_read(). - - SYNOPSIS - close_proc_table() - thd Thread context - backup Pointer to Open_tables_state instance which holds - information about tables which were open before we - decided to access mysql.proc. -*/ - -void close_proc_table(THD *thd, Open_tables_state *backup) -{ - close_thread_tables(thd); - thd->restore_backup_open_tables_state(backup); -} - - -/* Open the mysql.proc table for read. SYNOPSIS @@ -96,13 +78,6 @@ void close_proc_table(THD *thd, Open_tables_state *backup) currently open tables will be saved, and from which will be restored when we will end work with mysql.proc. - NOTES - Thanks to restrictions which we put on opening and locking of - this table for writing, we can open and lock it for reading - even when we already have some other tables open and locked. - One must call close_proc_table() to close table opened with - this call. - RETURN 0 Error # Pointer to TABLE object of mysql.proc @@ -110,38 +85,18 @@ void close_proc_table(THD *thd, Open_tables_state *backup) TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) { - TABLE_LIST tables; - TABLE *table; - bool not_used; - DBUG_ENTER("open_proc_table"); - - thd->reset_n_backup_open_tables_state(backup); + DBUG_ENTER("open_proc_table_for_read"); - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*)"proc"; - if (!(table= open_table(thd, &tables, thd->mem_root, ¬_used, - MYSQL_LOCK_IGNORE_FLUSH))) - { - thd->restore_backup_open_tables_state(backup); - DBUG_RETURN(0); - } - table->use_all_columns(); - - DBUG_ASSERT(table->s->system_table); + TABLE_LIST table; + bzero((char*) &table, sizeof(table)); + table.db= (char*) "mysql"; + table.table_name= table.alias= (char*)"proc"; + table.lock_type= TL_READ; - table->reginfo.lock_type= TL_READ; - /* - We have to ensure we are not blocked by a flush tables, as this - could lead to a deadlock if we have other tables opened. 
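The replacement just below drops this hand-rolled open, lock and state-backup sequence in favour of the shared system-table helpers. A sketch of the resulting read path, patterned on db_find_routine later in the hunk; error handling is trimmed, the helper signatures are taken from the patch, and the surrounding function is invented:

    /* Assumes the server headers that declare THD, TABLE, Open_tables_state,
       open_proc_table_for_read() and close_system_tables() are in scope. */
    static void read_from_proc(THD *thd)
    {
      Open_tables_state backup;                  /* caller's open tables */
      TABLE *table= open_proc_table_for_read(thd, &backup);
      if (!table)
        return;                                  /* open failed */
      /* ... index_read_idx() / field fetches on mysql.proc here ... */
      close_system_tables(thd, &backup);         /* closes and restores */
    }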
- */ - if (!(thd->lock= mysql_lock_tables(thd, &table, 1, - MYSQL_LOCK_IGNORE_FLUSH, ¬_used))) - { - close_proc_table(thd, backup); + if (!open_system_tables_for_read(thd, &table, backup)) + DBUG_RETURN(table.table); + else DBUG_RETURN(0); - } - DBUG_RETURN(table); } @@ -162,20 +117,15 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) static TABLE *open_proc_table_for_update(THD *thd) { - TABLE_LIST tables; - TABLE *table; - DBUG_ENTER("open_proc_table"); - - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*)"proc"; - tables.lock_type= TL_WRITE; + DBUG_ENTER("open_proc_table_for_update"); - table= open_ltable(thd, &tables, TL_WRITE); - if (table) - table->use_all_columns(); + TABLE_LIST table; + bzero((char*) &table, sizeof(table)); + table.db= (char*) "mysql"; + table.table_name= table.alias= (char*)"proc"; + table.lock_type= TL_WRITE; - DBUG_RETURN(table); + DBUG_RETURN(open_system_table_for_update(thd, &table)); } @@ -218,8 +168,7 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table) key_copy(key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) DBUG_RETURN(SP_KEY_NOT_FOUND); @@ -364,7 +313,7 @@ db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) chistics.comment.str= ptr; chistics.comment.length= length; - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); table= 0; ret= db_load_routine(thd, type, name, sphp, @@ -373,7 +322,7 @@ db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) done: if (table) - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(ret); } @@ -435,7 +384,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged))) goto end; - lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); + lex_start(thd, defstr.c_ptr(), defstr.length()); thd->spcont= 0; if (MYSQLparse(thd) || thd->is_fatal_error || newlex.sphead == NULL) @@ -498,6 +447,13 @@ db_create_routine(THD *thd, int type, sp_head *sp) DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length, sp->m_name.str)); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + if (!(table= open_proc_table_for_update(thd))) ret= SP_OPEN_TABLE_FAILED; else @@ -634,6 +590,13 @@ db_drop_routine(THD *thd, int type, sp_name *name) DBUG_PRINT("enter", ("type: %d name: %.*s", type, name->m_name.length, name->m_name.str)); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + if (!(table= open_proc_table_for_update(thd))) DBUG_RETURN(SP_OPEN_TABLE_FAILED); if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK) @@ -666,6 +629,13 @@ db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics) DBUG_PRINT("enter", ("type: %d name: %.*s", type, name->m_name.length, name->m_name.str)); + /* + This statement will be replicated as a statement, even when using + row-based replication. 
The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + if (!(table= open_proc_table_for_update(thd))) DBUG_RETURN(SP_OPEN_TABLE_FAILED); if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK) @@ -904,7 +874,7 @@ sp_drop_db_routines(THD *thd, char *db) table->file->ha_index_init(0, 1); if (! table->file->index_read(table->record[0], (byte *)table->field[MYSQL_PROC_FIELD_DB]->ptr, - key_len, HA_READ_KEY_EXACT)) + (key_part_map)1, HA_READ_KEY_EXACT)) { int nxtres; bool deleted= FALSE; @@ -1126,7 +1096,7 @@ sp_routine_exists_in_table(THD *thd, int type, sp_name *name) { if ((ret= db_find_routine_aux(thd, type, name, table)) != SP_OK) ret= SP_KEY_NOT_FOUND; - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); } return ret; } @@ -100,7 +100,6 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first); we already have some tables open and locked. */ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup); -void close_proc_table(THD *thd, Open_tables_state *backup); /* diff --git a/sql/sp_head.cc b/sql/sp_head.cc index b77d0cc9a0c..8eeec741dcf 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -36,6 +36,7 @@ Item_result sp_map_result_type(enum enum_field_types type) { switch (type) { + case MYSQL_TYPE_BIT: case MYSQL_TYPE_TINY: case MYSQL_TYPE_SHORT: case MYSQL_TYPE_LONG: @@ -58,6 +59,7 @@ Item::Type sp_map_item_type(enum enum_field_types type) { switch (type) { + case MYSQL_TYPE_BIT: case MYSQL_TYPE_TINY: case MYSQL_TYPE_SHORT: case MYSQL_TYPE_LONG: @@ -488,7 +490,7 @@ sp_head::init(LEX *lex) { DBUG_ENTER("sp_head::init"); - lex->spcont= m_pcont= new sp_pcontext(NULL); + lex->spcont= m_pcont= new sp_pcontext(); /* Altough trg_table_fields list is used only in triggers we init for all @@ -539,15 +541,14 @@ void sp_head::init_strings(THD *thd, LEX *lex) { DBUG_ENTER("sp_head::init_strings"); - const uchar *endp; /* Used to trim the end */ + const char *endp; /* Used to trim the end */ /* During parsing, we must use thd->mem_root */ MEM_ROOT *root= thd->mem_root; if (m_param_begin && m_param_end) { m_params.length= m_param_end - m_param_begin; - m_params.str= strmake_root(root, - (char *)m_param_begin, m_params.length); + m_params.str= strmake_root(root, m_param_begin, m_params.length); } /* If ptr has overrun end_of_query then end_of_query is the end */ @@ -559,9 +560,9 @@ sp_head::init_strings(THD *thd, LEX *lex) endp= skip_rear_comments(m_body_begin, endp); m_body.length= endp - m_body_begin; - m_body.str= strmake_root(root, (char *)m_body_begin, m_body.length); + m_body.str= strmake_root(root, m_body_begin, m_body.length); m_defstr.length= endp - lex->buf; - m_defstr.str= strmake_root(root, (char *)lex->buf, m_defstr.length); + m_defstr.str= strmake_root(root, lex->buf, m_defstr.length); DBUG_VOID_RETURN; } @@ -1115,7 +1116,7 @@ sp_head::execute(THD *thd) case SP_HANDLER_CONTINUE: thd->restore_active_arena(&execute_arena, &backup_arena); thd->set_n_backup_active_arena(&execute_arena, &backup_arena); - ctx->push_hstack(ip); + ctx->push_hstack(i->get_cont_dest()); // Fall through default: ip= hip; @@ -1125,6 +1126,7 @@ sp_head::execute(THD *thd) thd->clear_error(); thd->is_fatal_error= 0; thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; continue; } } @@ -1482,8 +1484,24 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, if (need_binlog_call) { + query_id_t q; reset_dynamic(&thd->user_var_events); - 
mysql_bin_log.start_union_events(thd); + /* + In case of artificially constructed events for function calls + we have separate union for each such event and hence can't use + query_id of real calling statement as the start of all these + unions (this will break logic of replication of user-defined + variables). So we use artifical value which is guaranteed to + be greater than all query_id's of all statements belonging + to previous events/unions. + Possible alternative to this is logging of all function invocations + as one select and not resetting THD::user_var_events before + each invocation. + */ + VOID(pthread_mutex_lock(&LOCK_thread_count)); + q= global_query_id; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + mysql_bin_log.start_union_events(thd, q + 1); binlog_save_options= thd->options; thd->options&= ~OPTION_BIN_LOG; } @@ -2426,16 +2444,11 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, m_lex->mark_as_requiring_prelocking(lex_query_tables_own_last); } } - + reinit_stmt_before_use(thd, m_lex); - /* - If requested check whenever we have access to tables in LEX's table list - and open and lock them before executing instructtions core function. - */ - if (open_tables && - (check_table_access(thd, SELECT_ACL, m_lex->query_tables, 0) || - open_and_lock_tables(thd, m_lex->query_tables))) - res= -1; + + if (open_tables) + res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables); if (!res) { @@ -2487,6 +2500,29 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, sp_instr class functions */ +int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables) +{ + int result; + + /* + Check whenever we have access to tables for this statement + and open and lock them before executing instructions core function. + */ + if (check_table_access(thd, SELECT_ACL, tables, 0) + || open_and_lock_tables(thd, tables)) + result= -1; + else + result= 0; + + return result; +} + +uint sp_instr::get_cont_dest() +{ + return (m_ip+1); +} + + int sp_instr::exec_core(THD *thd, uint *nextp) { DBUG_ASSERT(0); @@ -2672,6 +2708,15 @@ sp_instr_set_trigger_field::print(String *str) value->print(str); } +/* + sp_instr_opt_meta +*/ + +uint sp_instr_opt_meta::get_cont_dest() +{ + return m_cont_dest; +} + /* sp_instr_jump class functions @@ -2761,7 +2806,6 @@ sp_instr_jump_if_not::exec_core(THD *thd, uint *nextp) if (! it) { res= -1; - *nextp = m_cont_dest; } else { @@ -3330,7 +3374,6 @@ sp_instr_set_case_expr::exec_core(THD *thd, uint *nextp) spcont->clear_handler(); thd->spcont= spcont; } - *nextp= m_cont_dest; /* For continue handler */ } else *nextp= m_ip+1; diff --git a/sql/sp_head.h b/sql/sp_head.h index be6eefa2ea4..9aee9e9389e 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -107,8 +107,6 @@ public: /* Possible values of m_flags */ enum { HAS_RETURN= 1, // For FUNCTIONs only: is set if has RETURN - IN_SIMPLE_CASE= 2, // Is set if parsing a simple CASE - IN_HANDLER= 4, // Is set if the parser is in a handler body MULTI_RESULTS= 8, // Is set if a procedure with SELECT(s) CONTAINS_DYNAMIC_SQL= 16, // Is set if a procedure with PREPARE/EXECUTE IS_INVOKED= 32, // Is set if this sp_head is being used @@ -128,7 +126,7 @@ public: create_field m_return_field_def; /* This is used for FUNCTIONs only. 
*/ - const uchar *m_tmp_query; // Temporary pointer to sub query string + const char *m_tmp_query; // Temporary pointer to sub query string st_sp_chistics *m_chistics; ulong m_sql_mode; // For SHOW CREATE and execution LEX_STRING m_qname; // db.name @@ -176,7 +174,7 @@ public: */ HASH m_sroutines; // Pointers set during parsing - const uchar *m_param_begin, *m_param_end, *m_body_begin; + const char *m_param_begin, *m_param_end, *m_body_begin; /* Security context for stored routine which should be run under @@ -468,15 +466,34 @@ public: thd Thread handle nextp OUT index of the next instruction to execute. (For most instructions this will be the instruction following this - one). - - RETURN - 0 on success, - other if some error occured + one). Note that this parameter is undefined in case of + errors, use get_cont_dest() to find the continuation + instruction for CONTINUE error handlers. + + RETURN + 0 on success, + other if some error occurred */ - + virtual int execute(THD *thd, uint *nextp) = 0; + /** + Execute <code>open_and_lock_tables()</code> for this statement. + Open and lock the tables used by this statement, as a pre-requisite + to execute the core logic of this instruction with + <code>exec_core()</code>. + @param thd the current thread + @param tables the list of tables to open and lock + @return zero on success, non zero on failure. + */ + int exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables); + + /** + Get the continuation destination of this instruction. + @return the continuation destination + */ + virtual uint get_cont_dest(); + /* Execute core function of instruction after all preparations (e.g. setting of proper LEX, saving part of the thread context have been @@ -741,6 +758,8 @@ public: virtual void set_destination(uint old_dest, uint new_dest) = 0; + virtual uint get_cont_dest(); + protected: sp_instr *m_optdest; // Used during optimization diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc index 6229cf14604..780243cc79f 100644 --- a/sql/sp_pcontext.cc +++ b/sql/sp_pcontext.cc @@ -25,6 +25,11 @@ #include "sp_pcontext.h" #include "sp_head.h" +/* Initial size for the dynamic arrays in sp_pcontext */ +#define PCONTEXT_ARRAY_INIT_ALLOC 16 +/* Increment size for the dynamic arrays in sp_pcontext */ +#define PCONTEXT_ARRAY_INCREMENT_ALLOC 8 + /* Sanity check for SQLSTATEs. Will not check if it's really an existing state (there are just too many), but will check length and bad characters. 
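The sp_pcontext.cc comment above describes a purely syntactic SQLSTATE sanity check: it does not verify that the state actually exists, only its length and characters. A hedged sketch of what such a check can look like; the exact accepted character set used here (ASCII digits and upper-case letters) is an assumption for illustration, not necessarily the server's precise rule.

    #include <cstddef>

    /* Sketch only: accept exactly five characters, each an ASCII digit or
       upper-case letter. The real sp_cond_check() may differ in detail. */
    static bool sqlstate_looks_sane(const char *s, size_t len)
    {
      if (len != 5)
        return false;
      for (size_t i= 0; i < 5; i++)
      {
        const char c= s[i];
        if (!((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z')))
          return false;
      }
      return true;
    }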
@@ -49,28 +54,61 @@ sp_cond_check(LEX_STRING *sqlstate) return TRUE; } -sp_pcontext::sp_pcontext(sp_pcontext *prev) - :Sql_alloc(), m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), - m_context_handlers(0), m_parent(prev), m_pboundary(0) +sp_pcontext::sp_pcontext() + : Sql_alloc(), + m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), + m_context_handlers(0), m_parent(NULL), m_pboundary(0), + m_label_scope(LABEL_DEFAULT_SCOPE) { - VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), 16, 8)); - VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), 16, 8)); - VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), 16, 8)); - VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), 16, 8)); - VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), 16, 8)); + VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); m_label.empty(); m_children.empty(); - if (!prev) - { - m_var_offset= m_cursor_offset= 0; - m_num_case_exprs= 0; - } - else - { - m_var_offset= prev->m_var_offset + prev->m_max_var_index; - m_cursor_offset= prev->current_cursor_count(); - m_num_case_exprs= prev->get_num_case_exprs(); - } + + m_var_offset= m_cursor_offset= 0; + m_num_case_exprs= 0; +} + +sp_pcontext::sp_pcontext(sp_pcontext *prev, label_scope_type label_scope) + : Sql_alloc(), + m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), + m_context_handlers(0), m_parent(prev), m_pboundary(0), + m_label_scope(label_scope) +{ + VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + m_label.empty(); + m_children.empty(); + + m_var_offset= prev->m_var_offset + prev->m_max_var_index; + m_cursor_offset= prev->current_cursor_count(); + m_num_case_exprs= prev->get_num_case_exprs(); } void @@ -92,9 +130,9 @@ sp_pcontext::destroy() } sp_pcontext * -sp_pcontext::push_context() +sp_pcontext::push_context(label_scope_type label_scope) { - sp_pcontext *child= new sp_pcontext(this); + sp_pcontext *child= new sp_pcontext(this, label_scope); if (child) m_children.push_back(child); @@ -257,7 +295,15 @@ sp_pcontext::find_label(char *name) if (my_strcasecmp(system_charset_info, name, lab->name) == 0) return lab; - if (m_parent) + /* + Note about exception handlers. + See SQL:2003 SQL/PSM (ISO/IEC 9075-4:2003), + section 13.1 <compound statement>, + syntax rule 4. 
+ In short, a DECLARE HANDLER block can not refer + to labels from the parent context, as they are out of scope. + */ + if (m_parent && (m_label_scope == LABEL_DEFAULT_SCOPE)) return m_parent->find_label(name); return NULL; } diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h index b2cdd5e689c..5bffda79f98 100644 --- a/sql/sp_pcontext.h +++ b/sql/sp_pcontext.h @@ -88,16 +88,33 @@ typedef struct sp_cond sp_cond_type_t *val; } sp_cond_t; +/** + The scope of a label in Stored Procedures, + for name resolution of labels in a parsing context. +*/ +enum label_scope_type +{ + /** + The labels declared in a parent context are in scope. + */ + LABEL_DEFAULT_SCOPE, + /** + The labels declared in a parent context are not in scope. + */ + LABEL_HANDLER_SCOPE +}; -/* - The parse-time context, used to keep track on declared variables/parameters, +/** + The parse-time context, used to keep track of declared variables/parameters, conditions, handlers, cursors and labels, during parsing. sp_contexts are organized as a tree, with one object for each begin-end - block, plus a root-context for the parameters. + block, one object for each exception handler, + plus a root-context for the parameters. This is used during parsing for looking up defined names (e.g. declared variables and visible labels), for error checking, and to calculate offsets to be used at runtime. (During execution variable values, active handlers and cursors, etc, are referred to by an index in a stack.) + Parsing contexts for exception handlers limit the visibility of labels. The pcontext tree is also kept during execution and is used for error checking (e.g. correct number of parameters), and in the future, used by the debugger. @@ -105,21 +122,30 @@ typedef struct sp_cond class sp_pcontext : public Sql_alloc { - sp_pcontext(const sp_pcontext &); /* Prevent use of these */ - void operator=(sp_pcontext &); +public: - public: - - sp_pcontext(sp_pcontext *prev); + /** + Constructor. + Builds a parsing context root node. + */ + sp_pcontext(); // Free memory void destroy(); + /** + Create and push a new context in the tree. + @param label_scope label scope for the new parsing context + @return the node created + */ sp_pcontext * - push_context(); + push_context(label_scope_type label_scope); - // Returns the previous context, not the one we pop + /** + Pop a node from the parsing context tree. + @return the parent node + */ sp_pcontext * pop_context(); @@ -363,6 +389,13 @@ class sp_pcontext : public Sql_alloc protected: + /** + Constructor for a tree node. + @param prev the parent parsing context + @param label_scope label_scope for this parsing context + */ + sp_pcontext(sp_pcontext *prev, label_scope_type label_scope); + /* m_max_var_index -- number of variables (including all types of arguments) in this context including all children contexts. @@ -416,6 +449,14 @@ private: List<sp_pcontext> m_children; // Children contexts, used for destruction + /** + Scope of labels for this parsing context. 
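The tail of find_label() quoted above is what enforces the SQL:2003 rule described in the new comment: the lookup only recurses into the parent context when the current context was not opened for a handler. Below is a simplified, self-contained model of that scoped lookup; the class and member names are invented for the sketch and are not the real sp_pcontext API.

    #include <cstddef>
    #include <map>
    #include <string>

    enum label_scope_type { LABEL_DEFAULT_SCOPE, LABEL_HANDLER_SCOPE };

    struct Parse_context
    {
      Parse_context *parent;
      label_scope_type label_scope;
      std::map<std::string, int> labels;   /* label name -> instruction index */

      const int *find_label(const std::string &name) const
      {
        std::map<std::string, int>::const_iterator it= labels.find(name);
        if (it != labels.end())
          return &it->second;
        /* A handler context must not see labels of enclosing blocks. */
        if (parent && label_scope == LABEL_DEFAULT_SCOPE)
          return parent->find_label(name);
        return NULL;
      }
    };

With this structure, a context created with LABEL_HANDLER_SCOPE acts as a barrier for label resolution, which is why push_context() now takes the scope as an argument.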
+ */ + label_scope_type m_label_scope; + +private: + sp_pcontext(const sp_pcontext &); /* Prevent use of these */ + void operator=(sp_pcontext &); }; // class sp_pcontext : public Sql_alloc diff --git a/sql/spatial.cc b/sql/spatial.cc index 6cadb0f3aad..e0680ed182c 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -216,23 +216,24 @@ static uint32 wkb_get_uint(const char *ptr, Geometry::wkbByteOrder bo) } -int Geometry::create_from_wkb(Geometry_buffer *buffer, - const char *wkb, uint32 len, String *res) +Geometry *Geometry::create_from_wkb(Geometry_buffer *buffer, + const char *wkb, uint32 len, String *res) { uint32 geom_type; Geometry *geom; if (len < WKB_HEADER_SIZE) - return 1; + return NULL; geom_type= wkb_get_uint(wkb+1, (wkbByteOrder)wkb[0]); if (!(geom= create_by_typeid(buffer, (int) geom_type)) || res->reserve(WKB_HEADER_SIZE, 512)) - return 1; + return NULL; res->q_append((char) wkb_ndr); res->q_append(geom_type); - return geom->init_from_wkb(wkb+WKB_HEADER_SIZE, len - WKB_HEADER_SIZE, - (wkbByteOrder) wkb[0], res); + + return geom->init_from_wkb(wkb + WKB_HEADER_SIZE, len - WKB_HEADER_SIZE, + (wkbByteOrder) wkb[0], res) ? geom : NULL; } diff --git a/sql/spatial.h b/sql/spatial.h index f0c8b7bba28..0c0452b5abc 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -246,8 +246,8 @@ public: static Geometry *create_from_wkt(Geometry_buffer *buffer, Gis_read_stream *trs, String *wkt, bool init_stream=1); - static int create_from_wkb(Geometry_buffer *buffer, - const char *wkb, uint32 len, String *res); + static Geometry *create_from_wkb(Geometry_buffer *buffer, const char *wkb, + uint32 len, String *res); int as_wkt(String *wkt, const char **end) { uint32 len= get_class_info()->m_name.length; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 0d9653172e0..aaa88071173 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1813,8 +1813,7 @@ static bool update_user_table(THD *thd, TABLE *table, table->key_info->key_length); if (table->file->index_read_idx(table->record[0], 0, - (byte *) user_key, - table->key_info->key_length, + (byte *) user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), @@ -1905,8 +1904,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - user_key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* what == 'N' means revoke */ @@ -2123,8 +2121,7 @@ static int replace_db_table(TABLE *table, const char *db, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0],0, - user_key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0],0, user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { if (what == 'N') @@ -2341,9 +2338,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) col_privs->field[4]->store("",0, &my_charset_latin1); col_privs->file->ha_index_init(0, 1); - if (col_privs->file->index_read(col_privs->record[0], - (byte*) key, - key_prefix_len, HA_READ_KEY_EXACT)) + if (col_privs->file->index_read(col_privs->record[0], (byte*) key, + (key_part_map)15, HA_READ_KEY_EXACT)) { cols = 0; /* purecov: deadcode */ col_privs->file->ha_index_end(); @@ -2479,7 +2475,7 @@ static int replace_column_table(GRANT_TABLE *g_t, table->field[3]->store(table_name,(uint) strlen(table_name), 
system_charset_info); - /* Get length of 3 first key parts */ + /* Get length of 4 first key parts */ key_prefix_length= (key_part[0].store_length + key_part[1].store_length + key_part[2].store_length + key_part[3].store_length); key_copy(key, table->record[0], table->key_info, key_prefix_length); @@ -2505,8 +2501,7 @@ static int replace_column_table(GRANT_TABLE *g_t, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read(table->record[0], user_key, - table->key_info->key_length, + if (table->file->index_read(table->record[0], user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { if (revoke_grant) @@ -2582,8 +2577,7 @@ static int replace_column_table(GRANT_TABLE *g_t, key_copy(user_key, table->record[0], table->key_info, key_prefix_length); - if (table->file->index_read(table->record[0], user_key, - key_prefix_length, + if (table->file->index_read(table->record[0], user_key, (key_part_map)15, HA_READ_KEY_EXACT)) goto end; @@ -2684,8 +2678,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - user_key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* @@ -2803,13 +2796,13 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name, table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); table->field[3]->store(routine_name,(uint) strlen(routine_name), &my_charset_latin1); - table->field[4]->store((longlong)(is_proc ? + table->field[4]->store((longlong)(is_proc ? TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION), TRUE); store_record(table,record[1]); // store at pos 1 - if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr,0, + if (table->file->index_read_idx(table->record[0], 0, + (byte*) table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* @@ -3001,6 +2994,13 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_WRITE; tables[0].db=tables[1].db=tables[2].db=(char*) "mysql"; + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + #ifdef HAVE_REPLICATION /* GRANT and REVOKE are applied the slave in/exclusion rules as they are @@ -3218,6 +3218,13 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc, tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) "mysql"; + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + #ifdef HAVE_REPLICATION /* GRANT and REVOKE are applied the slave in/exclusion rules as they are @@ -3357,6 +3364,13 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) "mysql"; + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. 
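Several hunks in sql_acl.cc (and in sp.cc earlier) replace the key-length argument of index_read()/index_read_idx() with a key_part_map: a bitmask with one bit per key part, lowest bit first, so HA_WHOLE_KEY selects every part while (key_part_map)15, (key_part_map)3 and (key_part_map)1 select the first four, two and one parts respectively. A tiny illustrative helper for building such a mask is shown below; the typedef and function name are made up for the example and are not part of the handler interface.

    typedef unsigned long key_part_map_t;   /* stand-in for the server typedef */

    /* Mask covering the first n key parts: n == 4 gives 0b1111 == 15,
       matching the (key_part_map)15 casts in the hunks above. */
    static inline key_part_map_t first_keyparts_mask(unsigned n_key_parts)
    {
      return (key_part_map_t(1) << n_key_parts) - 1;
    }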
+ */ + thd->clear_current_stmt_binlog_row_based(); + #ifdef HAVE_REPLICATION /* GRANT and REVOKE are applied the slave in/exclusion rules as they are @@ -4987,7 +5001,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, key_copy(user_key, table->record[0], table->key_info, key_prefix_length); if ((error= table->file->index_read_idx(table->record[0], 0, - user_key, key_prefix_length, + user_key, (key_part_map)3, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) @@ -5169,6 +5183,8 @@ static int handle_grant_struct(uint struct_no, bool drop, user= grant_name->user; host= grant_name->host.hostname; break; + default: + assert(0); } if (! user) user= ""; @@ -5399,6 +5415,13 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list) TABLE_LIST tables[GRANT_TABLES]; DBUG_ENTER("mysql_create_user"); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + /* CREATE USER may be skipped on replication client. */ if ((result= open_grant_tables(thd, tables))) DBUG_RETURN(result != 1); @@ -5471,6 +5494,13 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list) TABLE_LIST tables[GRANT_TABLES]; DBUG_ENTER("mysql_drop_user"); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + /* DROP USER may be skipped on replication client. */ if ((result= open_grant_tables(thd, tables))) DBUG_RETURN(result != 1); @@ -5535,6 +5565,13 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list) TABLE_LIST tables[GRANT_TABLES]; DBUG_ENTER("mysql_rename_user"); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + /* RENAME USER may be skipped on replication client. */ if ((result= open_grant_tables(thd, tables))) DBUG_RETURN(result != 1); @@ -5610,6 +5647,13 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list) TABLE_LIST tables[GRANT_TABLES]; DBUG_ENTER("mysql_revoke_all"); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. + */ + thd->clear_current_stmt_binlog_row_based(); + if ((result= open_grant_tables(thd, tables))) DBUG_RETURN(result != 1); @@ -5800,6 +5844,13 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name, rw_wrlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); + /* + This statement will be replicated as a statement, even when using + row-based replication. The flag will be reset at the end of the + statement. 
+ */ + thd->clear_current_stmt_binlog_row_based(); + /* Remove procedure access */ do { @@ -6028,6 +6079,8 @@ int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond) char *curr_host= thd->security_ctx->priv_host_name(); DBUG_ENTER("fill_schema_user_privileges"); + if (!initialized) + DBUG_RETURN(0); pthread_mutex_lock(&acl_cache->lock); for (counter=0 ; counter < acl_users.elements ; counter++) @@ -6087,6 +6140,8 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond) char *curr_host= thd->security_ctx->priv_host_name(); DBUG_ENTER("fill_schema_schema_privileges"); + if (!initialized) + DBUG_RETURN(0); pthread_mutex_lock(&acl_cache->lock); for (counter=0 ; counter < acl_dbs.elements ; counter++) diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 86d2cabc703..cba283ec65b 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -47,8 +47,7 @@ don't forget to update 1. static struct show_privileges_st sys_privileges[] 2. static const char *command_array[] and static uint command_lengths[] - 3. mysql_create_system_tables.sh, mysql_fix_privilege_tables.sql - and mysql-test/lib/init_db.sql + 3. mysql_system_tables.sql and mysql_system_tables_fix.sql 4. acl_init() or whatever - to define behaviour for old privilege tables 5. sql_yacc.yy - for GRANT/REVOKE to work */ diff --git a/sql/sql_base.cc b/sql/sql_base.cc index fd2e8445132..82cce335f00 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -28,6 +28,59 @@ #include <io.h> #endif +/** + This internal handler is used to trap internally + errors that can occur when executing open table + during the prelocking phase. +*/ +class Prelock_error_handler : public Internal_error_handler +{ +public: + Prelock_error_handler() + : m_handled_errors(0), m_unhandled_errors(0) + {} + + virtual ~Prelock_error_handler() {} + + virtual bool handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level, + THD *thd); + + bool safely_trapped_errors(); + +private: + int m_handled_errors; + int m_unhandled_errors; +}; + + +bool +Prelock_error_handler::handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level /* level */, + THD * /* thd */) +{ + if (sql_errno == ER_NO_SUCH_TABLE) + { + m_handled_errors++; + return TRUE; // 'TRUE', as per coding style + } + + m_unhandled_errors++; + return FALSE; // 'FALSE', as per coding style +} + + +bool Prelock_error_handler::safely_trapped_errors() +{ + /* + If m_unhandled_errors != 0, something else, unanticipated, happened, + so the error is not trapped but returned to the caller. + Multiple ER_NO_SUCH_TABLE can be raised in case of views. + */ + return ((m_handled_errors > 0) && (m_unhandled_errors == 0)); +} + + TABLE *unused_tables; /* Used by mysql_test */ HASH open_cache; /* Used by mysql_test */ static HASH table_def_cache; @@ -805,6 +858,7 @@ void free_io_cache(TABLE *table) DBUG_VOID_RETURN; } + /* Close all tables which aren't in use by any thread @@ -917,6 +971,71 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, /* + Close all tables which match specified connection string or + if specified string is NULL, then any table with a connection string. 
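The Prelock_error_handler introduced in sql_base.cc above follows a push/trap/inspect pattern: the handler swallows only the one error code that is benign during prelocking (ER_NO_SUCH_TABLE), counts everything else, and the caller later asks whether all trapped errors were benign. A stripped-down model of that pattern follows; class and method names are simplified and this is not the Internal_error_handler interface itself.

    /* Counts errors of one "benign" code and everything else separately. */
    class Error_trap
    {
    public:
      explicit Error_trap(unsigned benign_errno)
        : m_benign_errno(benign_errno), m_handled(0), m_unhandled(0) {}

      /* Returns true when the error is swallowed, false to let it propagate. */
      bool handle_error(unsigned sql_errno)
      {
        if (sql_errno == m_benign_errno)
        {
          m_handled++;
          return true;
        }
        m_unhandled++;
        return false;
      }

      /* Report success only if every trapped error was of the benign kind. */
      bool safely_trapped_errors() const
      { return m_handled > 0 && m_unhandled == 0; }

    private:
      unsigned m_benign_errno;
      int m_handled;
      int m_unhandled;
    };

In the patch the real handler is pushed only for prelocking placeholders, so a genuinely missing user table still fails as before.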
+*/ + +bool close_cached_connection_tables(THD *thd, bool if_wait_for_refresh, + LEX_STRING *connection, bool have_lock) +{ + uint idx; + TABLE_LIST tmp, *tables= NULL; + bool result= FALSE; + DBUG_ENTER("close_cached_connections"); + DBUG_ASSERT(thd); + + bzero(&tmp, sizeof(TABLE_LIST)); + + if (!have_lock) + VOID(pthread_mutex_lock(&LOCK_open)); + + for (idx= 0; idx < table_def_cache.records; idx++) + { + TABLE_SHARE *share= (TABLE_SHARE *) hash_element(&table_def_cache, idx); + + /* Ignore if table is not open or does not have a connect_string */ + if (!share->connect_string.length || !share->ref_count) + continue; + + /* Compare the connection string */ + if (connection && + (connection->length > share->connect_string.length || + (connection->length < share->connect_string.length && + (share->connect_string.str[connection->length] != '/' && + share->connect_string.str[connection->length] != '\\')) || + strncasecmp(connection->str, share->connect_string.str, + connection->length))) + continue; + + /* close_cached_tables() only uses these elements */ + tmp.db= share->db.str; + tmp.table_name= share->table_name.str; + tmp.next_local= tables; + + tables= (TABLE_LIST *) memdup_root(thd->mem_root, (char*)&tmp, + sizeof(TABLE_LIST)); + } + + if (tables) + result= close_cached_tables(thd, FALSE, tables, TRUE); + + if (!have_lock) + VOID(pthread_mutex_unlock(&LOCK_open)); + + if (if_wait_for_refresh) + { + pthread_mutex_lock(&thd->mysys_var->mutex); + thd->mysys_var->current_mutex= 0; + thd->mysys_var->current_cond= 0; + thd->proc_info=0; + pthread_mutex_unlock(&thd->mysys_var->mutex); + } + + DBUG_RETURN(result); +} + + +/* Mark all tables in the list which were used by current substatement as free for reuse. @@ -1078,7 +1197,6 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) if (!thd->active_transaction()) thd->transaction.xid_state.xid.null(); - /* VOID(pthread_sigmask(SIG_SETMASK,&thd->block_signals,NULL)); */ if (!lock_in_use) VOID(pthread_mutex_lock(&LOCK_open)); @@ -1142,12 +1260,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) } else { - if (table->s->flush_version != flush_version) - { - table->s->flush_version= flush_version; - table->file->extra(HA_EXTRA_FLUSH); - } - // Free memory and reset for next loop + /* Free memory and reset for next loop */ table->file->ha_reset(); table->in_use=0; if (unused_tables) @@ -1208,11 +1321,12 @@ void close_temporary_tables(THD *thd) const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "; uint stub_len= sizeof(stub) - 1; char buf[256]; - memcpy(buf, stub, stub_len); String s_query= String(buf, sizeof(buf), system_charset_info); - bool found_user_tables= false; + bool found_user_tables= FALSE; LINT_INIT(next); + memcpy(buf, stub, stub_len); + /* insertion sort of temp tables by pseudo_thread_id to build ordered list of sublists of equal pseudo_thread_id @@ -1263,10 +1377,13 @@ void close_temporary_tables(THD *thd) { if (is_user_table(table)) { + my_thread_id save_pseudo_thread_id= thd->variables.pseudo_thread_id; /* Set pseudo_thread_id to be that of the processed table */ thd->variables.pseudo_thread_id= tmpkeyval(thd, table); - /* Loop forward through all tables within the sublist of - common pseudo_thread_id to create single DROP query */ + /* + Loop forward through all tables within the sublist of + common pseudo_thread_id to create single DROP query. 
+ */ for (s_query.length(stub_len); table && is_user_table(table) && tmpkeyval(thd, table) == thd->variables.pseudo_thread_id; @@ -1277,10 +1394,10 @@ void close_temporary_tables(THD *thd) due to special characters in the names */ append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str)); - s_query.q_append('.'); + s_query.append('.'); append_identifier(thd, &s_query, table->s->table_name.str, strlen(table->s->table_name.str)); - s_query.q_append(','); + s_query.append(','); next= table->next; close_temporary(table, 1, 1); } @@ -1292,16 +1409,18 @@ void close_temporary_tables(THD *thd) 0, FALSE); thd->variables.character_set_client= cs_save; /* - Imagine the thread had created a temp table, then was doing a SELECT, and - the SELECT was killed. Then it's not clever to mark the statement above as - "killed", because it's not really a statement updating data, and there - are 99.99% chances it will succeed on slave. - If a real update (one updating a persistent table) was killed on the - master, then this real update will be logged with error_code=killed, - rightfully causing the slave to stop. + Imagine the thread had created a temp table, then was doing a + SELECT, and the SELECT was killed. Then it's not clever to + mark the statement above as "killed", because it's not really + a statement updating data, and there are 99.99% chances it + will succeed on slave. If a real update (one updating a + persistent table) was killed on the master, then this real + update will be logged with error_code=killed, rightfully + causing the slave to stop. */ qinfo.error_code= 0; mysql_bin_log.write(&qinfo); + thd->variables.pseudo_thread_id= save_pseudo_thread_id; } else { @@ -1357,6 +1476,7 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, thd thread handle table table which should be checked table_list list of tables + check_alias whether to check tables' aliases NOTE: to exclude derived tables from check we use following mechanism: a) during derived table processing set THD::derived_tables_processing @@ -1384,10 +1504,11 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, 0 if table is unique */ -TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list) +TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, + bool check_alias) { TABLE_LIST *res; - const char *d_name, *t_name; + const char *d_name, *t_name, *t_alias; DBUG_ENTER("unique_table"); DBUG_PRINT("enter", ("table alias: %s", table->alias)); @@ -1415,6 +1536,7 @@ TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list) } d_name= table->db; t_name= table->table_name; + t_alias= table->alias; DBUG_PRINT("info", ("real table: %s.%s", d_name, t_name)); for (;;) @@ -1422,6 +1544,9 @@ TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list) if (((! (res= find_table_in_global_list(table_list, d_name, t_name))) && (! (res= mysql_lock_have_duplicate(thd, table, table_list)))) || ((!res->table || res->table != table->table) && + (!check_alias || !(lower_case_table_names ? + my_strcasecmp(files_charset_info, t_alias, res->alias) : + strcmp(t_alias, res->alias))) && res->select_lex && !res->select_lex->exclude_from_table_unique_test && !res->prelocking_placeholder)) break; @@ -1519,9 +1644,15 @@ TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list) { if (table->s->table_cache_key.length == key_length && !memcmp(table->s->table_cache_key.str, key, key_length)) + { + DBUG_PRINT("info", + ("Found table. 
server_id: %u pseudo_thread_id: %lu", + (uint) thd->server_id, + (ulong) thd->variables.pseudo_thread_id)); DBUG_RETURN(table); + } } - DBUG_RETURN(0); // Not a temporary table + DBUG_RETURN(0); // Not a temporary table } @@ -1777,7 +1908,6 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) share= table->s; share->version=0; - share->flush_version=0; table->in_use = thd; check_unused(); table->next = thd->open_tables; @@ -1787,8 +1917,6 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) table->const_table=0; table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= share->keys_in_use; - table->used_keys= share->keys_for_keyread; DBUG_RETURN(FALSE); } @@ -1845,6 +1973,13 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, key_length= (create_table_def_key(thd, key, table_list, 1) - TMP_TABLE_KEY_EXTRA); + /* + Unless requested otherwise, try to resolve this table in the list + of temporary tables of this thread. In MySQL temporary tables + are always thread-local and "shadow" possible base tables with the + same name. This block implements the behaviour. + TODO: move this block into a separate function. + */ if (!table_list->skip_temporary) { for (table= thd->temporary_tables; table ; table=table->next) @@ -1854,9 +1989,19 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, !memcmp(table->s->table_cache_key.str, key, key_length + TMP_TABLE_KEY_EXTRA)) { + /* + We're trying to use the same temporary table twice in a query. + Right now we don't support this because a temporary table + is always represented by only one TABLE object in THD, and + it can not be cloned. Emit an error for an unsupported behaviour. + */ if (table->query_id == thd->query_id || thd->prelocked_mode && table->query_id) { + DBUG_PRINT("error", + ("query_id: %lu server_id: %u pseudo_thread_id: %lu", + (ulong) table->query_id, (uint) thd->server_id, + (ulong) thd->variables.pseudo_thread_id)); my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias); DBUG_RETURN(0); } @@ -1869,6 +2014,13 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, } } + /* + The table is not temporary - if we're in pre-locked or LOCK TABLES + mode, let's try to find the requested table in the list of pre-opened + and locked tables. If the table is not there, return an error - we can't + open not pre-opened tables in pre-locked/LOCK TABLES mode. + TODO: move this block into a separate function. + */ if (!(flags & MYSQL_OPEN_IGNORE_LOCKED_TABLES) && (thd->locked_tables || thd->prelocked_mode)) { // Using table locks @@ -1940,7 +2092,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, goto reset; } /* - is it view? + Is this table a view and not a base table? (it is work around to allow to open view with locked tables, real fix will be made after definition cache will be made) */ @@ -1968,12 +2120,39 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, VOID(pthread_mutex_unlock(&LOCK_open)); } } - my_error(ER_TABLE_NOT_LOCKED, MYF(0), alias); + if ((thd->locked_tables) && (thd->locked_tables->lock_count > 0)) + my_error(ER_TABLE_NOT_LOCKED, MYF(0), alias); + else + my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->alias); DBUG_RETURN(0); } + /* + Non pre-locked/LOCK TABLES mode, and the table is not temporary: + this is the normal use case. + Now we should: + - try to find the table in the table cache. 
+ - if one of the discovered TABLE instances is name-locked + (table->s->version == 0) or some thread has started FLUSH TABLES + (refresh_version > table->s->version), back off -- we have to wait + until no one holds a name lock on the table. + - if there is no such TABLE in the name cache, read the table definition + and insert it into the cache. + We perform all of the above under LOCK_open which currently protects + the open cache (also known as table cache) and table definitions stored + on disk. + */ + VOID(pthread_mutex_lock(&LOCK_open)); + /* + If it's the first table from a list of tables used in a query, + remember refresh_version (the version of open_cache state). + If the version changes while we're opening the remaining tables, + we will have to back off, close all the tables opened-so-far, + and try to reopen them. + Note: refresh_version is currently changed only during FLUSH TABLES. + */ if (!thd->open_tables) thd->version=refresh_version; else if ((thd->version != refresh_version) && @@ -1990,6 +2169,16 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, if (thd->handler_tables) mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE, TRUE); + /* + Actually try to find the table in the open_cache. + The cache may contain several "TABLE" instances for the same + physical table. The instances that are currently "in use" by + some thread have their "in_use" member != NULL. + There is no good reason for having more than one entry in the + hash for the same physical table, except that we use this as + an implicit "pending locks queue" - see + wait_for_locked_table_names for details. + */ for (table= (TABLE*) hash_first(&open_cache, (byte*) key, key_length, &state); table && table->in_use ; @@ -1999,6 +2188,21 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, /* Here we flush tables marked for flush. However we never flush log tables here. They are flushed only on FLUSH LOGS. + Normally, table->s->version contains the value of + refresh_version from the moment when this table was + (re-)opened and added to the cache. + If since then we did (or just started) FLUSH TABLES + statement, refresh_version has been increased. + For "name-locked" TABLE instances, table->s->version is set + to 0 (see lock_table_name for details). + In case there is a pending FLUSH TABLES or a name lock, we + need to back off and re-start opening tables. + If we do not back off now, we may dead lock in case of lock + order mismatch with some other thread: + c1: name lock t1; -- sort of exclusive lock + c2: open t2; -- sort of shared lock + c1: name lock t2; -- blocks + c2: open t1; -- blocks */ if (table->s->version != refresh_version && !table->s->log_table) { @@ -2014,16 +2218,35 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, } /* - There is a refresh in progress for this table - Wait until the table is freed or the thread is killed. + Back off, part 1: mark the table as "unused" for the + purpose of name-locking by setting table->db_stat to 0. Do + that only for the tables in this thread that have an old + table->s->version (this is an optimization (?)). + table->db_stat == 0 signals wait_for_locked_table_names + that the tables in question are not used any more. See + table_is_used call for details. 
*/ close_old_data_files(thd,thd->open_tables,0,0); + /* + Back-off part 2: try to avoid "busy waiting" on the table: + if the table is in use by some other thread, we suspend + and wait till the operation is complete: when any + operation that juggles with table->s->version completes, + it broadcasts COND_refresh condition variable. + */ if (table->in_use != thd) + { + /* wait_for_conditionwill unlock LOCK_open for us */ wait_for_condition(thd, &LOCK_open, &COND_refresh); + } else { VOID(pthread_mutex_unlock(&LOCK_open)); } + /* + There is a refresh in progress for this table. + Signal the caller that it has to try again. + */ if (refresh) *refresh=1; DBUG_RETURN(0); @@ -2031,6 +2254,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, } if (table) { + /* Unlink the table from "unused_tables" list. */ if (table == unused_tables) { // First unused unused_tables=unused_tables->next; // Remove from link @@ -2043,6 +2267,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, } else { + /* Insert a new TABLE instance into the open cache */ int error; /* Free cache if too big */ while (open_cache.records > table_cache_size && unused_tables) @@ -2110,11 +2335,12 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->const_table=0; table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->s->keys_in_use; table->insert_values= 0; - table->used_keys= table->s->keys_for_keyread; table->fulltext_searched= 0; table->file->ft_handler= 0; + /* Catch wrong handling of the auto_increment_field_not_null. */ + DBUG_ASSERT(!table->auto_increment_field_not_null); + table->auto_increment_field_not_null= FALSE; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); table->pos_in_table_list= table_list; @@ -2197,8 +2423,6 @@ static bool reopen_table(TABLE *table) tmp.null_row= table->null_row; tmp.maybe_null= table->maybe_null; tmp.status= table->status; - tmp.keys_in_use_for_query= tmp.s->keys_in_use; - tmp.used_keys= tmp.s->keys_for_keyread; tmp.s->table_map_id= table->s->table_map_id; @@ -2851,6 +3075,8 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) MEM_ROOT new_frm_mem; /* Also used for indicating that prelocking is need */ TABLE_LIST **query_tables_last_own; + bool safe_to_ignore_table; + DBUG_ENTER("open_tables"); /* temporary mem_root for new .frm parsing. @@ -2897,8 +3123,13 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) } } + /* + For every table in the list of tables to open, try to find or open + a table. + */ for (tables= *start; tables ;tables= tables->next_global) { + safe_to_ignore_table= FALSE; // 'FALSE', as per coding style /* Ignore placeholders for derived tables. After derived tables processing, link to created temporary table will be put here. @@ -2911,6 +3142,12 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) goto process_view_routines; continue; } + /* + If this TABLE_LIST object is a placeholder for an information_schema + table, create a temporary table to represent the information_schema + table in the query. Do not fill it yet - will be filled during + execution. 
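The comments added to open_table() above describe a back-off protocol: under LOCK_open, a share whose version is older than refresh_version (or zero, meaning name-locked) forces the caller to close what it has opened so far, wait on COND_refresh, and signal its caller to retry. The following is a loose, self-contained sketch of that wait-and-retry shape using plain pthreads; the structure and function names are invented for illustration and omit the real code's handling of name locks and log tables.

    #include <pthread.h>

    struct Table_cache
    {
      pthread_mutex_t lock;        /* stands in for LOCK_open    */
      pthread_cond_t  refresh;     /* stands in for COND_refresh */
      unsigned long   refresh_version;
    };

    /* Returns false when the caller must back off, reopen its tables and retry. */
    static bool try_use_cached_table(Table_cache *cache,
                                     unsigned long share_version,
                                     bool *need_retry)
    {
      pthread_mutex_lock(&cache->lock);
      if (share_version != cache->refresh_version)
      {
        /* A FLUSH TABLES or name lock is pending: release what we opened
           so far (not shown), wait for the flusher, then tell the caller
           to retry. */
        pthread_cond_wait(&cache->refresh, &cache->lock);
        pthread_mutex_unlock(&cache->lock);
        *need_retry= true;
        return false;
      }
      /* ... mark the TABLE instance as in use by this thread here ... */
      pthread_mutex_unlock(&cache->lock);
      *need_retry= false;
      return true;
    }

The c1/c2 deadlock scenario in the comment (a name lock and an open taken in opposite order by two connections) is exactly what this early back-off avoids.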
+ */ if (tables->schema_table) { if (!mysql_schema_table(thd, thd->lex, tables)) @@ -2918,9 +3155,32 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) DBUG_RETURN(-1); } (*counter)++; - - if (!tables->table && - !(tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags))) + + /* + Not a placeholder: must be a base table or a view, and the table is + not opened yet. Try to open the table. + */ + if (!tables->table) + { + if (tables->prelocking_placeholder) + { + /* + For the tables added by the pre-locking code, attempt to open + the table but fail silently if the table does not exist. + The real failure will occur when/if a statement attempts to use + that table. + */ + Prelock_error_handler prelock_handler; + thd->push_internal_handler(& prelock_handler); + tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags); + thd->pop_internal_handler(); + safe_to_ignore_table= prelock_handler.safely_trapped_errors(); + } + else + tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags); + } + + if (!tables->table) { free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); @@ -2971,6 +3231,14 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) close_tables_for_reopen(thd, start); goto restart; } + + if (safe_to_ignore_table) + { + DBUG_PRINT("info", ("open_table: ignoring table '%s'.'%s'", + tables->db, tables->alias)); + continue; + } + result= -1; // Fatal error break; } @@ -3025,7 +3293,7 @@ process_view_routines: { /* Serious error during reading stored routines from mysql.proc table. - Something's wrong with the table or its contents, and an error has + Something is wrong with the table or its contents, and an error has been emitted; we must abort. */ result= -1; @@ -3274,7 +3542,7 @@ bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags) static void mark_real_tables_as_free_for_reuse(TABLE_LIST *table) { for (; table; table= table->next_global) - if (!table->placeholder() && !table->schema_table) + if (!table->placeholder()) table->table->query_id= 0; } @@ -3347,7 +3615,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) DBUG_RETURN(-1); for (table= tables; table; table= table->next_global) { - if (!table->placeholder() && !table->schema_table) + if (!table->placeholder()) *(ptr++)= table->table; } @@ -3400,7 +3668,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) for (table= tables; table != first_not_own; table= table->next_global) { - if (!table->placeholder() && !table->schema_table) + if (!table->placeholder()) { table->table->query_id= thd->query_id; if (check_lock_and_start_stmt(thd, table->table, table->lock_type)) @@ -3427,7 +3695,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) TABLE_LIST *first_not_own= thd->lex->first_not_own_table(); for (table= tables; table != first_not_own; table= table->next_global) { - if (!table->placeholder() && !table->schema_table && + if (!table->placeholder() && check_lock_and_start_stmt(thd, table->table, table->lock_type)) { ha_rollback_stmt(thd); @@ -3507,8 +3775,11 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, uint key_length; TABLE_LIST table_list; DBUG_ENTER("open_temporary_table"); - DBUG_PRINT("enter", ("table: '%s'.'%s' path: '%s'", - db, table_name, path)); + DBUG_PRINT("enter", + ("table: '%s'.'%s' path: '%s' server_id: %u " + "pseudo_thread_id: %lu", + db, table_name, path, + (uint) thd->server_id, (ulong) 
thd->variables.pseudo_thread_id)); table_list.db= (char*) db; table_list.table_name= (char*) table_name; @@ -3544,7 +3815,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked share->tmp_table= (tmp_table->file->has_transactions() ? - TRANSACTIONAL_TMP_TABLE : TMP_TABLE); + TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE); if (link_in_list) { @@ -3612,7 +3883,7 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table) been set for all fields (for example for view). */ - table->used_keys.intersect(field->part_of_key); + table->covering_keys.intersect(field->part_of_key); table->merge_keys.merge(field->part_of_key); if (thd->mark_used_columns == MARK_COLUMNS_READ) @@ -3795,6 +4066,7 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name, if (nj_col->view_field) { Item *item; + LINT_INIT(arena); if (register_tree_change) arena= thd->activate_stmt_arena_if_needed(&backup); /* @@ -3978,6 +4250,9 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, { Field *fld; DBUG_ENTER("find_field_in_table_ref"); + DBUG_ASSERT(table_list->alias); + DBUG_ASSERT(name); + DBUG_ASSERT(item_name); DBUG_PRINT("enter", ("table: '%s' field name: '%s' item name: '%s' ref 0x%lx", table_list->alias, name, item_name, (ulong) ref)); @@ -4371,10 +4646,13 @@ find_field_in_tables(THD *thd, Item_ident *item, return not_found_item, report other errors, return 0 IGNORE_ERRORS Do not report errors, return 0 if error - unaliased Set to true if item is field which was found - by original field name and not by its alias - in item list. Set to false otherwise. - + resolution Set to the resolution type if the item is found + (it says whether the item is resolved + against an alias name, + or as a field name without alias, + or as a field hidden by alias, + or ignoring alias) + RETURN VALUES 0 Item is not found or item is not unique, error message is reported @@ -4390,7 +4668,8 @@ Item **not_found_item= (Item**) 0x1; Item ** find_item_in_list(Item *find, List<Item> &items, uint *counter, - find_item_error_report_type report_error, bool *unaliased) + find_item_error_report_type report_error, + enum_resolution_type *resolution) { List_iterator<Item> li(items); Item **found=0, **found_unaliased= 0, *item; @@ -4404,10 +4683,9 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, */ bool is_ref_by_name= 0; uint unaliased_counter; - LINT_INIT(unaliased_counter); // Dependent on found_unaliased - *unaliased= FALSE; + *resolution= NOT_RESOLVED; is_ref_by_name= (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM); @@ -4474,63 +4752,77 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, } found_unaliased= li.ref(); unaliased_counter= i; + *resolution= RESOLVED_IGNORING_ALIAS; if (db_name) break; // Perfect match } } - else if (!my_strcasecmp(system_charset_info, item_field->name, - field_name)) - { - /* - If table name was not given we should scan through aliases - (or non-aliased fields) first. We are also checking unaliased - name of the field in then next else-if, to be able to find - instantly field (hidden by alias) if no suitable alias (or - non-aliased field) was found. 
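find_item_in_list() above now reports how a match was made through enum_resolution_type instead of a single "unaliased" flag: an alias (or non-aliased) match in the select list is preferred, and a field hidden behind an alias is used only as a fallback. The sketch below is a greatly simplified model of that preference order: it ignores table/db qualifiers (so RESOLVED_IGNORING_ALIAS never occurs) and the ER_NON_UNIQ_ERROR ambiguity checks, and all structure and function names are invented for the example.

    #include <strings.h>   /* strcasecmp (POSIX) */
    #include <string>
    #include <vector>

    enum resolution_type
    {
      NOT_RESOLVED,
      RESOLVED_AGAINST_ALIAS,
      RESOLVED_WITH_NO_ALIAS,
      RESOLVED_BEHIND_ALIAS
    };

    struct Select_item
    {
      std::string alias;        /* name as written in the select list */
      std::string field_name;   /* underlying column name             */
    };

    static int find_item(const std::vector<Select_item> &items,
                         const std::string &name, resolution_type *res)
    {
      int behind_alias= -1;
      for (size_t i= 0; i < items.size(); i++)
      {
        if (!strcasecmp(items[i].alias.c_str(), name.c_str()))
        {
          *res= (items[i].alias == items[i].field_name) ?
                RESOLVED_WITH_NO_ALIAS : RESOLVED_AGAINST_ALIAS;
          return (int) i;                 /* alias match wins immediately */
        }
        if (behind_alias < 0 &&
            !strcasecmp(items[i].field_name.c_str(), name.c_str()))
          behind_alias= (int) i;          /* remember, keep scanning for an alias */
      }
      if (behind_alias >= 0)
      {
        *res= RESOLVED_BEHIND_ALIAS;
        return behind_alias;
      }
      *res= NOT_RESOLVED;
      return -1;
    }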
- */ - if (found) - { - if ((*found)->eq(item, 0)) - continue; // Same field twice - if (report_error != IGNORE_ERRORS) - my_error(ER_NON_UNIQ_ERROR, MYF(0), - find->full_name(), current_thd->where); - return (Item**) 0; - } - found= li.ref(); - *counter= i; - } - else if (!my_strcasecmp(system_charset_info, item_field->field_name, - field_name)) + else { - /* - We will use un-aliased field or react on such ambiguities only if - we won't be able to find aliased field. - Again if we have ambiguity with field outside of select list - we should prefer fields from select list. - */ - if (found_unaliased) + int fname_cmp= my_strcasecmp(system_charset_info, + item_field->field_name, + field_name); + if (!my_strcasecmp(system_charset_info, + item_field->name,field_name)) { - if ((*found_unaliased)->eq(item, 0)) - continue; // Same field twice - found_unaliased_non_uniq= 1; + /* + If table name was not given we should scan through aliases + and non-aliased fields first. We are also checking unaliased + name of the field in then next else-if, to be able to find + instantly field (hidden by alias) if no suitable alias or + non-aliased field was found. + */ + if (found) + { + if ((*found)->eq(item, 0)) + continue; // Same field twice + if (report_error != IGNORE_ERRORS) + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); + return (Item**) 0; + } + found= li.ref(); + *counter= i; + *resolution= fname_cmp ? RESOLVED_AGAINST_ALIAS: + RESOLVED_WITH_NO_ALIAS; } - else + else if (!fname_cmp) { + /* + We will use non-aliased field or react on such ambiguities only if + we won't be able to find aliased field. + Again if we have ambiguity with field outside of select list + we should prefer fields from select list. + */ + if (found_unaliased) + { + if ((*found_unaliased)->eq(item, 0)) + continue; // Same field twice + found_unaliased_non_uniq= 1; + } found_unaliased= li.ref(); unaliased_counter= i; } } } - else if (!table_name && (find->eq(item,0) || - is_ref_by_name && find->name && item->name && - !my_strcasecmp(system_charset_info, - item->name,find->name))) - { - found= li.ref(); - *counter= i; - break; - } + else if (!table_name) + { + if (is_ref_by_name && find->name && item->name && + !my_strcasecmp(system_charset_info,item->name,find->name)) + { + found= li.ref(); + *counter= i; + *resolution= RESOLVED_AGAINST_ALIAS; + break; + } + else if (find->eq(item,0)) + { + found= li.ref(); + *counter= i; + *resolution= RESOLVED_IGNORING_ALIAS; + break; + } + } } if (!found) { @@ -4545,7 +4837,7 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, { found= found_unaliased; *counter= unaliased_counter; - *unaliased= TRUE; + *resolution= RESOLVED_BEHIND_ALIAS; } } if (found) @@ -4831,7 +5123,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, TABLE *table_1= nj_col_1->table_ref->table; /* Mark field_1 used for table cache. */ bitmap_set_bit(table_1->read_set, field_1->field_index); - table_1->used_keys.intersect(field_1->part_of_key); + table_1->covering_keys.intersect(field_1->part_of_key); table_1->merge_keys.merge(field_1->part_of_key); } if (field_2) @@ -4839,7 +5131,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, TABLE *table_2= nj_col_2->table_ref->table; /* Mark field_2 used for table cache. 
*/ bitmap_set_bit(table_2->read_set, field_2->field_index); - table_2->used_keys.intersect(field_2->part_of_key); + table_2->covering_keys.intersect(field_2->part_of_key); table_2->merge_keys.merge(field_2->part_of_key); } @@ -5280,7 +5572,8 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, Item_int do not need fix_fields() because it is basic constant. */ - it.replace(new Item_int("Not_used", (longlong) 1, 21)); + it.replace(new Item_int("Not_used", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); } else if (insert_fields(thd, ((Item_field*) item)->context, ((Item_field*) item)->db_name, @@ -5327,6 +5620,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, enum_mark_columns save_mark_used_columns= thd->mark_used_columns; nesting_map save_allow_sum_func= thd->lex->allow_sum_func; List_iterator<Item> it(fields); + bool save_is_item_list_lookup; DBUG_ENTER("setup_fields"); thd->mark_used_columns= mark_used_columns; @@ -5334,6 +5628,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, if (allow_sum_func) thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; thd->where= THD::DEFAULT_WHERE; + save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; + thd->lex->current_select->is_item_list_lookup= 0; /* To prevent fail on forward lookup we fill it with zerows, @@ -5356,6 +5652,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, if (!item->fixed && item->fix_fields(thd, it.ref()) || (item= *(it.ref()))->check_cols(1)) { + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; thd->lex->allow_sum_func= save_allow_sum_func; thd->mark_used_columns= save_mark_used_columns; DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns)); @@ -5369,6 +5666,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, thd->used_tables|= item->used_tables(); thd->lex->current_select->cur_pos_in_select_list++; } + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS; thd->lex->allow_sum_func= save_allow_sum_func; @@ -5443,21 +5741,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context, uint tablenr= 0; DBUG_ENTER("setup_tables"); - /* - Due to the various call paths that lead to setup_tables() it may happen - that context->table_list and context->first_name_resolution_table can be - NULL (this is typically done when creating TABLE_LISTs internally). - TODO: - Investigate all cases when this my happen, initialize the name resolution - context correctly in all those places, and remove the context reset below. - */ - if (!context->table_list || !context->first_name_resolution_table) - { - /* Test whether the context is in a consistent state. */ - DBUG_ASSERT(!context->first_name_resolution_table && !context->table_list); - context->table_list= context->first_name_resolution_table= tables; - } - + DBUG_ASSERT ((select_insert && !tables->next_name_resolution_table) || !tables || + (context->table_list && context->first_name_resolution_table)); /* this is used for INSERT ... SELECT. 
For select we setup tables except first (and its underlying tables) @@ -5483,30 +5768,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context, tablenr= 0; } setup_table_map(table, table_list, tablenr); - table->used_keys= table->s->keys_for_keyread; - table->merge_keys.clear_all(); - if (table_list->use_index) - { - key_map map; - get_key_map_from_key_list(&map, table, table_list->use_index); - if (map.is_set_all()) - DBUG_RETURN(1); - /* - Don't introduce keys in keys_in_use_for_query that weren't there - before. FORCE/USE INDEX should not add keys, it should only remove - all keys except the key(s) specified in the hint. - */ - table->keys_in_use_for_query.intersect(map); - } - if (table_list->ignore_index) - { - key_map map; - get_key_map_from_key_list(&map, table, table_list->ignore_index); - if (map.is_set_all()) - DBUG_RETURN(1); - table->keys_in_use_for_query.subtract(map); - } - table->used_keys.intersect(table->keys_in_use_for_query); + if (table_list->process_index_hints(table)) + DBUG_RETURN(1); } if (tablenr > MAX_TABLES) { @@ -5582,7 +5845,8 @@ bool setup_tables_and_check_access(THD *thd, &leaves_tmp, select_insert)) return TRUE; - *leaves= leaves_tmp; + if (leaves) + *leaves= leaves_tmp; for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf) { @@ -5788,7 +6052,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, bitmap_set_bit(field->table->read_set, field->field_index); if (table) { - table->used_keys.intersect(field->part_of_key); + table->covering_keys.intersect(field->part_of_key); table->merge_keys.merge(field->part_of_key); } if (tables->is_natural_join) @@ -5806,7 +6070,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, if (field_table) { thd->used_tables|= field_table->map; - field_table->used_keys.intersect(field->part_of_key); + field_table->covering_keys.intersect(field->part_of_key); field_table->merge_keys.merge(field->part_of_key); field_table->used_fields++; } @@ -5875,6 +6139,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, */ bool it_is_update= (select_lex == &thd->lex->select_lex) && thd->lex->which_check_option_applicable(); + bool save_is_item_list_lookup= select_lex->is_item_list_lookup; + select_lex->is_item_list_lookup= 0; DBUG_ENTER("setup_conds"); if (select_lex->conds_processed_with_permanent_arena || @@ -5950,9 +6216,11 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, select_lex->where= *conds; select_lex->conds_processed_with_permanent_arena= 1; } + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; DBUG_RETURN(test(thd->net.report_error)); err_no_arena: + select_lex->is_item_list_lookup= save_is_item_list_lookup; DBUG_RETURN(1); } @@ -5973,6 +6241,11 @@ err_no_arena: values values to fill with ignore_errors TRUE if we should ignore errors + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. + RETURN FALSE OK TRUE error occured @@ -5985,27 +6258,52 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values, List_iterator_fast<Item> f(fields),v(values); Item *value, *fld; Item_field *field; + TABLE *table= 0; DBUG_ENTER("fill_record"); + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (fields.elements) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. 
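Editorial note: the NOTE above boils down to a flag that is only meaningful for the row currently being filled: it is cleared up front, set while filling if the auto-increment column receives an explicit value, and cleared again on the error path so a failed row cannot leak the flag to the caller. A schematic illustration only, not the server code; the structures and parameters below are invented.

#include <cstdio>
#include <vector>

struct Table_sketch
{
  bool auto_increment_field_not_null= false;
  int auto_inc_column= 0;                    // index of the auto-increment column
};

// Fill one row; returns true on error, mirroring the convention above.
static bool fill_record_sketch(Table_sketch &t, const std::vector<int> &values,
                               int fail_at_column)
{
  t.auto_increment_field_not_null= false;    // valid for one row only: reset first
  for (size_t i= 0; i < values.size(); i++)
  {
    if ((int) i == t.auto_inc_column)
      t.auto_increment_field_not_null= true; // explicit value supplied
    if ((int) i == fail_at_column)
    {
      t.auto_increment_field_not_null= false; // error path clears it again
      return true;
    }
  }
  return false;
}

int main()
{
  Table_sketch t;
  fill_record_sketch(t, {42, 7}, /* fail_at_column= */ -1);
  printf("after good row: flag=%d\n", t.auto_increment_field_not_null);
  fill_record_sketch(t, {42, 7}, /* fail_at_column= */ 1);
  printf("after failed row: flag=%d\n", t.auto_increment_field_not_null);
  return 0;
}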
+ */ + fld= (Item_field*)f++; + if (!(field= fld->filed_for_view_update())) + { + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); + goto err; + } + table= field->field->table; + table->auto_increment_field_not_null= FALSE; + f.rewind(); + } while ((fld= f++)) { if (!(field= fld->filed_for_view_update())) { my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); - DBUG_RETURN(TRUE); + goto err; } value=v++; Field *rfield= field->field; - TABLE *table= rfield->table; + table= rfield->table; if (rfield == table->next_number_field) table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) { my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0)); - DBUG_RETURN(TRUE); + goto err; } } DBUG_RETURN(thd->net.report_error); +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } @@ -6054,6 +6352,11 @@ fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields, values list of fields ignore_errors TRUE if we should ignore errors + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. + RETURN FALSE OK TRUE error occured @@ -6064,19 +6367,38 @@ fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors) { List_iterator_fast<Item> v(values); Item *value; + TABLE *table= 0; DBUG_ENTER("fill_record"); Field *field; + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (*ptr) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. + */ + table= (*ptr)->table; + table->auto_increment_field_not_null= FALSE; + } while ((field = *ptr++)) { value=v++; - TABLE *table= field->table; + table= field->table; if (field == table->next_number_field) table->auto_increment_field_not_null= TRUE; if (value->save_in_field(field, 0) == -1) - DBUG_RETURN(TRUE); + goto err; } DBUG_RETURN(thd->net.report_error); + +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } @@ -6681,3 +7003,122 @@ has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables) } return 0; } + + +/* + Open and lock system tables for read. + + SYNOPSIS + open_system_tables_for_read() + thd Thread context. + table_list List of tables to open. + backup Pointer to Open_tables_state instance where + information about currently open tables will be + saved, and from which will be restored when we will + end work with system tables. + + NOTES + Thanks to restrictions which we put on opening and locking of + system tables for writing, we can open and lock them for reading + even when we already have some other tables open and locked. One + must call close_system_tables() to close systems tables opened + with this call. 
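Editorial note: the save/open/restore contract described above can be sketched in isolation: the caller's open-table state is backed up first, every table in the list must open successfully, and any failure restores the backup before returning. Illustrative only; the types and helper names below are invented and stand in for the real Open_tables_state handling.

#include <string>
#include <vector>
#include <cstdio>

struct State_sketch { std::vector<std::string> open_tables; };

static bool open_one(State_sketch &st, const std::string &name)
{
  if (name.empty())                 // simulate an open failure
    return false;
  st.open_tables.push_back(name);
  return true;
}

// Back up the caller's state, then open everything or restore the backup.
static bool open_system_tables_sketch(State_sketch &current,
                                      State_sketch &backup,
                                      const std::vector<std::string> &wanted)
{
  backup= current;                  // reset-and-backup
  current.open_tables.clear();
  for (const auto &name : wanted)
    if (!open_one(current, name))
    {
      current= backup;              // error path: close and restore
      return true;                  // true = error, matching the convention above
    }
  return false;                     // false = success
}

static void close_system_tables_sketch(State_sketch &current,
                                       const State_sketch &backup)
{
  current= backup;                  // close what we opened, restore the backup
}

int main()
{
  State_sketch cur, backup;
  cur.open_tables= {"t1"};          // caller already has something open
  if (!open_system_tables_sketch(cur, backup, {"mysql.proc", "mysql.event"}))
  {
    printf("opened %zu system table(s)\n", cur.open_tables.size());
    close_system_tables_sketch(cur, backup);
  }
  printf("restored, %zu table(s) open again\n", cur.open_tables.size());
  return 0;
}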
+ + RETURN + FALSE Success + TRUE Error +*/ + +bool +open_system_tables_for_read(THD *thd, TABLE_LIST *table_list, + Open_tables_state *backup) +{ + DBUG_ENTER("open_system_tables_for_read"); + + thd->reset_n_backup_open_tables_state(backup); + + uint count= 0; + bool not_used; + for (TABLE_LIST *tables= table_list; tables; tables= tables->next_global) + { + TABLE *table= open_table(thd, tables, thd->mem_root, ¬_used, + MYSQL_LOCK_IGNORE_FLUSH); + if (!table) + goto error; + + DBUG_ASSERT(table->s->system_table); + + table->use_all_columns(); + table->reginfo.lock_type= tables->lock_type; + tables->table= table; + count++; + } + + { + TABLE **list= (TABLE**) thd->alloc(sizeof(TABLE*) * count); + TABLE **ptr= list; + for (TABLE_LIST *tables= table_list; tables; tables= tables->next_global) + *(ptr++)= tables->table; + + thd->lock= mysql_lock_tables(thd, list, count, + MYSQL_LOCK_IGNORE_FLUSH, ¬_used); + } + if (thd->lock) + DBUG_RETURN(FALSE); + +error: + close_system_tables(thd, backup); + + DBUG_RETURN(TRUE); +} + + +/* + Close system tables, opened with open_system_tables_for_read(). + + SYNOPSIS + close_system_tables() + thd Thread context + backup Pointer to Open_tables_state instance which holds + information about tables which were open before we + decided to access system tables. +*/ + +void +close_system_tables(THD *thd, Open_tables_state *backup) +{ + close_thread_tables(thd); + thd->restore_backup_open_tables_state(backup); +} + + +/* + Open and lock one system table for update. + + SYNOPSIS + open_system_table_for_update() + thd Thread context. + one_table Table to open. + + NOTES + Table opened with this call should closed using close_thread_tables(). + + RETURN + 0 Error + # Pointer to TABLE object of system table +*/ + +TABLE * +open_system_table_for_update(THD *thd, TABLE_LIST *one_table) +{ + DBUG_ENTER("open_system_table_for_update"); + + TABLE *table= open_ltable(thd, one_table, one_table->lock_type); + if (table) + { + DBUG_ASSERT(table->s->system_table); + table->use_all_columns(); + } + + DBUG_RETURN(table); +} diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index d8f12375258..6f7bbda96de 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -163,9 +163,17 @@ void mysql_client_binlog_statement(THD* thd) (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET))); #endif ev->thd= thd; - if (int err= ev->exec_event(thd->rli_fake)) + /* + We go directly to the application phase, since we don't need + to check if the event shall be skipped or not. + + Neither do we have to update the log positions, since that is + not used at all: the rli_fake instance is used only for error + reporting. + */ + if (IF_DBUG(int err= ) ev->apply_event(thd->rli_fake)) { - DBUG_PRINT("error", ("exec_event() returned: %d", err)); + DBUG_PRINT("info", ("apply_event() returned: %d", err)); /* TODO: Maybe a better error message since the BINLOG statement now contains several events. diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 8c0cb72e1f4..89b7a25033f 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -844,6 +844,12 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG); flags.client_protocol_41= test(thd->client_capabilities & CLIENT_PROTOCOL_41); + /* + Protocol influences result format, so statement results in the binary + protocol (COM_EXECUTE) cannot be served to statements asking for results + in the text protocol (COM_QUERY) and vice-versa. 
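Editorial note: a minimal sketch of why the protocol has to be part of the query-cache flags: two identical query strings issued over different protocols must not hit the same cache entry, because the stored result bytes are formatted differently. The types below are invented stand-ins, using std::map instead of the real cache structures.

#include <map>
#include <string>
#include <utility>
#include <cstdio>

// Illustrative cache key: the query text plus the flag that influences the
// result format. Only the protocol flag is shown here.
enum class Proto_sketch { TEXT, BINARY };
using Cache_key= std::pair<std::string, Proto_sketch>;

int main()
{
  std::map<Cache_key, std::string> cache;

  // A result produced for the text protocol (COM_QUERY style).
  cache[{ "SELECT 1", Proto_sketch::TEXT }]= "text-formatted result";

  // The same statement asked over the binary protocol (COM_EXECUTE style)
  // must miss, otherwise the client would get bytes in the wrong format.
  auto hit= cache.find({ "SELECT 1", Proto_sketch::BINARY });
  printf("binary-protocol lookup: %s\n",
         hit == cache.end() ? "miss (as required)" : "hit");
  return 0;
}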
+ */ + flags.result_in_binary_protocol= (unsigned int) thd->protocol->type(); flags.more_results_exists= test(thd->server_status & SERVER_MORE_RESULTS_EXISTS); flags.pkt_nr= net->pkt_nr; @@ -861,11 +867,13 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) flags.max_sort_length= thd->variables.max_sort_length; flags.lc_time_names= thd->variables.lc_time_names; flags.group_concat_max_len= thd->variables.group_concat_max_len; - DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \ + DBUG_PRINT("qcache", ("\ +long %d, 4.1: %d, bin_proto: %d, more results %d, pkt_nr: %d, \ CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", (int)flags.client_long_flag, (int)flags.client_protocol_41, + (int)flags.result_in_binary_protocol, (int)flags.more_results_exists, flags.pkt_nr, flags.character_set_client_num, @@ -1089,6 +1097,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG); flags.client_protocol_41= test(thd->client_capabilities & CLIENT_PROTOCOL_41); + flags.result_in_binary_protocol= (unsigned int)thd->protocol->type(); flags.more_results_exists= test(thd->server_status & SERVER_MORE_RESULTS_EXISTS); flags.pkt_nr= thd->net.pkt_nr; @@ -1104,11 +1113,13 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) flags.max_sort_length= thd->variables.max_sort_length; flags.group_concat_max_len= thd->variables.group_concat_max_len; flags.lc_time_names= thd->variables.lc_time_names; - DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \ + DBUG_PRINT("qcache", ("\ +long %d, 4.1: %d, bin_proto: %d, more results %d, pkt_nr: %d, \ CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", (int)flags.client_long_flag, (int)flags.client_protocol_41, + (int)flags.result_in_binary_protocol, (int)flags.more_results_exists, flags.pkt_nr, flags.character_set_client_num, @@ -1761,8 +1772,18 @@ void Query_cache::free_cache() { DBUG_ENTER("Query_cache::free_cache"); if (query_cache_size > 0) - { flush_cache(); + /* + There may be two free_cache() calls in progress, because we + release 'structure_guard_mutex' in flush_cache(). When the second + flush_cache() wakes up from the wait on 'COND_flush_finished', the + first call to free_cache() has done its job. So we have to test + 'query_cache_size > 0' the second time to see if the cache wasn't + reset by other thread, or if it was reset and was re-enabled then. + If the cache was reset, then we have nothing to do here. + */ + if (query_cache_size > 0) + { #ifndef DBUG_OFF if (bins[0].free_blocks == 0) { @@ -1804,6 +1825,12 @@ void Query_cache::free_cache() flush_in_progress flag and releases the lock, so other threads may proceed skipping the cache as if it is disabled. Concurrent flushes are performed in turn. + + After flush_cache() call, the cache is flushed, all the freed + memory is accumulated in bin[0], and the 'structure_guard_mutex' + is locked. However, since we could release the mutex during + execution, the rest of the cache state could have been changed, + and should not be relied on. 
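Editorial note: the re-check described above (testing query_cache_size again after a concurrent flush may have run) is the classic "re-validate the condition after reacquiring the lock" pattern. A standalone sketch of that pattern, not the server code; the mutex, condition variable, and cache_size variable below are invented stand-ins.

#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>

static std::mutex guard;
static std::condition_variable flush_finished;
static bool flush_in_progress= false;
static size_t cache_size= 1024;          // stands in for query_cache_size

static void flush_cache_sketch(std::unique_lock<std::mutex> &lk)
{
  // Wait for a concurrent flush to finish; the wait releases the lock.
  flush_finished.wait(lk, []{ return !flush_in_progress; });
  if (cache_size == 0)                   // someone else already freed it
    return;
  flush_in_progress= true;
  lk.unlock();                           // heavy work done without the lock
  lk.lock();
  flush_in_progress= false;
  flush_finished.notify_all();
}

static void free_cache_sketch()
{
  std::unique_lock<std::mutex> lk(guard);
  if (cache_size == 0)
    return;
  flush_cache_sketch(lk);
  if (cache_size > 0)                    // the second test discussed above
  {
    cache_size= 0;                       // actually release the memory
    printf("cache freed\n");             // printed exactly once overall
  }
}

int main()
{
  std::thread a(free_cache_sketch), b(free_cache_sketch);
  a.join();
  b.join();
  return 0;
}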
*/ void Query_cache::flush_cache() @@ -3032,11 +3059,10 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, TABLE_COUNTER_TYPE table_count; DBUG_ENTER("Query_cache::is_cacheable"); - if (lex->sql_command == SQLCOM_SELECT && + if (query_cache_is_cacheable_query(lex) && (thd->variables.query_cache_type == 1 || (thd->variables.query_cache_type == 2 && (lex->select_lex.options & - OPTION_TO_QUERY_CACHE))) && - lex->safe_to_cache_query) + OPTION_TO_QUERY_CACHE)))) { DBUG_PRINT("qcache", ("options: %lx %lx type: %u", (long) OPTION_TO_QUERY_CACHE, diff --git a/sql/sql_class.cc b/sql/sql_class.cc index d5f81168be3..cc38d63c9f9 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -192,14 +192,10 @@ void **thd_ha_data(const THD *thd, const struct handlerton *hton) } -/* - Pass nominal parameters to Statement constructor only to ensure that - the destructor works OK in case of error. The main_mem_root will be - re-initialized in init(). -*/ THD::THD() - :Statement(CONVENTIONAL_EXECUTION, 0, ALLOC_ROOT_MIN_BLOCK_SIZE, 0), + :Statement(&main_lex, &main_mem_root, CONVENTIONAL_EXECUTION, + /* statement id */ 0), Open_tables_state(refresh_version), rli_fake(0), lock_id(&main_lock_id), user_time(0), in_sub_stmt(0), @@ -216,6 +212,12 @@ THD::THD() { ulong tmp; + /* + Pass nominal parameters to init_alloc_root only to ensure that + the destructor works OK in case of an error. The main_mem_root + will be re-initialized in init_for_queries(). + */ + init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); stmt_arena= this; thread_stack= 0; db= 0; @@ -243,7 +245,7 @@ THD::THD() time_after_lock=(time_t) 0; current_linfo = 0; slave_thread = 0; - variables.pseudo_thread_id= 0; + thread_id= variables.pseudo_thread_id= 0; one_shot_set= 0; file_id = 0; query_id= 0; @@ -261,15 +263,14 @@ THD::THD() #endif client_capabilities= 0; // minimalistic client net.last_error[0]=0; // If error on boot +#ifdef HAVE_QUERY_CACHE query_cache_init_query(&net); // If error on boot +#endif ull=0; system_thread= NON_SYSTEM_THREAD; cleanup_done= abort_on_warning= no_warnings_for_error= 0; peer_port= 0; // For SHOW PROCESSLIST transaction.m_pending_rows_event= 0; -#ifdef __WIN__ - real_id = 0; -#endif #ifdef SIGNAL_WITH_VIO_CLOSE active_vio = 0; #endif @@ -302,9 +303,9 @@ THD::THD() bzero((char*) &user_var_events, sizeof(user_var_events)); /* Protocol */ - protocol= &protocol_simple; // Default protocol - protocol_simple.init(this); - protocol_prep.init(this); + protocol= &protocol_text; // Default protocol + protocol_text.init(this); + protocol_binary.init(this); tablespace_op=FALSE; tmp= sql_rnd_with_mutex(); @@ -312,6 +313,38 @@ THD::THD() substitute_null_with_insert_id = FALSE; thr_lock_info_init(&lock_info); /* safety: will be reset after start */ thr_lock_owner_init(&main_lock_id, &lock_info); + + m_internal_handler= NULL; +} + + +void THD::push_internal_handler(Internal_error_handler *handler) +{ + /* + TODO: The current implementation is limited to 1 handler at a time only. + THD and sp_rcontext need to be modified to use a common handler stack. 
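Editorial note: the push/handle/pop mechanism introduced here follows a try/throw/catch shape. The sketch below is a standalone illustration of that pattern with a single-slot handler, mirroring the "one handler at a time" limitation noted above; the class and function names are invented, not the server's API.

#include <cassert>
#include <cstdio>

struct Error_handler_sketch
{
  virtual ~Error_handler_sketch() {}
  // Return true when the error is considered handled (trapped).
  virtual bool handle(int errcode)= 0;
};

struct Context_sketch
{
  Error_handler_sketch *handler= nullptr;

  void push_handler(Error_handler_sketch *h) { assert(!handler); handler= h; }
  void pop_handler()                         { assert(handler);  handler= nullptr; }

  // Analogue of reporting an error: give the installed handler a chance
  // to trap it before it would be propagated to the client.
  void raise(int errcode)
  {
    if (handler && handler->handle(errcode))
      return;                             // trapped: not propagated further
    printf("error %d propagated\n", errcode);
  }
};

// A handler that traps one specific error code and remembers it.
struct Trap_one : Error_handler_sketch
{
  int wanted, seen= 0;
  explicit Trap_one(int w) : wanted(w) {}
  bool handle(int errcode) override
  {
    if (errcode != wanted)
      return false;
    seen= errcode;
    return true;
  }
};

int main()
{
  Context_sketch ctx;
  Trap_one trap(1146);                    // the error number here is arbitrary
  ctx.push_handler(&trap);                // 'try'
  ctx.raise(1146);                        // 'throw': trapped by the handler
  ctx.pop_handler();                      // 'catch' site inspects what was trapped
  printf("trapped: %d\n", trap.seen);
  ctx.raise(1045);                        // no handler installed: propagated
  return 0;
}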
+ */ + DBUG_ASSERT(m_internal_handler == NULL); + m_internal_handler= handler; +} + + +bool THD::handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level) +{ + if (m_internal_handler) + { + return m_internal_handler->handle_error(sql_errno, level, this); + } + + return FALSE; // 'FALSE', as per coding style +} + + +void THD::pop_internal_handler() +{ + DBUG_ASSERT(m_internal_handler != NULL); + m_internal_handler= NULL; } @@ -357,6 +390,7 @@ void THD::init(void) void THD::init_for_queries() { + set_time(); ha_enable_transaction(this,TRUE); reset_root_defaults(mem_root, variables.query_alloc_block_size, @@ -401,6 +435,8 @@ void THD::change_user(void) void THD::cleanup(void) { DBUG_ENTER("THD::cleanup"); + DBUG_ASSERT(cleanup_done == 0); + #ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE if (transaction.xid_state.xa_state == XA_PREPARED) { @@ -436,7 +472,6 @@ void THD::cleanup(void) pthread_mutex_lock(&LOCK_user_locks); item_user_lock_release(ull); pthread_mutex_unlock(&LOCK_user_locks); - ull= 0; } cleanup_done=1; @@ -485,6 +520,7 @@ THD::~THD() delete rli_fake; #endif + free_root(&main_mem_root, MYF(0)); DBUG_VOID_RETURN; } @@ -550,7 +586,9 @@ void THD::awake(THD::killed_state state_to_set) killed= state_to_set; if (state_to_set != THD::KILL_QUERY) { - thr_alarm_kill(real_id); + thr_alarm_kill(thread_id); + if (!slave_thread) + thread_scheduler.post_kill_notification(this); #ifdef SIGNAL_WITH_VIO_CLOSE close_active_vio(); #endif @@ -601,18 +639,19 @@ bool THD::store_globals() Assert that thread_stack is initialized: it's necessary to be able to track stack overrun. */ - DBUG_ASSERT(this->thread_stack); + DBUG_ASSERT(thread_stack); if (my_pthread_setspecific_ptr(THR_THD, this) || my_pthread_setspecific_ptr(THR_MALLOC, &mem_root)) return 1; mysys_var=my_thread_var; - dbug_thread_id=my_thread_id(); /* - By default 'slave_proxy_id' is 'thread_id'. They may later become different - if this is the slave SQL thread. + Let mysqld define the thread id (not mysys) + This allows us to move THD to different threads if needed. */ - variables.pseudo_thread_id= thread_id; + mysys_var->id= thread_id; + real_id= pthread_self(); // For debugging + /* We have to call thr_lock_info_init() again here as THD may have been created in another thread @@ -640,11 +679,22 @@ bool THD::store_globals() void THD::cleanup_after_query() { + /* + Reset rand_used so that detection of calls to rand() will save random + seeds if needed by the slave. + + Do not reset rand_used if inside a stored function or trigger because + only the call to these operations is logged. Thus only the calling + statement needs to detect rand() calls made by its substatements. These + substatements must not set rand_used to 0 because it would remove the + detection of rand() by the calling statement. 
+ */ if (!in_sub_stmt) /* stored functions and triggers are a special case */ { /* Forget those values, for next binlogger: */ stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + rand_used= 0; } if (first_successful_insert_id_in_cur_stmt > 0) { @@ -995,6 +1045,7 @@ sql_exchange::sql_exchange(char *name,bool flag) enclosed= line_start= &my_empty_string; line_term= &default_line_term; escaped= &default_escaped; + cs= NULL; } bool select_send::send_fields(List<Item> &list, uint flags) @@ -1179,7 +1230,7 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange, IO_CACHE *cache) { File file; - uint option= MY_UNPACK_FILENAME; + uint option= MY_UNPACK_FILENAME | MY_RELATIVE_PATH; #ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS option|= MY_REPLACE_DIR; // Force use of db directory @@ -1193,7 +1244,15 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange, } else (void) fn_format(path, exchange->file_name, mysql_real_data_home, "", option); - + + if (opt_secure_file_priv && + strncmp(opt_secure_file_priv, path, strlen(opt_secure_file_priv))) + { + /* Write only allowed to dir or subdir specified by secure_file_priv */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); + return -1; + } + if (!access(path, F_OK)) { my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name); @@ -1274,7 +1333,6 @@ bool select_export::send_data(List<Item> &items) } row_count++; Item *item; - char *buff_ptr=buff; uint used_length=0,items_left=items.elements; List_iterator_fast<Item> li(items); @@ -1374,19 +1432,18 @@ bool select_export::send_data(List<Item> &items) goto err; } } - buff_ptr=buff; // Place separators here if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT)) { - memcpy(buff_ptr,exchange->enclosed->ptr(),exchange->enclosed->length()); - buff_ptr+=exchange->enclosed->length(); + if (my_b_write(&cache, (byte*) exchange->enclosed->ptr(), + exchange->enclosed->length())) + goto err; } if (--items_left) { - memcpy(buff_ptr,exchange->field_term->ptr(),field_term_length); - buff_ptr+=field_term_length; + if (my_b_write(&cache, (byte*) exchange->field_term->ptr(), + field_term_length)) + goto err; } - if (my_b_write(&cache,(byte*) buff,(uint) (buff_ptr-buff))) - goto err; } if (my_b_write(&cache,(byte*) exchange->line_term->ptr(), exchange->line_term->length())) @@ -1676,18 +1733,17 @@ void Query_arena::cleanup_stmt() Statement functions */ -Statement::Statement(enum enum_state state_arg, ulong id_arg, - ulong alloc_block_size, ulong prealloc_size) - :Query_arena(&main_mem_root, state_arg), +Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg, + enum enum_state state_arg, ulong id_arg) + :Query_arena(mem_root_arg, state_arg), id(id_arg), mark_used_columns(MARK_COLUMNS_READ), - lex(&main_lex), + lex(lex_arg), query(0), query_length(0), cursor(0) { name.str= NULL; - init_sql_alloc(&main_mem_root, alloc_block_size, prealloc_size); } @@ -1729,7 +1785,7 @@ void Statement::restore_backup_statement(Statement *stmt, Statement *backup) void THD::end_statement() { - /* Cleanup SQL processing state to resuse this statement in next query. */ + /* Cleanup SQL processing state to reuse this statement in next query. 
*/ lex_end(lex); delete lex->result; lex->result= 0; @@ -1770,12 +1826,6 @@ void THD::restore_active_arena(Query_arena *set, Query_arena *backup) Statement::~Statement() { - /* - We must free `main_mem_root', not `mem_root' (pointer), to work - correctly if this statement is used as a backup statement, - for which `mem_root' may point to some other statement. - */ - free_root(&main_mem_root, MYF(0)); } C_MODE_START @@ -2156,7 +2206,12 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup, !current_stmt_binlog_row_based) { options&= ~OPTION_BIN_LOG; - } + } + + if ((backup->options & OPTION_BIN_LOG) && is_update_query(lex->sql_command)&& + !current_stmt_binlog_row_based) + mysql_bin_log.start_union_events(this, this->query_id); + /* Disable result sets */ client_capabilities &= ~CLIENT_MULTI_RESULTS; in_sub_stmt|= new_state; @@ -2200,6 +2255,10 @@ void THD::restore_sub_statement_state(Sub_statement_state *backup) sent_row_count= backup->sent_row_count; client_capabilities= backup->client_capabilities; + if ((options & OPTION_BIN_LOG) && is_update_query(lex->sql_command) && + !current_stmt_binlog_row_based) + mysql_bin_log.stop_union_events(this); + /* The following is added to the old values as we are interested in the total complexity of the query @@ -2494,30 +2553,111 @@ my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const } -my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data, - const byte *record) const +/* + Pack a record of data for a table into a format suitable for + transfer via the binary log. + + SYNOPSIS + THD::pack_row() + table Table describing the format of the record + cols Bitmap with a set bit for each column that should be + stored in the row + row_data Pointer to memory where row will be written + record Pointer to record that should be packed. It is assumed + that the pointer refers to either record[0] or + record[1], but no such check is made since the code does + not rely on that. + + DESCRIPTION + + The format for a row in transfer with N fields is the following: + + ceil(N/8) null bytes: + One null bit for every column *regardless of whether it can be + null or not*. This simplifies the decoding. Observe that the + number of null bits is equal to the number of set bits in the + 'cols' bitmap. The number of null bytes is the smallest number + of bytes necessary to store the null bits. + + Padding bits are 1. + + N packets: + Each field is stored in packed format. + + + RETURN VALUE + + The number of bytes written at 'row_data'. + */ +my_size_t +THD::pack_row(TABLE *table, MY_BITMAP const* cols, + byte *const row_data, const byte *record) const { Field **p_field= table->field, *field; - int n_null_bytes= table->s->null_bytes; - byte *ptr; - uint i; + int const null_byte_count= (bitmap_bits_set(cols) + 7) / 8; + byte *pack_ptr = row_data + null_byte_count; + byte *null_ptr = row_data; my_ptrdiff_t const rec_offset= record - table->record[0]; my_ptrdiff_t const def_offset= table->s->default_values - table->record[0]; - memcpy(row_data, record, n_null_bytes); - ptr= row_data+n_null_bytes; - for (i= 0 ; (field= *p_field) ; i++, p_field++) + /* + We write the null bits and the packed records using one pass + through all the fields. The null bytes are written little-endian, + i.e., the first fields are in the first byte. 
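Editorial note: the row format described above can be demonstrated with a small standalone sketch (not the server's pack_row; the types and field layout below are invented): one null bit is reserved per transmitted column, bits are filled little-endian within each byte, data is written only for non-null values, and padding bits in the last null byte are left set to 1.

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>
#include <cstdio>

// Pack a row of nullable 32-bit values: ceil(N/8) null bytes followed by
// the data of the non-null fields.
static size_t pack_row_sketch(const std::vector<std::pair<bool, uint32_t>> &fields,
                              uint8_t *out)
{
  const size_t null_byte_count= (fields.size() + 7) / 8;
  uint8_t *null_ptr= out;                   // null bits go first
  uint8_t *pack_ptr= out + null_byte_count; // field data follows
  unsigned null_bits= 0xFF;                 // padding bits default to 1
  unsigned null_mask= 1;                    // current bit, little-endian

  for (const auto &f : fields)
  {
    if (f.first)                            // field is NULL: keep the bit set
      null_bits|= null_mask;
    else
    {
      null_bits&= ~null_mask;               // clear the bit and store the data
      memcpy(pack_ptr, &f.second, sizeof f.second);
      pack_ptr+= sizeof f.second;
    }
    null_mask<<= 1;
    if ((null_mask & 0xFF) == 0)            // byte is full: flush it
    {
      *null_ptr++= (uint8_t) null_bits;
      null_bits= 0xFF;
      null_mask= 1;
    }
  }
  if (null_mask != 1)                       // write the last (partial) byte
    *null_ptr++= (uint8_t) null_bits;
  return (size_t) (pack_ptr - out);         // bytes written
}

int main()
{
  uint8_t buf[64];
  // Three fields: 1, NULL, 3 -> one null byte (0xFA = 11111010), then 1 and 3.
  size_t len= pack_row_sketch({{false, 1}, {true, 0}, {false, 3}}, buf);
  printf("packed %zu bytes, null byte 0x%02x\n", len, buf[0]);
  return 0;
}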
+ */ + unsigned int null_bits= (1U << 8) - 1; + // Mask to mask out the correct but among the null bits + unsigned int null_mask= 1U; + for ( ; (field= *p_field) ; p_field++) { - if (bitmap_is_set(cols,i)) + DBUG_PRINT("debug", ("null_mask=%d; null_ptr=%p; row_data=%p; null_byte_count=%d", + null_mask, null_ptr, row_data, null_byte_count)); + if (bitmap_is_set(cols, p_field - table->field)) { - my_ptrdiff_t const offset= - field->is_null(rec_offset) ? def_offset : rec_offset; - field->move_field_offset(offset); - ptr= (byte*)field->pack((char *) ptr, field->ptr); - field->move_field_offset(-offset); + my_ptrdiff_t offset; + if (field->is_null(rec_offset)) + { + offset= def_offset; + null_bits |= null_mask; + } + else + { + offset= rec_offset; + null_bits &= ~null_mask; + + /* + We only store the data of the field if it is non-null + */ + pack_ptr= (byte*)field->pack((char *) pack_ptr, field->ptr + offset); + } + + null_mask <<= 1; + if ((null_mask & 0xFF) == 0) + { + DBUG_ASSERT(null_ptr < row_data + null_byte_count); + null_mask = 1U; + *null_ptr++ = null_bits; + null_bits= (1U << 8) - 1; + } } } - return (static_cast<my_size_t>(ptr - row_data)); + + /* + Write the last (partial) byte, if there is one + */ + if ((null_mask & 0xFF) > 1) + { + DBUG_ASSERT(null_ptr < row_data + null_byte_count); + *null_ptr++ = null_bits; + } + + /* + The null pointer should now point to the first byte of the + packed data. If it doesn't, something is very wrong. + */ + DBUG_ASSERT(null_ptr == row_data + null_byte_count); + + return static_cast<my_size_t>(pack_ptr - row_data); } @@ -2553,7 +2693,7 @@ namespace { : m_memory(0) { #ifndef DBUG_OFF - m_alloc_checked= false; + m_alloc_checked= FALSE; #endif allocate_memory(table, len1); m_ptr[0]= has_memory() ? m_memory : 0; @@ -2564,7 +2704,7 @@ namespace { : m_memory(0) { #ifndef DBUG_OFF - m_alloc_checked= false; + m_alloc_checked= FALSE; #endif allocate_memory(table, len1 + len2); m_ptr[0]= has_memory() ? 
m_memory : 0; @@ -2585,7 +2725,7 @@ namespace { */ bool has_memory() const { #ifndef DBUG_OFF - m_alloc_checked= true; + m_alloc_checked= TRUE; #endif return m_memory != 0; } @@ -2594,7 +2734,7 @@ namespace { { DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr)); DBUG_ASSERT(m_ptr[s] != 0); - DBUG_ASSERT(m_alloc_checked == true); + DBUG_ASSERT(m_alloc_checked == TRUE); return m_ptr[s]; } @@ -2624,12 +2764,12 @@ namespace { table->write_row_record= (byte *) alloc_root(&table->mem_root, 2 * maxlen); m_memory= table->write_row_record; - m_release_memory_on_destruction= false; + m_release_memory_on_destruction= FALSE; } else { m_memory= (byte *) my_malloc(total_length, MYF(MY_WME)); - m_release_memory_on_destruction= true; + m_release_memory_on_destruction= TRUE; } } diff --git a/sql/sql_class.h b/sql/sql_class.h index 7babe1eda24..66914bdf908 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -214,7 +214,7 @@ struct system_variables ulong read_rnd_buff_size; ulong div_precincrement; ulong sortbuff_size; - handlerton *table_type; + ulong thread_handling; ulong tx_isolation; ulong completion_type; /* Determines which non-standard SQL behaviour should be enabled */ @@ -231,14 +231,23 @@ struct system_variables ulong trans_prealloc_size; ulong log_warnings; ulong group_concat_max_len; + ulong ndb_autoincrement_prefetch_sz; + ulong ndb_index_stat_cache_entries; + ulong ndb_index_stat_update_freq; + ulong binlog_format; // binlog format for this thd (see enum_binlog_format) /* In slave thread we need to know in behalf of which thread the query is being run to replicate temp tables properly */ - ulong pseudo_thread_id; + my_thread_id pseudo_thread_id; my_bool low_priority_updates; my_bool new_mode; + /* + compatibility option: + - index usage hints (USE INDEX without a FOR clause) behave as in 5.0 + */ + my_bool old_mode; my_bool query_cache_wlock_invalidate; my_bool engine_condition_pushdown; my_bool innodb_table_locks; @@ -248,14 +257,12 @@ struct system_variables my_bool ndb_use_exact_count; my_bool ndb_use_transactions; my_bool ndb_index_stat_enable; - ulong ndb_autoincrement_prefetch_sz; - ulong ndb_index_stat_cache_entries; - ulong ndb_index_stat_update_freq; - ulong binlog_format; // binlog format for this thd (see enum_binlog_format) my_bool old_alter_table; my_bool old_passwords; + handlerton *table_type; + /* Only charset part of these variables is sensible */ CHARSET_INFO *character_set_filesystem; CHARSET_INFO *character_set_client; @@ -276,6 +283,7 @@ struct system_variables DATE_TIME_FORMAT *datetime_format; DATE_TIME_FORMAT *time_format; my_bool sysdate_is_now; + }; @@ -450,8 +458,10 @@ public: class Server_side_cursor; -/* - State of a single command executed against this connection. +/** + @class Statement + @brief State of a single command executed against this connection. + One connection can contain a lot of simultaneously running statements, some of which could be: - prepared, that is, contain placeholders, @@ -469,10 +479,6 @@ class Statement: public ilink, public Query_arena Statement(const Statement &rhs); /* not implemented: */ Statement &operator=(const Statement &rhs); /* non-copyable */ public: - /* FIXME: these must be protected */ - MEM_ROOT main_mem_root; - LEX main_lex; - /* Uniquely identifies each statement object in thread scope; change during statement lifetime. 
FIXME: must be const @@ -523,10 +529,10 @@ public: public: /* This constructor is called for backup statements */ - Statement() { clear_alloc_root(&main_mem_root); } + Statement() {} - Statement(enum enum_state state_arg, ulong id_arg, - ulong alloc_block_size, ulong prealloc_size); + Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg, + enum enum_state state_arg, ulong id_arg); virtual ~Statement(); /* Assign execution context (note: not all members) of given stmt to self */ @@ -538,7 +544,7 @@ public: }; -/* +/** Container for all statements created/used in a connection. Statements in Statement_map have unique Statement::id (guaranteed by id assignment in Statement::Statement) @@ -618,6 +624,10 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state); bool xid_cache_insert(XID_STATE *xid_state); void xid_cache_delete(XID_STATE *xid_state); +/** + @class Security_context + @brief A set of THD members describing the current authenticated user. +*/ class Security_context { public: @@ -649,7 +659,7 @@ public: }; -/* +/** A registry for item tree transformations performed during query optimization. We register only those changes which require a rollback to re-execute a prepared statement or stored procedure @@ -660,7 +670,7 @@ struct Item_change_record; typedef I_List<Item_change_record> Item_change_list; -/* +/** Type of prelocked mode. See comment for THD::prelocked_mode for complete description. */ @@ -669,7 +679,7 @@ enum prelocked_mode_type {NON_PRELOCKED= 0, PRELOCKED= 1, PRELOCKED_UNDER_LOCK_TABLES= 2}; -/* +/** Class that holds information about tables which were opened and locked by the thread. It is also used to save/restore this information in push_open_tables_state()/pop_open_tables_state(). @@ -772,14 +782,17 @@ public: } }; - -/* class to save context when executing a function or trigger */ +/** + @class Sub_statement_state + @brief Used to save context when executing a function or trigger +*/ /* Defines used for Sub_statement_state::in_sub_stmt */ #define SUB_STMT_TRIGGER 1 #define SUB_STMT_FUNCTION 2 + class Sub_statement_state { public: @@ -811,7 +824,51 @@ enum enum_thread_type }; -/* +/** + This class represents the interface for internal error handlers. + Internal error handlers are exception handlers used by the server + implementation. +*/ +class Internal_error_handler +{ +protected: + Internal_error_handler() {} + virtual ~Internal_error_handler() {} + +public: + /** + Handle an error condition. + This method can be implemented by a subclass to achieve any of the + following: + - mask an error internally, prevent exposing it to the user, + - mask an error and throw another one instead. + When this method returns true, the error condition is considered + 'handled', and will not be propagated to upper layers. + It is the responsability of the code installing an internal handler + to then check for trapped conditions, and implement logic to recover + from the anticipated conditions trapped during runtime. + + This mechanism is similar to C++ try/throw/catch: + - 'try' correspond to <code>THD::push_internal_handler()</code>, + - 'throw' correspond to <code>my_error()</code>, + which invokes <code>my_message_sql()</code>, + - 'catch' correspond to checking how/if an internal handler was invoked, + before removing it from the exception stack with + <code>THD::pop_internal_handler()</code>. 
+ + @param sql_errno the error number + @param level the error level + @param thd the calling thread + @return true if the error is handled + */ + virtual bool handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level, + THD *thd) = 0; +}; + + +/** + @class THD For each client connection we create a separate thread with THD serving as a thread/connection descriptor */ @@ -853,8 +910,8 @@ public: NET net; // client connection descriptor MEM_ROOT warn_root; // For warnings and errors Protocol *protocol; // Current protocol - Protocol_simple protocol_simple; // Normal protocol - Protocol_prep protocol_prep; // Binary protocol + Protocol_text protocol_text; // Normal protocol + Protocol_binary protocol_binary; // Binary protocol HASH user_vars; // hash for user variables String packet; // dynamic buffer for network I/O String convert_buffer; // buffer for charset conversions @@ -1064,7 +1121,7 @@ public: } transaction; Field *dup_field; #ifndef __WIN__ - sigset_t signals,block_signals; + sigset_t signals; #endif #ifdef SIGNAL_WITH_VIO_CLOSE Vio* active_vio; @@ -1213,7 +1270,7 @@ public: return first_successful_insert_id_in_prev_stmt; } /* - Used by Intvar_log_event::exec_event() and by "SET INSERT_ID=#" + Used by Intvar_log_event::do_apply_event() and by "SET INSERT_ID=#" (mysqlbinlog). We'll soon add a variant which can take many intervals in argument. */ @@ -1255,7 +1312,7 @@ public: update auto-updatable fields (like auto_increment and timestamp). */ query_id_t query_id, warn_id; - ulong thread_id, col_access; + ulong col_access; #ifdef ERROR_INJECT_SUPPORT ulong error_inject_value; @@ -1264,8 +1321,8 @@ public: ulong statement_id_counter; ulong rand_saved_seed1, rand_saved_seed2; ulong row_count; // Row counter, mainly for errors and warnings - long dbug_thread_id; - pthread_t real_id; + pthread_t real_id; /* For debugging */ + my_thread_id thread_id; uint tmp_table, global_read_lock; uint server_status,open_options; enum enum_thread_type system_thread; @@ -1354,7 +1411,7 @@ public: #ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *work_part_info; #endif - + THD(); ~THD(); @@ -1629,6 +1686,48 @@ public: *p_db_length= db_length; return FALSE; } + thd_scheduler scheduler; + +public: + /** + Add an internal error handler to the thread execution context. + @param handler the exception handler to add + */ + void push_internal_handler(Internal_error_handler *handler); + + /** + Handle an error condition. + @param sql_errno the error number + @param level the error level + @return true if the error is handled + */ + virtual bool handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level); + + /** + Remove the error handler last pushed. + */ + void pop_internal_handler(); + +private: + /** The current internal error handler for this thread, or NULL. */ + Internal_error_handler *m_internal_handler; + /** + The lex to hold the parsed tree of conventional (non-prepared) queries. + Whereas for prepared and stored procedure statements we use an own lex + instance for each new query, for conventional statements we reuse + the same lex. (@see mysql_parse for details). + */ + LEX main_lex; + /** + This memory root is used for two purposes: + - for conventional queries, to allocate structures stored in main_lex + during parsing, and allocate runtime data (execution plan, etc.) + during execution. + - for prepared queries, only to allocate runtime data. The parsed + tree itself is reused between executions and thus is stored elsewhere. 
+ */ + MEM_ROOT main_mem_root; }; @@ -1640,7 +1739,7 @@ public: /* - Used to hold information about file and file structure in exchainge + Used to hold information about file and file structure in exchange via non-DB file (...INTO OUTFILE..., ...LOAD DATA...) XXX: We never call destructor for objects of this class. */ @@ -1653,6 +1752,7 @@ public: bool opt_enclosed; bool dumpfile; ulong skip_lines; + CHARSET_INFO *cs; sql_exchange(char *name,bool dumpfile_flag); }; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc new file mode 100644 index 00000000000..09ee4962235 --- /dev/null +++ b/sql/sql_connect.cc @@ -0,0 +1,1108 @@ +/* Copyright (C) 2007 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +/* + Functions to autenticate and handle reqests for a connection +*/ + +#include "mysql_priv.h" + +#ifdef HAVE_OPENSSL +/* + Without SSL the handshake consists of one packet. This packet + has both client capabilites and scrambled password. + With SSL the handshake might consist of two packets. If the first + packet (client capabilities) has CLIENT_SSL flag set, we have to + switch to SSL and read the second packet. The scrambled password + is in the second packet and client_capabilites field will be ignored. + Maybe it is better to accept flags other than CLIENT_SSL from the + second packet? 
+*/ +#define SSL_HANDSHAKE_SIZE 2 +#define NORMAL_HANDSHAKE_SIZE 6 +#define MIN_HANDSHAKE_SIZE 2 +#else +#define MIN_HANDSHAKE_SIZE 6 +#endif /* HAVE_OPENSSL */ + +#ifdef __WIN__ +static void test_signal(int sig_ptr) +{ +#if !defined( DBUG_OFF) + MessageBox(NULL,"Test signal","DBUG",MB_OK); +#endif +#if defined(OS2) + fprintf(stderr, "Test signal %d\n", sig_ptr); + fflush(stderr); +#endif +} +static void init_signals(void) +{ + int signals[7] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGBREAK,SIGABRT } ; + for (int i=0 ; i < 7 ; i++) + signal( signals[i], test_signal) ; +} +#endif + +/* + Get structure for logging connection data for the current user +*/ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +static HASH hash_user_connections; + +static int get_or_create_user_conn(THD *thd, const char *user, + const char *host, + USER_RESOURCES *mqh) +{ + int return_val= 0; + uint temp_len, user_len; + char temp_user[USER_HOST_BUFF_SIZE]; + struct user_conn *uc; + + DBUG_ASSERT(user != 0); + DBUG_ASSERT(host != 0); + + user_len= strlen(user); + temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1; + (void) pthread_mutex_lock(&LOCK_user_conn); + if (!(uc = (struct user_conn *) hash_search(&hash_user_connections, + (byte*) temp_user, temp_len))) + { + /* First connection for user; Create a user connection object */ + if (!(uc= ((struct user_conn*) + my_malloc(sizeof(struct user_conn) + temp_len+1, + MYF(MY_WME))))) + { + net_send_error(thd, 0, NullS); // Out of memory + return_val= 1; + goto end; + } + uc->user=(char*) (uc+1); + memcpy(uc->user,temp_user,temp_len+1); + uc->host= uc->user + user_len + 1; + uc->len= temp_len; + uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0; + uc->user_resources= *mqh; + uc->intime= thd->thr_create_time; + if (my_hash_insert(&hash_user_connections, (byte*) uc)) + { + my_free((char*) uc,0); + net_send_error(thd, 0, NullS); // Out of memory + return_val= 1; + goto end; + } + } + thd->user_connect=uc; + uc->connections++; +end: + (void) pthread_mutex_unlock(&LOCK_user_conn); + return return_val; + +} + + +/* + check if user has already too many connections + + SYNOPSIS + check_for_max_user_connections() + thd Thread handle + uc User connect object + + NOTES + If check fails, we decrease user connection count, which means one + shouldn't call decrease_user_connections() after this function. 
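Editorial note: the per-user bookkeeping above keys its hash on a buffer laid out as the user name, a terminating NUL, then the host, so the same account name on different hosts gets separate counters. A standalone sketch of that idea with invented names, using std::map and std::string (which can hold the embedded NUL) instead of the server's HASH.

#include <map>
#include <string>
#include <cstdio>

// Illustrative per-account resource record.
struct User_conn_sketch
{
  unsigned connections= 0;
  unsigned conn_per_hour= 0;
};

// Build a "user\0host" style key as described above.
static std::string make_key(const std::string &user, const std::string &host)
{
  std::string key(user);
  key.push_back('\0');
  key+= host;
  return key;
}

int main()
{
  std::map<std::string, User_conn_sketch> per_user;

  // "joe" connecting from two different hosts stays in two distinct records.
  per_user[make_key("joe", "host1")].connections++;
  per_user[make_key("joe", "host2")].connections++;
  per_user[make_key("joe", "host1")].connections++;

  printf("joe@host1: %u connection(s), joe@host2: %u connection(s)\n",
         per_user[make_key("joe", "host1")].connections,
         per_user[make_key("joe", "host2")].connections);
  return 0;
}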
+ + RETURN + 0 ok + 1 error +*/ + +int check_for_max_user_connections(THD *thd, USER_CONN *uc) +{ + int error=0; + DBUG_ENTER("check_for_max_user_connections"); + + (void) pthread_mutex_lock(&LOCK_user_conn); + if (max_user_connections && !uc->user_resources.user_conn && + max_user_connections < (uint) uc->connections) + { + net_printf_error(thd, ER_TOO_MANY_USER_CONNECTIONS, uc->user); + error=1; + goto end; + } + time_out_user_resource_limits(thd, uc); + if (uc->user_resources.user_conn && + uc->user_resources.user_conn < uc->connections) + { + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_user_connections", + (long) uc->user_resources.user_conn); + error= 1; + goto end; + } + if (uc->user_resources.conn_per_hour && + uc->user_resources.conn_per_hour <= uc->conn_per_hour) + { + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_connections_per_hour", + (long) uc->user_resources.conn_per_hour); + error=1; + goto end; + } + uc->conn_per_hour++; + + end: + if (error) + uc->connections--; // no need for decrease_user_connections() here + (void) pthread_mutex_unlock(&LOCK_user_conn); + DBUG_RETURN(error); +} + + +/* + Decrease user connection count + + SYNOPSIS + decrease_user_connections() + uc User connection object + + NOTES + If there is a n user connection object for a connection + (which only happens if 'max_user_connections' is defined or + if someone has created a resource grant for a user), then + the connection count is always incremented on connect. + + The user connect object is not freed if some users has + 'max connections per hour' defined as we need to be able to hold + count over the lifetime of the connection. +*/ + +void decrease_user_connections(USER_CONN *uc) +{ + DBUG_ENTER("decrease_user_connections"); + (void) pthread_mutex_lock(&LOCK_user_conn); + DBUG_ASSERT(uc->connections); + if (!--uc->connections && !mqh_used) + { + /* Last connection for user; Delete it */ + (void) hash_delete(&hash_user_connections,(byte*) uc); + } + (void) pthread_mutex_unlock(&LOCK_user_conn); + DBUG_VOID_RETURN; +} + + +/* + Reset per-hour user resource limits when it has been more than + an hour since they were last checked + + SYNOPSIS: + time_out_user_resource_limits() + thd Thread handler + uc User connection details + + NOTE: + This assumes that the LOCK_user_conn mutex has been acquired, so it is + safe to test and modify members of the USER_CONN structure. +*/ + +void time_out_user_resource_limits(THD *thd, USER_CONN *uc) +{ + time_t check_time = thd->start_time ? thd->start_time : time(NULL); + DBUG_ENTER("time_out_user_resource_limits"); + + /* If more than a hour since last check, reset resource checking */ + if (check_time - uc->intime >= 3600) + { + uc->questions=1; + uc->updates=0; + uc->conn_per_hour=0; + uc->intime=check_time; + } + + DBUG_VOID_RETURN; +} + +/* + Check if maximum queries per hour limit has been reached + returns 0 if OK. 
+*/ + +bool check_mqh(THD *thd, uint check_command) +{ + bool error= 0; + USER_CONN *uc=thd->user_connect; + DBUG_ENTER("check_mqh"); + DBUG_ASSERT(uc != 0); + + (void) pthread_mutex_lock(&LOCK_user_conn); + + time_out_user_resource_limits(thd, uc); + + /* Check that we have not done too many questions / hour */ + if (uc->user_resources.questions && + uc->questions++ >= uc->user_resources.questions) + { + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", + (long) uc->user_resources.questions); + error=1; + goto end; + } + if (check_command < (uint) SQLCOM_END) + { + /* Check that we have not done too many updates / hour */ + if (uc->user_resources.updates && + (sql_command_flags[check_command] & CF_CHANGES_DATA) && + uc->updates++ >= uc->user_resources.updates) + { + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", + (long) uc->user_resources.updates); + error=1; + goto end; + } + } +end: + (void) pthread_mutex_unlock(&LOCK_user_conn); + DBUG_RETURN(error); +} + +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + + +/* + Check if user exist and password supplied is correct. + + SYNOPSIS + check_user() + thd thread handle, thd->security_ctx->{host,user,ip} are used + command originator of the check: now check_user is called + during connect and change user procedures; used for + logging. + passwd scrambled password received from client + passwd_len length of scrambled password + db database name to connect to, may be NULL + check_count dont know exactly + + Note, that host, user and passwd may point to communication buffer. + Current implementation does not depend on that, but future changes + should be done with this in mind; 'thd' is INOUT, all other params + are 'IN'. + + RETURN VALUE + 0 OK; thd->security_ctx->user/master_access/priv_user/db_access and + thd->db are updated; OK is sent to client; + -1 access denied or handshake error; error is sent to client; + >0 error, not sent to client +*/ + +int check_user(THD *thd, enum enum_server_command command, + const char *passwd, uint passwd_len, const char *db, + bool check_count) +{ + DBUG_ENTER("check_user"); + +#ifdef NO_EMBEDDED_ACCESS_CHECKS + thd->main_security_ctx.master_access= GLOBAL_ACLS; // Full rights + /* Change database if necessary */ + if (db && db[0]) + { + /* + thd->db is saved in caller and needs to be freed by caller if this + function returns 0 + */ + thd->reset_db(NULL, 0); + if (mysql_change_db(thd, db, FALSE)) + { + /* Send the error to the client */ + net_send_error(thd); + DBUG_RETURN(-1); + } + } + send_ok(thd); + DBUG_RETURN(0); +#else + + my_bool opt_secure_auth_local; + pthread_mutex_lock(&LOCK_global_system_variables); + opt_secure_auth_local= opt_secure_auth; + pthread_mutex_unlock(&LOCK_global_system_variables); + + /* + If the server is running in secure auth mode, short scrambles are + forbidden. + */ + if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323) + { + net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); + general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); + DBUG_RETURN(-1); + } + if (passwd_len != 0 && + passwd_len != SCRAMBLE_LENGTH && + passwd_len != SCRAMBLE_LENGTH_323) + DBUG_RETURN(ER_HANDSHAKE_ERROR); + + /* + Clear thd->db as it points to something, that will be freed when + connection is closed. We don't want to accidentally free a wrong pointer + if connect failed. Also in case of 'CHANGE USER' failure, current + database will be switched to 'no database selected'. 
+ */ + thd->reset_db(NULL, 0); + + USER_RESOURCES ur; + int res= acl_getroot(thd, &ur, passwd, passwd_len); +#ifndef EMBEDDED_LIBRARY + if (res == -1) + { + /* + This happens when client (new) sends password scrambled with + scramble(), but database holds old value (scrambled with + scramble_323()). Here we please client to send scrambled_password + in old format. + */ + NET *net= &thd->net; + if (opt_secure_auth_local) + { + net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip); + general_log_print(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip); + DBUG_RETURN(-1); + } + /* We have to read very specific packet size */ + if (send_old_password_request(thd) || + my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) + { + inc_host_errors(&thd->remote.sin_addr); + DBUG_RETURN(ER_HANDSHAKE_ERROR); + } + /* Final attempt to check the user based on reply */ + /* So as passwd is short, errcode is always >= 0 */ + res= acl_getroot(thd, &ur, (char *) net->read_pos, SCRAMBLE_LENGTH_323); + } +#endif /*EMBEDDED_LIBRARY*/ + /* here res is always >= 0 */ + if (res == 0) + { + if (!(thd->main_security_ctx.master_access & + NO_ACCESS)) // authentication is OK + { + DBUG_PRINT("info", + ("Capabilities: %lu packet_length: %ld Host: '%s' " + "Login user: '%s' Priv_user: '%s' Using password: %s " + "Access: %lu db: '%s'", + thd->client_capabilities, + thd->max_client_packet_length, + thd->main_security_ctx.host_or_ip, + thd->main_security_ctx.user, + thd->main_security_ctx.priv_user, + passwd_len ? "yes": "no", + thd->main_security_ctx.master_access, + (thd->db ? thd->db : "*none*"))); + + if (check_count) + { + VOID(pthread_mutex_lock(&LOCK_thread_count)); + bool count_ok= thread_count <= max_connections + delayed_insert_threads + || (thd->main_security_ctx.master_access & SUPER_ACL); + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + if (!count_ok) + { // too many connections + net_send_error(thd, ER_CON_COUNT_ERROR); + DBUG_RETURN(-1); + } + } + + /* + Log the command before authentication checks, so that the user can + check the log for the tried login tried and also to detect + break-in attempts. + */ + general_log_print(thd, command, + (thd->main_security_ctx.priv_user == + thd->main_security_ctx.user ? + (char*) "%s@%s on %s" : + (char*) "%s@%s as anonymous on %s"), + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, + db ? db : (char*) ""); + + /* + This is the default access rights for the current database. It's + set to 0 here because we don't have an active database yet (and we + may not have an active database to set. + */ + thd->main_security_ctx.db_access=0; + + /* Don't allow user to connect if he has done too many queries */ + if ((ur.questions || ur.updates || ur.conn_per_hour || ur.user_conn || + max_user_connections) && + get_or_create_user_conn(thd, + (opt_old_style_user_limits ? thd->main_security_ctx.user : + thd->main_security_ctx.priv_user), + (opt_old_style_user_limits ? 
thd->main_security_ctx.host_or_ip : + thd->main_security_ctx.priv_host), + &ur)) + DBUG_RETURN(-1); + if (thd->user_connect && + (thd->user_connect->user_resources.conn_per_hour || + thd->user_connect->user_resources.user_conn || + max_user_connections) && + check_for_max_user_connections(thd, thd->user_connect)) + DBUG_RETURN(-1); + + /* Change database if necessary */ + if (db && db[0]) + { + if (mysql_change_db(thd, db, FALSE)) + { + /* Send error to the client */ + net_send_error(thd); + if (thd->user_connect) + decrease_user_connections(thd->user_connect); + DBUG_RETURN(-1); + } + } + send_ok(thd); + thd->password= test(passwd_len); // remember for error messages + /* Ready to handle queries */ + DBUG_RETURN(0); + } + } + else if (res == 2) // client gave short hash, server has long hash + { + net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); + general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); + DBUG_RETURN(-1); + } + net_printf_error(thd, ER_ACCESS_DENIED_ERROR, + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, + passwd_len ? ER(ER_YES) : ER(ER_NO)); + general_log_print(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, + passwd_len ? ER(ER_YES) : ER(ER_NO)); + DBUG_RETURN(-1); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ +} + + +/* + Check for maximum allowable user connections, if the mysqld server is + started with corresponding variable that is greater then 0. +*/ + +extern "C" byte *get_key_conn(user_conn *buff, uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=buff->len; + return (byte*) buff->user; +} + + +extern "C" void free_user(struct user_conn *uc) +{ + my_free((char*) uc,MYF(0)); +} + + +void init_max_user_conn(void) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + (void) hash_init(&hash_user_connections,system_charset_info,max_connections, + 0,0, + (hash_get_key) get_key_conn, (hash_free_key) free_user, + 0); +#endif +} + + +void free_max_user_conn(void) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + hash_free(&hash_user_connections); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ +} + + +void reset_mqh(LEX_USER *lu, bool get_them= 0) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + (void) pthread_mutex_lock(&LOCK_user_conn); + if (lu) // for GRANT + { + USER_CONN *uc; + uint temp_len=lu->user.length+lu->host.length+2; + char temp_user[USER_HOST_BUFF_SIZE]; + + memcpy(temp_user,lu->user.str,lu->user.length); + memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length); + temp_user[lu->user.length]='\0'; temp_user[temp_len-1]=0; + if ((uc = (struct user_conn *) hash_search(&hash_user_connections, + (byte*) temp_user, temp_len))) + { + uc->questions=0; + get_mqh(temp_user,&temp_user[lu->user.length+1],uc); + uc->updates=0; + uc->conn_per_hour=0; + } + } + else + { + /* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */ + for (uint idx=0;idx < hash_user_connections.records; idx++) + { + USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections, + idx); + if (get_them) + get_mqh(uc->user,uc->host,uc); + uc->questions=0; + uc->updates=0; + uc->conn_per_hour=0; + } + } + (void) pthread_mutex_unlock(&LOCK_user_conn); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ +} + + +void thd_init_client_charset(THD *thd, uint cs_number) +{ + /* + Use server character set and collation if + - opt_character_set_client_handshake is not set + - client has not specified a character set + - client character set is the same as the servers + - client character set doesn't exists in server + */ + if 
(!opt_character_set_client_handshake || + !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !my_strcasecmp(&my_charset_latin1, + global_system_variables.character_set_client->name, + thd->variables.character_set_client->name)) + { + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.character_set_results= + global_system_variables.character_set_results; + } + else + { + thd->variables.character_set_results= + thd->variables.collation_connection= + thd->variables.character_set_client; + } +} + + +/* + Initialize connection threads +*/ + +bool init_new_connection_handler_thread() +{ + pthread_detach_this_thread(); +#if defined(__WIN__) + init_signals(); +#else + /* Win32 calls this in pthread_create */ + if (my_thread_init()) + return 1; +#endif /* __WIN__ */ + return 0; +} + +/* + Perform handshake, authorize client and update thd ACL variables. + + SYNOPSIS + check_connection() + thd thread handle + + RETURN + 0 success, OK is sent to user, thd is updated. + -1 error, which is sent to user + > 0 error code (not sent to user) +*/ + +#ifndef EMBEDDED_LIBRARY +static int check_connection(THD *thd) +{ + uint connect_errors= 0; + NET *net= &thd->net; + ulong pkt_len= 0; + char *end; + + DBUG_PRINT("info", + ("New connection received on %s", vio_description(net->vio))); +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->set_active_vio(net->vio); +#endif + + if (!thd->main_security_ctx.host) // If TCP/IP connection + { + char ip[30]; + + if (vio_peer_addr(net->vio, ip, &thd->peer_port)) + return (ER_BAD_HOST_ERROR); + if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(0)))) + return (ER_OUT_OF_RESOURCES); + thd->main_security_ctx.host_or_ip= thd->main_security_ctx.ip; + vio_in_addr(net->vio,&thd->remote.sin_addr); + if (!(specialflag & SPECIAL_NO_RESOLVE)) + { + vio_in_addr(net->vio,&thd->remote.sin_addr); + thd->main_security_ctx.host= + ip_to_hostname(&thd->remote.sin_addr, &connect_errors); + /* Cut very long hostnames to avoid possible overflows */ + if (thd->main_security_ctx.host) + { + if (thd->main_security_ctx.host != my_localhost) + thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host), + HOSTNAME_LENGTH)]= 0; + thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; + } + if (connect_errors > max_connect_errors) + return(ER_HOST_IS_BLOCKED); + } + DBUG_PRINT("info",("Host: %s ip: %s", + (thd->main_security_ctx.host ? + thd->main_security_ctx.host : "unknown host"), + (thd->main_security_ctx.ip ? 
+ thd->main_security_ctx.ip : "unknown ip"))); + if (acl_check_host(thd->main_security_ctx.host, thd->main_security_ctx.ip)) + return(ER_HOST_NOT_PRIVILEGED); + } + else /* Hostname given means that the connection was on a socket */ + { + DBUG_PRINT("info",("Host: %s", thd->main_security_ctx.host)); + thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; + thd->main_security_ctx.ip= 0; + /* Reset sin_addr */ + bzero((char*) &thd->remote, sizeof(thd->remote)); + } + vio_keepalive(net->vio, TRUE); + { + /* buff[] needs to big enough to hold the server_version variable */ + char buff[SERVER_VERSION_LENGTH + SCRAMBLE_LENGTH + 64]; + ulong client_flags = (CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB | + CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION); + + if (opt_using_transactions) + client_flags|=CLIENT_TRANSACTIONS; +#ifdef HAVE_COMPRESS + client_flags |= CLIENT_COMPRESS; +#endif /* HAVE_COMPRESS */ +#ifdef HAVE_OPENSSL + if (ssl_acceptor_fd) + client_flags |= CLIENT_SSL; /* Wow, SSL is available! */ +#endif /* HAVE_OPENSSL */ + + end= strnmov(buff, server_version, SERVER_VERSION_LENGTH) + 1; + int4store((uchar*) end, thd->thread_id); + end+= 4; + /* + So as check_connection is the only entry point to authorization + procedure, scramble is set here. This gives us new scramble for + each handshake. + */ + create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand); + /* + Old clients does not understand long scrambles, but can ignore packet + tail: that's why first part of the scramble is placed here, and second + part at the end of packet. + */ + end= strmake(end, thd->scramble, SCRAMBLE_LENGTH_323) + 1; + + int2store(end, client_flags); + /* write server characteristics: up to 16 bytes allowed */ + end[2]=(char) default_charset_info->number; + int2store(end+3, thd->server_status); + bzero(end+5, 13); + end+= 18; + /* write scramble tail */ + end= strmake(end, thd->scramble + SCRAMBLE_LENGTH_323, + SCRAMBLE_LENGTH - SCRAMBLE_LENGTH_323) + 1; + + /* At this point we write connection message and read reply */ + if (net_write_command(net, (uchar) protocol_version, "", 0, buff, + (uint) (end-buff)) || + (pkt_len= my_net_read(net)) == packet_error || + pkt_len < MIN_HANDSHAKE_SIZE) + { + inc_host_errors(&thd->remote.sin_addr); + return(ER_HANDSHAKE_ERROR); + } + } +#ifdef _CUSTOMCONFIG_ +#include "_cust_sql_parse.h" +#endif + if (connect_errors) + reset_host_errors(&thd->remote.sin_addr); + if (thd->packet.alloc(thd->variables.net_buffer_length)) + return(ER_OUT_OF_RESOURCES); + + thd->client_capabilities=uint2korr(net->read_pos); + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; + thd->max_client_packet_length= uint4korr(net->read_pos+4); + DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); + thd_init_client_charset(thd, (uint) net->read_pos[8]); + thd->update_charset(); + end= (char*) net->read_pos+32; + } + else + { + thd->max_client_packet_length= uint3korr(net->read_pos+2); + end= (char*) net->read_pos+5; + } + + if (thd->client_capabilities & CLIENT_IGNORE_SPACE) + thd->variables.sql_mode|= MODE_IGNORE_SPACE; +#ifdef HAVE_OPENSSL + DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities)); + if (thd->client_capabilities & CLIENT_SSL) + { + /* Do the SSL layering. 
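The comments above explain why the 20-byte scramble is written in two pieces: the first 8 bytes where pre-4.1 clients expect them, and the remaining 12 bytes at the end of the packet where old clients simply stop reading. A minimal standalone sketch of that layout, with made-up constants and a std::string buffer rather than the server's net buffer:

    #include <cstdio>
    #include <string>

    // Stand-ins for the server's SCRAMBLE_LENGTH_323 / SCRAMBLE_LENGTH.
    static const size_t OLD_SCRAMBLE_LEN  = 8;   // part old clients understand
    static const size_t FULL_SCRAMBLE_LEN = 20;  // full 4.1+ scramble

    // Build a simplified greeting: version string, first scramble part,
    // a terminator old clients expect, then the scramble tail they ignore.
    static std::string build_greeting(const char *version,
                                      const unsigned char scramble[FULL_SCRAMBLE_LEN])
    {
      std::string out;
      out.append(version);
      out.push_back('\0');
      out.append((const char*) scramble, OLD_SCRAMBLE_LEN);
      out.push_back('\0');
      out.append((const char*) scramble + OLD_SCRAMBLE_LEN,
                 FULL_SCRAMBLE_LEN - OLD_SCRAMBLE_LEN);
      out.push_back('\0');
      return out;
    }

    int main()
    {
      unsigned char scramble[FULL_SCRAMBLE_LEN];
      for (size_t i= 0; i < FULL_SCRAMBLE_LEN; i++)
        scramble[i]= (unsigned char) ('a' + i);      // fixed bytes, demo only
      std::printf("greeting is %zu bytes\n",
                  build_greeting("5.1.x-demo", scramble).size());
      return 0;
    }

The real packet also carries capability flags, the character set byte and the server status between the two parts; the sketch keeps only the split itself.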
*/ + if (!ssl_acceptor_fd) + { + inc_host_errors(&thd->remote.sin_addr); + return(ER_HANDSHAKE_ERROR); + } + DBUG_PRINT("info", ("IO layer change in progress...")); + if (sslaccept(ssl_acceptor_fd, net->vio, net->read_timeout)) + { + DBUG_PRINT("error", ("Failed to accept new SSL connection")); + inc_host_errors(&thd->remote.sin_addr); + return(ER_HANDSHAKE_ERROR); + } + DBUG_PRINT("info", ("Reading user information over SSL layer")); + if ((pkt_len= my_net_read(net)) == packet_error || + pkt_len < NORMAL_HANDSHAKE_SIZE) + { + DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", + pkt_len)); + inc_host_errors(&thd->remote.sin_addr); + return(ER_HANDSHAKE_ERROR); + } + } +#endif /* HAVE_OPENSSL */ + + if (end >= (char*) net->read_pos+ pkt_len +2) + { + inc_host_errors(&thd->remote.sin_addr); + return(ER_HANDSHAKE_ERROR); + } + + if (thd->client_capabilities & CLIENT_INTERACTIVE) + thd->variables.net_wait_timeout= thd->variables.net_interactive_timeout; + if ((thd->client_capabilities & CLIENT_TRANSACTIONS) && + opt_using_transactions) + net->return_status= &thd->server_status; + + char *user= end; + char *passwd= strend(user)+1; + uint user_len= passwd - user - 1; + char *db= passwd; + char db_buff[NAME_LEN + 1]; // buffer to store db in utf8 + char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 + uint dummy_errors; + + /* + Old clients send null-terminated string as password; new clients send + the size (1 byte) + string (not null-terminated). Hence in case of empty + password both send '\0'. + + This strlen() can't be easily deleted without changing protocol. + */ + uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? + *passwd++ : strlen(passwd); + db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ? + db + passwd_len + 1 : 0; + /* strlen() can't be easily deleted without changing protocol */ + uint db_len= db ? 
strlen(db) : 0; + + if (passwd + passwd_len + db_len > (char *)net->read_pos + pkt_len) + { + inc_host_errors(&thd->remote.sin_addr); + return ER_HANDSHAKE_ERROR; + } + + /* Since 4.1 all database names are stored in utf8 */ + if (db) + { + db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1, + system_charset_info, + db, db_len, + thd->charset(), &dummy_errors)]= 0; + db= db_buff; + } + + user_buff[user_len= copy_and_convert(user_buff, sizeof(user_buff)-1, + system_charset_info, user, user_len, + thd->charset(), &dummy_errors)]= '\0'; + user= user_buff; + + /* If username starts and ends in "'", chop them off */ + if (user_len > 1 && user[0] == '\'' && user[user_len - 1] == '\'') + { + user[user_len-1]= 0; + user++; + user_len-= 2; + } + + if (thd->main_security_ctx.user) + x_free(thd->main_security_ctx.user); + if (!(thd->main_security_ctx.user= my_strdup(user, MYF(0)))) + return (ER_OUT_OF_RESOURCES); + return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE); +} + + +/* + Setup thread to be used with the current thread + + SYNOPSIS + bool setup_connection_thread_globals() + thd Thread/connection handler + + RETURN + 0 ok + 1 Error (out of memory) + In this case we will close the connection and increment status +*/ + +bool setup_connection_thread_globals(THD *thd) +{ + if (thd->store_globals()) + { + close_connection(thd, ER_OUT_OF_RESOURCES, 1); + statistic_increment(aborted_connects,&LOCK_status); + thread_scheduler.end_thread(thd, 0); + return 1; // Error + } + return 0; +} + + +/* + Autenticate user, with error reporting + + SYNOPSIS + login_connection() + thd Thread handler + + NOTES + Connection is not closed in case of errors + + RETURN + 0 ok + 1 error +*/ + + +bool login_connection(THD *thd) +{ + int error; + NET *net= &thd->net; + Security_context *sctx= thd->security_ctx; + DBUG_ENTER("login_connection"); + DBUG_PRINT("info", ("handle_one_connection called by thread %lu", + thd->thread_id)); + + net->no_send_error= 0; + + /* Use "connect_timeout" value during connection phase */ + net_set_read_timeout(net, connect_timeout); + net_set_write_timeout(net, connect_timeout); + + if ((error=check_connection(thd))) + { // Wrong permissions + if (error > 0) + net_printf_error(thd, error, sctx->host_or_ip); +#ifdef __NT__ + if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) + my_sleep(1000); /* must wait after eof() */ +#endif + statistic_increment(aborted_connects,&LOCK_status); + DBUG_RETURN(1); + } + /* Connect completed, set read/write timeouts back to default */ + net_set_read_timeout(net, thd->variables.net_read_timeout); + net_set_write_timeout(net, thd->variables.net_write_timeout); + DBUG_RETURN(0); +} + + +/* + Close an established connection + + NOTES + This mainly updates status variables +*/ + +void end_connection(THD *thd) +{ + NET *net= &thd->net; + if (thd->user_connect) + decrease_user_connections(thd->user_connect); + if (net->error && net->vio != 0 && net->report_error) + { + Security_context *sctx= thd->security_ctx; + if (!thd->killed && thd->variables.log_warnings > 1) + sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), + thd->thread_id,(thd->db ? thd->db : "unconnected"), + sctx->user ? sctx->user : "unauthenticated", + sctx->host_or_ip, + (net->last_errno ? 
ER(net->last_errno) : + ER(ER_UNKNOWN_ERROR))); + net_send_error(thd, net->last_errno, NullS); + statistic_increment(aborted_threads,&LOCK_status); + } + else if (thd->killed) + statistic_increment(aborted_threads,&LOCK_status); +} + + +/* + Initialize THD to handle queries +*/ + +void prepare_new_connection_state(THD* thd) +{ + Security_context *sctx= thd->security_ctx; + +#ifdef __NETWARE__ + netware_reg_user(sctx->ip, sctx->user, "MySQL"); +#endif + + if (thd->variables.max_join_size == HA_POS_ERROR) + thd->options |= OPTION_BIG_SELECTS; + if (thd->client_capabilities & CLIENT_COMPRESS) + thd->net.compress=1; // Use compression + + thd->version= refresh_version; + thd->proc_info= 0; + thd->command= COM_SLEEP; + thd->set_time(); + thd->init_for_queries(); + + if (sys_init_connect.value_length && !(sctx->master_access & SUPER_ACL)) + { + execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect); + if (thd->query_error) + { + thd->killed= THD::KILL_CONNECTION; + sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), + thd->thread_id,(thd->db ? thd->db : "unconnected"), + sctx->user ? sctx->user : "unauthenticated", + sctx->host_or_ip, "init_connect command failed"); + sql_print_warning("%s", thd->net.last_error); + } + thd->proc_info=0; + thd->set_time(); + thd->init_for_queries(); + } +} + + +/* + Thread handler for a connection + + SYNOPSIS + handle_one_connection() + arg Connection object (THD) + + IMPLEMENTATION + This function (normally) does the following: + - Initialize thread + - Initialize THD to be used with this thread + - Authenticate user + - Execute all queries sent on the connection + - Take connection down + - End thread / Handle next connection using thread from thread cache +*/ + +pthread_handler_t handle_one_connection(void *arg) +{ + THD *thd= (THD*) arg; + uint launch_time = + (uint) ((thd->thr_create_time = time(NULL)) - thd->connect_time); + + if (thread_scheduler.init_new_connection_thread()) + { + close_connection(thd, ER_OUT_OF_RESOURCES, 1); + statistic_increment(aborted_connects,&LOCK_status); + thread_scheduler.end_thread(thd,0); + return 0; + } + if (launch_time >= slow_launch_time) + statistic_increment(slow_launch_threads,&LOCK_status); + + /* + handle_one_connection() is normally the only way a thread would + start and would always be on the very high end of the stack , + therefore, the thread stack always starts at the address of the + first local variable of handle_one_connection, which is thd. We + need to know the start of the stack so that we could check for + stack overruns. + */ + thd->thread_stack= (char*) &thd; + if (setup_connection_thread_globals(thd)) + return 0; + + for (;;) + { + NET *net= &thd->net; + + if (login_connection(thd)) + goto end_thread; + + prepare_new_connection_state(thd); + + while (!net->error && net->vio != 0 && + !(thd->killed == THD::KILL_CONNECTION)) + { + net->no_send_error= 0; + if (do_command(thd)) + break; + } + end_connection(thd); + +end_thread: + close_connection(thd, 0, 1); + if (thread_scheduler.end_thread(thd,1)) + return 0; // Probably no-threads + + /* + If end_thread() returns, we are either running with + thread-handler=no-threads or this thread has been schedule to + handle the next connection. 
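A rough standalone illustration of the reuse pattern this comment describes, one worker serving a connection and then looping to pick up the next one instead of exiting; the connection type and the pending queue are hypothetical stand-ins, not the server's scheduler objects:

    #include <cstdio>
    #include <queue>

    struct Connection { int id; };              // hypothetical connection handle

    // In the server this would be the login / command loop; here we just print.
    static void serve(const Connection &c) { std::printf("serving #%d\n", c.id); }

    // One worker "thread": after finishing a connection it asks for the next
    // one (the thread-cache idea) instead of returning, until nothing is left.
    static void connection_worker(std::queue<Connection> &pending)
    {
      while (!pending.empty())
      {
        Connection c= pending.front();
        pending.pop();
        serve(c);                               // login + queries + cleanup
      }
    }

    int main()
    {
      std::queue<Connection> pending;
      for (int i= 1; i <= 3; i++)
        pending.push(Connection{i});
      connection_worker(pending);               // one worker, three connections
      return 0;
    }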
+ */ + thd= current_thd; + thd->thread_stack= (char*) &thd; + } +} +#endif /* EMBEDDED_LIBRARY */ diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index ea8c0e2d83e..d300edd6e18 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -54,6 +54,27 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (mysql_prepare_delete(thd, table_list, &conds)) DBUG_RETURN(TRUE); + /* check ORDER BY even if it can be ignored */ + if (order && order->elements) + { + TABLE_LIST tables; + List<Item> fields; + List<Item> all_fields; + + bzero((char*) &tables,sizeof(tables)); + tables.table = table; + tables.alias = table_list->alias; + + if (select_lex->setup_ref_array(thd, order->elements) || + setup_order(thd, select_lex->ref_pointer_array, &tables, + fields, all_fields, (ORDER*) order->first)) + { + delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(TRUE); + } + } + const_cond= (!conds || conds->const_item()); safe_update=test(thd->options & OPTION_SAFE_UPDATES); if (safe_update && const_cond) @@ -116,7 +137,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, /* Update the table->file->stats.records number */ table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); - table->used_keys.clear_all(); + table->covering_keys.clear_all(); table->quick_keys.clear_all(); // Can't use 'only index' select=make_select(table, 0, 0, conds, 0, &error); if (error) @@ -155,23 +176,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, { uint length= 0; SORT_FIELD *sortorder; - TABLE_LIST tables; - List<Item> fields; - List<Item> all_fields; ha_rows examined_rows; - - bzero((char*) &tables,sizeof(tables)); - tables.table = table; - tables.alias = table_list->alias; - - if (select_lex->setup_ref_array(thd, order->elements) || - setup_order(thd, select_lex->ref_pointer_array, &tables, - fields, all_fields, (ORDER*) order->first)) - { - delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(TRUE); - } if ((!select || table->quick_keys.is_clear_all()) && limit != HA_POS_ERROR) usable_index= get_index_for_order(table, (ORDER*)(order->first), limit); @@ -377,6 +382,7 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) Item *fake_conds= 0; SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_delete"); + List<Item> all_fields; thd->lex->allow_sum_func= 0; if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context, @@ -394,12 +400,17 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) } { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0))) { update_non_unique_table_error(table_list, "DELETE", duplicate); DBUG_RETURN(TRUE); } } + + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array)) + DBUG_RETURN(-1); + select_lex->fix_prepare_information(thd, conds, &fake_conds); DBUG_RETURN(FALSE); } @@ -486,7 +497,7 @@ bool mysql_multi_delete_prepare(THD *thd) { TABLE_LIST *duplicate; if ((duplicate= unique_table(thd, target_tbl->correspondent_table, - lex->query_tables))) + lex->query_tables, 0))) { update_non_unique_table_error(target_tbl->correspondent_table, "DELETE", duplicate); @@ -547,7 +558,7 @@ multi_delete::initialize_tables(JOIN *join) tbl->no_keyread=1; /* Don't use record cache */ tbl->no_cache= 1; - tbl->used_keys.clear_all(); + 
tbl->covering_keys.clear_all(); if (tbl->file->has_transactions()) transactional_tables= 1; else diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 5712fbceac8..89bd7958c86 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -179,7 +179,7 @@ exit: orig_table_list->table_name= table->s->table_name.str; orig_table_list->table_name_length= table->s->table_name.length; table->derived_select_number= first_select->select_number; - table->s->tmp_table= TMP_TABLE; + table->s->tmp_table= NON_TRANSACTIONAL_TMP_TABLE; #ifndef NO_EMBEDDED_ACCESS_CHECKS if (orig_table_list->referencing_view) table->grant= orig_table_list->grant; diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 91e61be0478..cd87330cedb 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -515,7 +515,8 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, } List_iterator<Item> it_ke(*key_expr); Item *item; - for (key_len=0 ; (item=it_ke++) ; key_part++) + key_part_map keypart_map; + for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) { my_bitmap_map *old_map; // 'item' can be changed by fix_fields() call @@ -532,6 +533,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, (void) item->save_in_field(key_part->field, 1); dbug_tmp_restore_column_map(table->write_set, old_map); key_len+=key_part->store_length; + keypart_map= (keypart_map << 1) | 1; } if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len)))) @@ -540,7 +542,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, table->file->ha_index_init(keyno, 1); key_copy(key, table->record[0], table->key_info + keyno, key_len); error= table->file->index_read(table->record[0], - key,key_len,ha_rkey_mode); + key, keypart_map, ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 7b7f7602163..79d658c2a85 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -295,8 +295,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, rkey_id->store((longlong) key_id, TRUE); rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW); int key_res= relations->file->index_read(relations->record[0], - (byte *) buff, - rkey_id->pack_length(), + (byte *) buff, (key_part_map)1, HA_READ_KEY_EXACT); for ( ; @@ -310,7 +309,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, field->get_key_image(topic_id_buff, field->pack_length(), Field::itRAW); if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff, - field->pack_length(), HA_READ_KEY_EXACT)) + (key_part_map)1, HA_READ_KEY_EXACT)) { memorize_variant_topic(thd,topics,count,find_fields, names,name,description,example); @@ -567,7 +566,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, cond->fix_fields(thd, &cond); // can never fail /* Assume that no indexes cover all required fields */ - table->used_keys.clear_all(); + table->covering_keys.clear_all(); SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error); if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) || @@ -655,13 +654,16 @@ bool mysqld_help(THD *thd, const char *mask) tables[3].lock_type= TL_READ; tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; - if (open_and_lock_tables(thd, tables)) - goto error; + Open_tables_state open_tables_state_backup; + if (open_system_tables_for_read(thd, tables, &open_tables_state_backup)) + goto error2; /* Init tables and fields to be usable from items tables do not contain VIEWs => we can pass 0 as conds */ + thd->lex->select_lex.context.table_list= + 
thd->lex->select_lex.context.first_name_resolution_table= &tables[0]; if (setup_tables(thd, &thd->lex->select_lex.context, &thd->lex->select_lex.top_join_list, tables, &leaves, FALSE)) @@ -779,8 +781,13 @@ bool mysqld_help(THD *thd, const char *mask) } send_eof(thd); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(FALSE); + error: + close_system_tables(thd, &open_tables_state_backup); + +error2: DBUG_RETURN(TRUE); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index aaffa09b978..c0e0203ed86 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -59,6 +59,7 @@ #include "sql_trigger.h" #include "sql_select.h" #include "sql_show.h" +#include "slave.h" #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); @@ -363,6 +364,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, Name_resolution_context_state ctx_state; #ifndef EMBEDDED_LIBRARY char *query= thd->query; +#endif /* log_on is about delayed inserts only. By default, both logs are enabled (this won't cause problems if the server @@ -370,7 +372,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ bool log_on= ((thd->options & OPTION_BIN_LOG) || (!(thd->security_ctx->master_access & SUPER_ACL))); -#endif thr_lock_type lock_type = table_list->lock_type; Item *unused_conds= 0; DBUG_ENTER("mysql_insert"); @@ -391,11 +392,42 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, (duplic == DUP_UPDATE)) lock_type=TL_WRITE; #endif + if ((lock_type == TL_WRITE_DELAYED) && + (global_system_variables.binlog_format == BINLOG_FORMAT_STMT) && + log_on && mysql_bin_log.is_open() && + (values_list.elements > 1)) + { + /* + Statement-based binary logging does not work in this case, because: + a) two concurrent statements may have their rows intermixed in the + queue, leading to autoincrement replication problems on slave (because + the values generated used for one statement don't depend only on the + value generated for the first row of this statement, so are not + replicable) + b) if first row of the statement has an error the full statement is + not binlogged, while next rows of the statement may be inserted. + c) if first row succeeds, statement is binlogged immediately with a + zero error code (i.e. "no error"), if then second row fails, query + will fail on slave too and slave will stop (wrongly believing that the + master got no error). + So we fallback to non-delayed INSERT. + Note that to be fully correct, we should test the "binlog format which + the delayed thread is going to use for this row". But in the common case + where the global binlog format is not changed and the session binlog + format may be changed, that is equal to the global binlog format. + We test it without mutex for speed reasons (condition rarely true), and + in the common case (global not changed) it is as good as without mutex; + if global value is changed, anyway there is uncertainty as the delayed + thread may be old and use the before-the-change value. 
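Stated as a predicate, the fallback decision described above reduces to a handful of conditions; a standalone sketch with hypothetical flag names (the real assignment to lock_type follows immediately below):

    #include <cstdio>

    enum binlog_format { FORMAT_STMT, FORMAT_ROW, FORMAT_MIXED };  // simplified

    // True when INSERT DELAYED must be downgraded to a plain INSERT because
    // statement-based logging cannot describe interleaved delayed rows safely.
    static bool must_downgrade_delayed(binlog_format fmt, bool log_on,
                                       bool binlog_open, unsigned value_rows)
    {
      return fmt == FORMAT_STMT && log_on && binlog_open && value_rows > 1;
    }

    int main()
    {
      std::printf("%d\n", must_downgrade_delayed(FORMAT_STMT, true, true, 3)); // 1
      std::printf("%d\n", must_downgrade_delayed(FORMAT_ROW,  true, true, 3)); // 0
      return 0;
    }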
+ */ + lock_type= TL_WRITE; + } table_list->lock_type= lock_type; #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { + res= 1; if (thd->locked_tables) { DBUG_ASSERT(table_list->db); /* Must be set in the parser */ @@ -436,10 +468,15 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, thd->proc_info="init"; thd->used_tables=0; values= its++; + value_count= values->elements; if (mysql_prepare_insert(thd, table_list, table, fields, values, update_fields, update_values, duplic, &unused_conds, - FALSE)) + FALSE, + (fields.elements || !value_count), + !ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)))) goto abort; /* mysql_prepare_insert set table_list->table if it was not set */ @@ -465,7 +502,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table_list->next_local= 0; context->resolve_in_table_list_only(table_list); - value_count= values->elements; while ((values= its++)) { counter++; @@ -504,6 +540,14 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, thd->cuted_fields = 0L; table->next_number_field=table->found_next_number_field; +#ifdef HAVE_REPLICATION + if (thd->slave_thread && + (info.handle_duplicates == DUP_UPDATE) && + (table->next_number_field != NULL) && + rpl_master_has_bug(&active_mi->rli, 24432)) + goto abort; +#endif + error=0; thd->proc_info="update"; if (duplic != DUP_ERROR || ignore) @@ -527,17 +571,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->file->ha_start_bulk_insert(values_list.elements); thd->no_trans_update= 0; - thd->abort_on_warning= (!ignore && - (thd->variables.sql_mode & - (MODE_STRICT_TRANS_TABLES | - MODE_STRICT_ALL_TABLES))); - - if ((fields.elements || !value_count) && - check_that_all_fields_are_given_values(thd, table, table_list)) - { - /* thd->net.report_error is now set, which will abort the next loop */ - error= 1; - } + thd->abort_on_warning= (!ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); table->mark_columns_needed_for_insert(); @@ -717,6 +753,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->next_number_field->val_int() : 0)); table->next_number_field=0; thd->count_cuted_fields= CHECK_FIELD_IGNORE; + table->auto_increment_field_not_null= FALSE; if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); if (duplic == DUP_REPLACE && @@ -914,6 +951,10 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, be taken from table_list->table) where Where clause (for insert ... select) select_insert TRUE if INSERT ... SELECT statement + check_fields TRUE if need to check that all INSERT fields are + given values. + abort_on_warning whether to report if some INSERT field is not + assigned as an error (TRUE) or as a warning (FALSE). 
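At the call sites these two parameters follow the usual strict-mode rule: missing field values become hard errors only when IGNORE was not given and one of the strict sql_mode bits is set. A minimal sketch of that decision with made-up mode bits:

    #include <cstdio>

    // Stand-ins for MODE_STRICT_TRANS_TABLES / MODE_STRICT_ALL_TABLES.
    static const unsigned long STRICT_TRANS_TABLES = 1UL << 0;
    static const unsigned long STRICT_ALL_TABLES   = 1UL << 1;

    // Should an unassigned NOT NULL field abort the statement (error) or
    // merely produce a warning?
    static bool abort_on_missing_field(bool ignore_clause, unsigned long sql_mode)
    {
      return !ignore_clause &&
             (sql_mode & (STRICT_TRANS_TABLES | STRICT_ALL_TABLES)) != 0;
    }

    int main()
    {
      std::printf("%d\n", abort_on_missing_field(false, STRICT_ALL_TABLES)); // 1: error
      std::printf("%d\n", abort_on_missing_field(true,  STRICT_ALL_TABLES)); // 0: warning
      return 0;
    }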
TODO (in far future) In cases of: @@ -934,7 +975,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List<Item> &fields, List_item *values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates duplic, - COND **where, bool select_insert) + COND **where, bool select_insert, + bool check_fields, bool abort_on_warning) { SELECT_LEX *select_lex= &thd->lex->select_lex; Name_resolution_context *context= &select_lex->context; @@ -946,6 +988,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, DBUG_PRINT("enter", ("table_list 0x%lx, table 0x%lx, view %d", (ulong)table_list, (ulong)table, (int)insert_into_view)); + /* INSERT should have a SELECT or VALUES clause */ + DBUG_ASSERT (!select_insert || !values); /* For subqueries in VALUES() we should not see the table in which we are @@ -977,44 +1021,52 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, if (mysql_prepare_insert_check_table(thd, table_list, fields, select_insert)) DBUG_RETURN(TRUE); - /* Save the state of the current name resolution context. */ - ctx_state.save_state(context, table_list); - - /* - Perform name resolution only in the first table - 'table_list', - which is the table that is inserted into. - */ - table_list->next_local= 0; - context->resolve_in_table_list_only(table_list); /* Prepare the fields in the statement. */ - if (values && - !(res= check_insert_fields(thd, context->table_list, fields, *values, - !insert_into_view, &map) || - setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0)) && - duplic == DUP_UPDATE) + if (values) { - select_lex->no_wrap_view_item= TRUE; - res= check_update_fields(thd, context->table_list, update_fields, &map); - select_lex->no_wrap_view_item= FALSE; + /* if we have INSERT ... VALUES () we cannot have a GROUP BY clause */ + DBUG_ASSERT (!select_lex->group_list.elements); + + /* Save the state of the current name resolution context. */ + ctx_state.save_state(context, table_list); + /* - When we are not using GROUP BY we can refer to other tables in the - ON DUPLICATE KEY part. - */ - if (select_lex->group_list.elements == 0) + Perform name resolution only in the first table - 'table_list', + which is the table that is inserted into. + */ + table_list->next_local= 0; + context->resolve_in_table_list_only(table_list); + + res= check_insert_fields(thd, context->table_list, fields, *values, + !insert_into_view, &map) || + setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0); + + if (!res && check_fields) { - context->table_list->next_local= ctx_state.save_next_local; - /* first_name_resolution_table was set by resolve_in_table_list_only() */ - context->first_name_resolution_table-> - next_name_resolution_table= ctx_state.save_next_local; + bool saved_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= abort_on_warning; + res= check_that_all_fields_are_given_values(thd, + table ? table : + context->table_list->table, + context->table_list); + thd->abort_on_warning= saved_abort_on_warning; } + + if (!res && duplic == DUP_UPDATE) + { + select_lex->no_wrap_view_item= TRUE; + res= check_update_fields(thd, context->table_list, update_fields, &map); + select_lex->no_wrap_view_item= FALSE; + } + + /* Restore the current context. */ + ctx_state.restore_state(context, table_list); + if (!res) res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, 0); } - /* Restore the current context. 
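The hunk above saves thd->abort_on_warning, overrides it around check_that_all_fields_are_given_values(), and then puts the old value back. The same save / override / restore idiom as a tiny standalone RAII sketch (a generic flag, not the server's THD):

    #include <cstdio>

    // Scoped override: remembers the referenced flag, sets a new value, and
    // restores the old one when the scope ends, even on early returns.
    class Flag_override
    {
      bool &flag_;
      bool saved_;
    public:
      Flag_override(bool &flag, bool new_value) : flag_(flag), saved_(flag)
      { flag_= new_value; }
      ~Flag_override() { flag_= saved_; }
    };

    int main()
    {
      bool abort_on_warning= false;
      {
        Flag_override guard(abort_on_warning, true);    // be strict in here
        std::printf("inside: %d\n", abort_on_warning);  // 1
      }
      std::printf("after: %d\n", abort_on_warning);     // 0, restored
      return 0;
    }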
*/ - ctx_state.restore_state(context, table_list); - if (res) DBUG_RETURN(res); @@ -1025,7 +1077,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, { Item *fake_conds= 0; TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 1))) { update_non_unique_table_error(table_list, "INSERT", duplicate); DBUG_RETURN(TRUE); @@ -1163,9 +1215,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } key_copy((byte*) key,table->record[0],table->key_info+key_nr,0); if ((error=(table->file->index_read_idx(table->record[1],key_nr, - (byte*) key, - table->key_info[key_nr]. - key_length, + (byte*) key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)))) goto err; } @@ -1196,32 +1246,38 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (res == VIEW_CHECK_ERROR) goto before_trg_err; + table->file->restore_auto_increment(prev_insert_id); if ((error=table->file->ha_update_row(table->record[1], table->record[0]))) { if (info->ignore && !table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) { - table->file->restore_auto_increment(prev_insert_id); goto ok_or_after_trg_err; } goto err; - } - info->updated++; - /* - If ON DUP KEY UPDATE updates a row instead of inserting one, it's - like a regular UPDATE statement: it should not affect the value of a - next SELECT LAST_INSERT_ID() or mysql_insert_id(). - Except if LAST_INSERT_ID(#) was in the INSERT query, which is - handled separately by THD::arg_of_last_insert_id_function. - */ - insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0; - if (table->next_number_field) - table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int()); - trg_error= (table->triggers && - table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, - TRG_ACTION_AFTER, TRUE)); - info->copied++; + } + if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) || + compare_record(table)) + { + info->updated++; + /* + If ON DUP KEY UPDATE updates a row instead of inserting one, it's + like a regular UPDATE statement: it should not affect the value of a + next SELECT LAST_INSERT_ID() or mysql_insert_id(). + Except if LAST_INSERT_ID(#) was in the INSERT query, which is + handled separately by THD::arg_of_last_insert_id_function. + */ + insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0; + if (table->next_number_field) + table->file->adjust_next_insert_id_after_explicit_value( + table->next_number_field->val_int()); + trg_error= (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE)); + info->copied++; + } + goto ok_or_after_trg_err; } else /* DUP_REPLACE */ @@ -1892,7 +1948,7 @@ pthread_handler_t handle_delayed_insert(void *arg) pthread_detach_this_thread(); /* Add thread to THD list so that's it's visible in 'show processlist' */ pthread_mutex_lock(&LOCK_thread_count); - thd->thread_id=thread_id++; + thd->thread_id= thd->variables.pseudo_thread_id= thread_id++; thd->end_time(); threads.append(thd); thd->killed=abort_loop ? 
THD::KILL_CONNECTION : THD::NOT_KILLED; @@ -1923,14 +1979,8 @@ pthread_handler_t handle_delayed_insert(void *arg) strmov(thd->net.last_error,ER(thd->net.last_errno=ER_OUT_OF_RESOURCES)); goto err; } -#if !defined(__WIN__) && !defined(__NETWARE__) - sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use - VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); -#endif /* open table */ - if (!(di->table=open_ltable(thd,&di->table_list,TL_WRITE_DELAYED))) { thd->fatal_error(); // Abort waiting inserts @@ -2407,7 +2457,7 @@ bool mysql_insert_select_prepare(THD *thd) lex->query_tables->table, lex->field_list, 0, lex->update_list, lex->value_list, lex->duplicates, - &select_lex->where, TRUE)) + &select_lex->where, TRUE, FALSE, FALSE)) DBUG_RETURN(TRUE); /* @@ -2470,9 +2520,19 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) !insert_into_view, &map) || setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0); - if (info.handle_duplicates == DUP_UPDATE) + if (!res && fields->elements) + { + bool saved_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= !info.ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); + res= check_that_all_fields_are_given_values(thd, table_list->table, + table_list); + thd->abort_on_warning= saved_abort_on_warning; + } + + if (info.handle_duplicates == DUP_UPDATE && !res) { - /* Save the state of the current name resolution context. */ Name_resolution_context *context= &lex->select_lex.context; Name_resolution_context_state ctx_state; @@ -2488,18 +2548,40 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) *info.update_fields, &map); lex->select_lex.no_wrap_view_item= FALSE; /* - When we are not using GROUP BY we can refer to other tables in the - ON DUPLICATE KEY part - */ - if (lex->select_lex.group_list.elements == 0) + When we are not using GROUP BY and there are no ungrouped aggregate functions + we can refer to other tables in the ON DUPLICATE KEY part. + We use next_name_resolution_table descructively, so check it first (views?) + */ + DBUG_ASSERT (!table_list->next_name_resolution_table); + if (lex->select_lex.group_list.elements == 0 && + !lex->select_lex.with_sum_func) + /* + We must make a single context out of the two separate name resolution contexts : + the INSERT table and the tables in the SELECT part of INSERT ... SELECT. + To do that we must concatenate the two lists + */ + table_list->next_name_resolution_table= + ctx_state.get_first_name_resolution_table(); + + res= res || setup_fields(thd, 0, *info.update_values, + MARK_COLUMNS_READ, 0, 0); + if (!res) { - context->table_list->next_local= ctx_state.save_next_local; - /* first_name_resolution_table was set by resolve_in_table_list_only() */ - context->first_name_resolution_table-> - next_name_resolution_table= ctx_state.save_next_local; + /* + Traverse the update values list and substitute fields from the + select for references (Item_ref objects) to them. This is done in + order to get correct values from those fields when the select + employs a temporary table. + */ + List_iterator<Item> li(*info.update_values); + Item *item; + + while ((item= li++)) + { + item->transform(&Item::update_value_transformer, + (byte*)lex->current_select); + } } - res= res || setup_fields(thd, 0, *info.update_values, MARK_COLUMNS_READ, - 0, 0); /* Restore the current context. 
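The loop above walks info.update_values and lets Item::update_value_transformer substitute each element. The same walk-and-substitute idiom, reduced to a standalone sketch over a plain list with a hypothetical Item type:

    #include <cstdio>
    #include <list>

    struct Item { int value; };                  // hypothetical item node

    // A transformer may return the same node or a replacement; here it just
    // doubles the value in place.
    static Item *double_value(Item *item)
    {
      item->value*= 2;
      return item;
    }

    // Walk the list and let the transformer replace each element.
    static void transform_all(std::list<Item*> &items, Item *(*transform)(Item*))
    {
      for (std::list<Item*>::iterator it= items.begin(); it != items.end(); ++it)
        *it= transform(*it);
    }

    int main()
    {
      Item a= {1}, b= {2};
      std::list<Item*> items;
      items.push_back(&a);
      items.push_back(&b);
      transform_all(items, double_value);
      std::printf("%d %d\n", a.value, b.value);  // 2 4
      return 0;
    }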
*/ ctx_state.restore_state(context, table_list); @@ -2519,7 +2601,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) query */ if (!(lex->current_select->options & OPTION_BUFFER_RESULT) && - unique_table(thd, table_list, table_list->next_global)) + unique_table(thd, table_list, table_list->next_global, 0)) { /* Using same table for INSERT and SELECT */ lex->current_select->options|= OPTION_BUFFER_RESULT; @@ -2540,6 +2622,15 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) } restore_record(table,s->default_values); // Get empty record table->next_number_field=table->found_next_number_field; + +#ifdef HAVE_REPLICATION + if (thd->slave_thread && + (info.handle_duplicates == DUP_UPDATE) && + (table->next_number_field != NULL) && + rpl_master_has_bug(&active_mi->rli, 24432)) + DBUG_RETURN(1); +#endif + thd->cuted_fields=0; if (info.ignore || info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); @@ -2551,9 +2642,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); - res= ((fields->elements && - check_that_all_fields_are_given_values(thd, table, table_list)) || - table_list->prepare_where(thd, 0, TRUE) || + res= (table_list->prepare_where(thd, 0, TRUE) || table_list->prepare_check_option(thd)); if (!res) @@ -2601,6 +2690,7 @@ select_insert::~select_insert() if (table) { table->next_number_field=0; + table->auto_increment_field_not_null= FALSE; table->file->ha_reset(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; @@ -3143,8 +3233,7 @@ void select_create::send_error(uint errcode,const char *err) ("Current statement %s row-based", thd->current_stmt_binlog_row_based ? "is" : "is NOT")); DBUG_PRINT("info", - ("Current table (at 0x%lx) %s a temporary (or non-existing) " - "table", + ("Current table (at 0x%lu) %s a temporary (or non-existant) table", (ulong) table, table && !table->s->tmp_table ? "is NOT" : "is")); DBUG_PRINT("info", diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 35db3e40930..26955c18342 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -33,10 +33,10 @@ sys_var *trg_new_row_fake_var= (sys_var*) 0x01; /* Macros to look like lex */ -#define yyGet() *(lex->ptr++) -#define yyGetLast() lex->ptr[-1] -#define yyPeek() lex->ptr[0] -#define yyPeek2() lex->ptr[1] +#define yyGet() ((uchar) *(lex->ptr++)) +#define yyGetLast() ((uchar) lex->ptr[-1]) +#define yyPeek() ((uchar) lex->ptr[0]) +#define yyPeek2() ((uchar) lex->ptr[1]) #define yyUnget() lex->ptr-- #define yySkip() lex->ptr++ #define yyLength() ((uint) (lex->ptr - lex->tok_start)-1) @@ -69,6 +69,17 @@ static uchar to_upper_lex[]= 208,209,210,211,212,213,214,247,216,217,218,219,220,221,222,255 }; +/* + Names of the index hints (for error messages). Keep in sync with + index_hint_type +*/ + +const char * index_hint_type_name[] = +{ + "IGNORE INDEX", + "USE INDEX", + "FORCE INDEX" +}; inline int lex_casecmp(const char *s, const char *t, uint len) { @@ -100,13 +111,23 @@ void lex_free(void) } +void +st_parsing_options::reset() +{ + allows_variable= TRUE; + allows_select_into= TRUE; + allows_select_procedure= TRUE; + allows_derived= TRUE; +} + + /* This is called before every query that is to be parsed. Because of this, it's critical to not do too much things here. 
(We already do too much here) */ -void lex_start(THD *thd, const uchar *buf, uint length) +void lex_start(THD *thd, const char *buf, uint length) { LEX *lex= thd->lex; DBUG_ENTER("lex_start"); @@ -148,8 +169,8 @@ void lex_start(THD *thd, const uchar *buf, uint length) lex->lock_option= TL_READ; lex->found_semicolon= 0; lex->safe_to_cache_query= 1; - lex->time_zone_tables_used= 0; lex->leaf_tables_insert= 0; + lex->parsing_options.reset(); lex->empty_field_list_on_rset= 0; lex->select_lex.select_number= 1; lex->next_state=MY_LEX_START; @@ -217,9 +238,9 @@ void lex_end(LEX *lex) static int find_keyword(LEX *lex, uint len, bool function) { - const uchar *tok=lex->tok_start; + const char *tok= lex->tok_start; - SYMBOL *symbol= get_hash_symbol((const char *)tok,len,function); + SYMBOL *symbol= get_hash_symbol(tok, len, function); if (symbol) { lex->yylval->symbol.symbol=symbol; @@ -284,16 +305,16 @@ static LEX_STRING get_token(LEX *lex,uint length) static LEX_STRING get_quoted_token(LEX *lex,uint length, char quote) { LEX_STRING tmp; - const uchar *from, *end; - uchar *to; + const char *from, *end; + char *to; yyUnget(); // ptr points now after last token char tmp.length=lex->yytoklen=length; tmp.str=(char*) lex->thd->alloc(tmp.length+1); - for (from= lex->tok_start, to= (uchar*) tmp.str, end= to+length ; + for (from= lex->tok_start, to= tmp.str, end= to+length ; to != end ; ) { - if ((*to++= *from++) == (uchar) quote) + if ((*to++= *from++) == quote) from++; // Skip double quotes } *to= 0; // End null for safety @@ -320,9 +341,7 @@ static char *get_text(LEX *lex) { int l; if (use_mb(cs) && - (l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query))) { + (l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query))) { lex->ptr += l-1; continue; } @@ -347,12 +366,12 @@ static char *get_text(LEX *lex) yyUnget(); /* Found end. 
Unescape and return string */ - const uchar *str, *end; - uchar *start; + const char *str, *end; + char *start; str=lex->tok_start+1; end=lex->ptr-1; - if (!(start=(uchar*) lex->thd->alloc((uint) (end-str)+1))) + if (!(start= (char*) lex->thd->alloc((uint) (end-str)+1))) return (char*) ""; // Sql_alloc has set error flag if (!found_escape) { @@ -362,15 +381,14 @@ static char *get_text(LEX *lex) } else { - uchar *to; + char *to; for (to=start ; str != end ; str++) { #ifdef USE_MB int l; if (use_mb(cs) && - (l = my_ismbchar(cs, - (const char *)str, (const char *)end))) { + (l = my_ismbchar(cs, str, end))) { while (l--) *to++ = *str++; str--; @@ -416,7 +434,7 @@ static char *get_text(LEX *lex) *to=0; lex->yytoklen=(uint) (to-start); } - return (char*) start; + return start; } } return 0; // unexpected end of query @@ -535,7 +553,6 @@ int MYSQLlex(void *arg, void *yythd) lex->yylval=yylval; // The global state - lex->tok_end_prev= lex->tok_end; lex->tok_start_prev= lex->tok_start; lex->tok_start=lex->tok_end=lex->ptr; @@ -619,16 +636,14 @@ int MYSQLlex(void *arg, void *yythd) break; } case MY_LEX_IDENT: - const uchar *start; + const char *start; #if defined(USE_MB) && defined(USE_MB_IDENT) if (use_mb(cs)) { result_state= IDENT_QUOTED; if (my_mbcharlen(cs, yyGetLast()) > 1) { - int l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query); + int l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query); if (l == 0) { state = MY_LEX_CHAR; continue; @@ -640,9 +655,7 @@ int MYSQLlex(void *arg, void *yythd) if (my_mbcharlen(cs, c) > 1) { int l; - if ((l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query)) == 0) + if ((l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query)) == 0) break; lex->ptr += l-1; } @@ -765,9 +778,7 @@ int MYSQLlex(void *arg, void *yythd) if (my_mbcharlen(cs, c) > 1) { int l; - if ((l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query)) == 0) + if ((l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query)) == 0) break; lex->ptr += l-1; } @@ -1101,7 +1112,7 @@ int MYSQLlex(void *arg, void *yythd) Pointer to the last non-comment symbol of the statement. 
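For experimenting with this behaviour outside the parser, the trimming loop that follows can be reproduced as a free-standing helper that takes plain pointers and carries no lexer state:

    #include <cstdio>
    #include <cstring>

    // Trim a trailing run of whitespace, '*', '/' and ';' from [begin, end)
    // and return the new end pointer.
    static const char *trim_rear(const char *begin, const char *end)
    {
      while (begin < end &&
             (end[-1] <= ' ' || end[-1] == '*' ||
              end[-1] == '/' || end[-1] == ';'))
        end--;
      return end;
    }

    int main()
    {
      const char *q= "SELECT 1  ;  ";
      const char *e= trim_rear(q, q + std::strlen(q));
      std::printf("kept %d chars\n", (int) (e - q));  // 8: "SELECT 1"
      return 0;
    }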
*/ -const uchar *skip_rear_comments(const uchar *begin, const uchar *end) +const char *skip_rear_comments(const char *begin, const char *end) { while (begin < end && (end[-1] <= ' ' || end[-1] == '*' || end[-1] == '/' || end[-1] == ';')) @@ -1172,6 +1183,7 @@ void st_select_lex::init_query() cond_count= between_count= with_wild= 0; conds_processed_with_permanent_arena= 0; ref_pointer_array= 0; + select_n_where_fields= 0; select_n_having_items= 0; subquery_in_having= explicit_limit= 0; is_item_list_lookup= 0; @@ -1189,7 +1201,6 @@ void st_select_lex::init_select() group_list.empty(); type= db= 0; having= 0; - use_index_ptr= ignore_index_ptr= 0; table_join_options= 0; in_sum_expr= with_wild= 0; options= 0; @@ -1197,7 +1208,6 @@ void st_select_lex::init_select() braces= 0; expr_list.empty(); interval_list.empty(); - use_index.empty(); ftfunc_list_alloc.empty(); inner_sum_func_list= 0; ftfunc_list= &ftfunc_list_alloc; @@ -1212,6 +1222,8 @@ void st_select_lex::init_select() is_correlated= 0; cur_pos_in_select_list= UNDEF_POS; non_agg_fields.empty(); + cond_value= having_value= Item::COND_UNDEF; + inner_refs_list.empty(); } /* @@ -1422,14 +1434,11 @@ bool st_select_lex_node::inc_in_sum_expr() { return 1; } uint st_select_lex_node::get_in_sum_expr() { return 0; } TABLE_LIST* st_select_lex_node::get_table_list() { return 0; } List<Item>* st_select_lex_node::get_item_list() { return 0; } -List<String>* st_select_lex_node::get_use_index() { return 0; } -List<String>* st_select_lex_node::get_ignore_index() { return 0; } -TABLE_LIST *st_select_lex_node::add_table_to_list(THD *thd, Table_ident *table, +TABLE_LIST *st_select_lex_node::add_table_to_list (THD *thd, Table_ident *table, LEX_STRING *alias, ulong table_join_options, thr_lock_type flags, - List<String> *use_index, - List<String> *ignore_index, + List<index_hint> *hints, LEX_STRING *option) { return 0; @@ -1536,19 +1545,6 @@ List<Item>* st_select_lex::get_item_list() return &item_list; } - -List<String>* st_select_lex::get_use_index() -{ - return use_index_ptr; -} - - -List<String>* st_select_lex::get_ignore_index() -{ - return ignore_index_ptr; -} - - ulong st_select_lex::get_table_join_options() { return table_join_options; @@ -1569,6 +1565,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) (Item **)arena->alloc(sizeof(Item*) * (n_child_sum_items + item_list.elements + select_n_having_items + + select_n_where_fields + order_group_num)*5)) == 0; } @@ -1652,6 +1649,36 @@ void st_select_lex::print_limit(THD *thd, String *str) } } +/** + @brief Restore the LEX and THD in case of a parse error. + + This is a clean up call that is invoked by the Bison generated + parser before returning an error from MYSQLparse. If your + semantic actions manipulate with the global thread state (which + is a very bad practice and should not normally be employed) and + need a clean-up in case of error, and you can not use %destructor + rule in the grammar file itself, this function should be used + to implement the clean up. +*/ + +void st_lex::cleanup_lex_after_parse_error(THD *thd) +{ + /* + Delete sphead for the side effect of restoring of the original + LEX state, thd->lex, thd->mem_root and thd->free_list if they + were replaced when parsing stored procedure statements. We + will never use sphead object after a parse error, so it's okay + to delete it only for the sake of the side effect. + TODO: make this functionality explicit in sp_head class. 
+ Sic: we must nullify the member of the main lex, not the + current one that will be thrown away + */ + if (thd->lex->sphead) + { + delete thd->lex->sphead; + thd->lex->sphead= NULL; + } +} /* Initialize (or reset) Query_tables_list object. @@ -1773,7 +1800,6 @@ bool st_lex::can_be_merged() } return (selects_allow_merge && - select_lex.order_list.elements == 0 && select_lex.group_list.elements == 0 && select_lex.having == 0 && select_lex.with_sum_func == 0 && @@ -2050,31 +2076,6 @@ void st_lex::first_lists_tables_same() /* - Add implicitly used time zone description tables to global table list - (if needed). - - SYNOPSYS - st_lex::add_time_zone_tables_to_query_tables() - thd - pointer to current thread context - - RETURN VALUE - TRUE - error - FALSE - success -*/ - -bool st_lex::add_time_zone_tables_to_query_tables(THD *thd_arg) -{ - /* We should not add these tables twice */ - if (!time_zone_tables_used) - { - time_zone_tables_used= my_tz_get_table_list(thd_arg, &query_tables_last); - if (time_zone_tables_used == &fake_time_zone_tables_list) - return TRUE; - } - return FALSE; -} - -/* Link table back that was unlinked with unlink_first_table() SYNOPSIS @@ -2143,7 +2144,6 @@ void st_lex::cleanup_after_one_table_open() /* remove underlying units (units of VIEW) subtree */ select_lex.cut_subtree(); } - time_zone_tables_used= 0; } @@ -2284,3 +2284,61 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds, are in sql_union.cc */ +/* + Sets the kind of hints to be added by the calls to add_index_hint(). + + SYNOPSIS + set_index_hint_type() + type the kind of hints to be added from now on. + clause the clause to use for hints to be added from now on. + + DESCRIPTION + Used in filling up the tagged hints list. + This list is filled by first setting the kind of the hint as a + context variable and then adding hints of the current kind. + Then the context variable index_hint_type can be reset to the + next hint type. +*/ +void st_select_lex::set_index_hint_type(enum index_hint_type type, + index_clause_map clause) +{ + current_index_hint_type= type; + current_index_hint_clause= clause; +} + + +/* + Makes an array to store index usage hints (ADD/FORCE/IGNORE INDEX). + + SYNOPSIS + alloc_index_hints() + thd current thread. +*/ + +void st_select_lex::alloc_index_hints (THD *thd) +{ + index_hints= new (thd->mem_root) List<index_hint>(); +} + + + +/* + adds an element to the array storing index usage hints + (ADD/FORCE/IGNORE INDEX). + + SYNOPSIS + add_index_hint() + thd current thread. + str name of the index. + length number of characters in str. + + RETURN VALUE + 0 on success, non-zero otherwise +*/ +bool st_select_lex::add_index_hint (THD *thd, char *str, uint length) +{ + return index_hints->push_front (new (thd->mem_root) + index_hint(current_index_hint_type, + current_index_hint_clause, + str, length)); +} diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 821af3f946d..d8a5a0b04f0 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -217,6 +217,47 @@ enum tablespace_op_type }; /* + String names used to print a statement with index hints. + Keep in sync with index_hint_type. 
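The keep-in-sync requirement above is the classic parallel enum / string-table pattern. A standalone sketch with hypothetical names that adds a compile-time size check so the two cannot silently drift apart:

    #include <cstdio>

    enum hint_kind { HINT_IGNORE, HINT_USE, HINT_FORCE, HINT_KIND_COUNT };

    static const char *hint_kind_name[]=
    {
      "IGNORE INDEX",
      "USE INDEX",
      "FORCE INDEX"
    };

    // Fails to compile if an enumerator is added without a matching name.
    static_assert(sizeof(hint_kind_name) / sizeof(hint_kind_name[0]) ==
                  HINT_KIND_COUNT, "hint_kind_name out of sync with hint_kind");

    int main()
    {
      std::printf("%s\n", hint_kind_name[HINT_USE]);   // prints "USE INDEX"
      return 0;
    }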
+*/ +extern const char * index_hint_type_name[]; +typedef byte index_clause_map; + +/* + Bits in index_clause_map : one for each possible FOR clause in + USE/FORCE/IGNORE INDEX index hint specification +*/ +#define INDEX_HINT_MASK_JOIN (1) +#define INDEX_HINT_MASK_GROUP (1 << 1) +#define INDEX_HINT_MASK_ORDER (1 << 2) + +#define INDEX_HINT_MASK_ALL (INDEX_HINT_MASK_JOIN | INDEX_HINT_MASK_GROUP | \ + INDEX_HINT_MASK_ORDER) + +/* Single element of an USE/FORCE/IGNORE INDEX list specified as a SQL hint */ +class index_hint : public Sql_alloc +{ +public: + /* The type of the hint : USE/FORCE/IGNORE */ + enum index_hint_type type; + /* Where the hit applies to. A bitmask of INDEX_HINT_MASK_<place> values */ + index_clause_map clause; + /* + The index name. Empty (str=NULL) name represents an empty list + USE INDEX () clause + */ + LEX_STRING key_name; + + index_hint (enum index_hint_type type_arg, index_clause_map clause_arg, + char *str, uint length) : + type(type_arg), clause(clause_arg) + { + key_name.str= str; + key_name.length= length; + } +}; + +/* The state of the lex parsing for selects master and slaves are pointers to select_lex. @@ -394,15 +435,12 @@ public: virtual uint get_in_sum_expr(); virtual TABLE_LIST* get_table_list(); virtual List<Item>* get_item_list(); - virtual List<String>* get_use_index(); - virtual List<String>* get_ignore_index(); virtual ulong get_table_join_options(); virtual TABLE_LIST *add_table_to_list(THD *thd, Table_ident *table, LEX_STRING *alias, ulong table_options, thr_lock_type flags= TL_UNLOCK, - List<String> *use_index= 0, - List<String> *ignore_index= 0, + List<index_hint> *hints= 0, LEX_STRING *option= 0); virtual void set_lock_for_tables(thr_lock_type lock_type) {} @@ -504,7 +542,7 @@ public: void set_limit(st_select_lex *values); void set_thd(THD *thd_arg) { thd= thd_arg; } - friend void lex_start(THD *thd, const uchar *buf, uint length); + friend void lex_start(THD *thd, const char *buf, uint length); friend int subselect_union_engine::exec(); List<Item> *get_unit_column_types(); @@ -523,6 +561,8 @@ public: Item *where, *having; /* WHERE & HAVING clauses */ Item *prep_where; /* saved WHERE clause for prepared statement processing */ Item *prep_having;/* saved HAVING clause for prepared statement processing */ + /* Saved values of the WHERE and HAVING clauses*/ + Item::cond_result cond_value, having_value; /* point on lex in which it was created, used in view subquery detection */ st_lex *parent_lex; enum olap_type olap; @@ -530,8 +570,7 @@ public: SQL_LIST table_list; SQL_LIST group_list; /* GROUP BY clause. */ List<Item> item_list; /* list of fields & expressions */ - List<String> interval_list, use_index, *use_index_ptr, - ignore_index, *ignore_index_ptr; + List<String> interval_list; bool is_item_list_lookup; /* Usualy it is pointer to ftfunc_list_alloc, but in union used to create fake @@ -566,6 +605,11 @@ public: uint select_n_having_items; uint cond_count; /* number of arguments of and/or/xor in where/having/on */ uint between_count; /* number of between predicates in where/having/on */ + /* + Number of fields used in select list or where clause of current select + and all inner subselects. + */ + uint select_n_where_fields; enum_parsing_place parsing_place; /* where we are parsing expression */ bool with_sum_func; /* sum function indicator */ /* @@ -583,7 +627,8 @@ public: bool braces; /* SELECT ... UNION (SELECT ... 
) <- this braces */ /* TRUE when having fix field called in processing of this SELECT */ bool having_fix_field; - + /* List of references to fields referenced from inner selects */ + List<Item_outer_ref> inner_refs_list; /* Number of Item_sum-derived objects in this SELECT */ uint n_sum_items; /* Number of Item_sum-derived objects in children and descendant SELECTs */ @@ -672,8 +717,7 @@ public: LEX_STRING *alias, ulong table_options, thr_lock_type flags= TL_UNLOCK, - List<String> *use_index= 0, - List<String> *ignore_index= 0, + List<index_hint> *hints= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); bool init_nested_join(THD *thd); @@ -682,8 +726,6 @@ public: void add_joined_table(TABLE_LIST *table); TABLE_LIST *convert_right_join(); List<Item>* get_item_list(); - List<String>* get_use_index(); - List<String>* get_ignore_index(); ulong get_table_join_options(); void set_lock_for_tables(thr_lock_type lock_type); inline void init_order() @@ -701,7 +743,7 @@ public: void cut_subtree() { slave= 0; } bool test_limit(); - friend void lex_start(THD *thd, const uchar *buf, uint length); + friend void lex_start(THD *thd, const char *buf, uint length); st_select_lex() : n_sum_items(0), n_child_sum_items(0) {} void make_empty_select() { @@ -723,6 +765,33 @@ public: select lexes. */ void cleanup_all_joins(bool full); + + void set_index_hint_type(enum index_hint_type type, index_clause_map clause); + + /* + Add a index hint to the tagged list of hints. The type and clause of the + hint will be the current ones (set by set_index_hint()) + */ + bool add_index_hint (THD *thd, char *str, uint length); + + /* make a list to hold index hints */ + void alloc_index_hints (THD *thd); + /* read and clear the index hints */ + List<index_hint>* pop_index_hints(void) + { + List<index_hint> *hints= index_hints; + index_hints= NULL; + return hints; + } + + void clear_index_hints(void) { index_hints= NULL; } + +private: + /* current index hint kind. used in filling up index_hints */ + enum index_hint_type current_index_hint_type; + index_clause_map current_index_hint_clause; + /* a list of USE/FORCE/IGNORE INDEX */ + List<index_hint> *index_hints; }; typedef class st_select_lex SELECT_LEX; @@ -910,10 +979,8 @@ struct st_parsing_options bool allows_select_procedure; bool allows_derived; - st_parsing_options() - : allows_variable(TRUE), allows_select_into(TRUE), - allows_select_procedure(TRUE), allows_derived(TRUE) - {} + st_parsing_options() { reset(); } + void reset(); }; @@ -929,11 +996,11 @@ typedef struct st_lex : public Query_tables_list SELECT_LEX *current_select; /* list of all SELECT_LEX */ SELECT_LEX *all_selects_list; - const uchar *buf; /* The beginning of string, used by SPs */ - const uchar *ptr,*tok_start,*tok_end,*end_of_query; + const char *buf; /* The beginning of string, used by SPs */ + const char *ptr,*tok_start,*tok_end,*end_of_query; - /* The values of tok_start/tok_end as they were one call of MYSQLlex before */ - const uchar *tok_start_prev, *tok_end_prev; + /* The value of tok_start as they were one call of MYSQLlex before */ + const char *tok_start_prev; char *length,*dec,*change; LEX_STRING name; @@ -1088,11 +1155,6 @@ typedef struct st_lex : public Query_tables_list bool prepared_stmt_code_is_varref; /* Names of user variables holding parameters (in EXECUTE) */ List<LEX_STRING> prepared_stmt_params; - /* - Points to part of global table list which contains time zone tables - implicitly used by the statement. 
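Earlier in this header st_select_lex gains a small tagged-hints interface (set_index_hint_type, alloc_index_hints, add_index_hint, pop_index_hints): the parser fixes the current hint kind and FOR-clause mask once, pushes index names, and the table-list code later pops the finished list. A toy standalone model of that shape of interface, with simplified stand-in types rather than the real classes:

    #include <cstdio>
    #include <string>
    #include <vector>

    enum hint_type { HINT_IGNORE, HINT_USE, HINT_FORCE };
    typedef unsigned char clause_map;          // FOR JOIN / ORDER BY / GROUP BY bits
    static const clause_map CLAUSE_JOIN= 1, CLAUSE_ORDER= 1 << 2;

    struct Index_hint { hint_type type; clause_map clause; std::string key; };

    // Minimal mock of a select-level hint collector.
    class Hint_collector
    {
      hint_type  cur_type_;
      clause_map cur_clause_;
      std::vector<Index_hint> hints_;
    public:
      Hint_collector() : cur_type_(HINT_USE), cur_clause_(CLAUSE_JOIN) {}
      void set_type(hint_type t, clause_map c) { cur_type_= t; cur_clause_= c; }
      void add(const std::string &key)         // tag the name with the current kind
      { hints_.push_back(Index_hint{cur_type_, cur_clause_, key}); }
      std::vector<Index_hint> pop()            // hand the list over and reset
      { std::vector<Index_hint> out; out.swap(hints_); return out; }
    };

    int main()
    {
      Hint_collector c;
      c.set_type(HINT_FORCE, CLAUSE_ORDER);    // e.g. FORCE INDEX FOR ORDER BY (...)
      c.add("idx_a");
      c.add("idx_b");
      for (const Index_hint &h : c.pop())
        std::printf("hint %d on %s\n", (int) h.type, h.key.c_str());
      return 0;
    }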
- */ - TABLE_LIST *time_zone_tables_used; sp_head *sphead; sp_name *spname; bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ @@ -1140,7 +1202,7 @@ typedef struct st_lex : public Query_tables_list Pointers to part of LOAD DATA statement that should be rewritten during replication ("LOCAL 'filename' REPLACE INTO" part). */ - const uchar *fname_start, *fname_end; + const char *fname_start, *fname_end; /* Reference to a struct that contains information in various commands @@ -1179,7 +1241,6 @@ typedef struct st_lex : public Query_tables_list TABLE_LIST *unlink_first_table(bool *link_to_local); void link_first_table_back(TABLE_LIST *first, bool link_to_local); void first_lists_tables_same(); - bool add_time_zone_tables_to_query_tables(THD *thd); bool can_be_merged(); bool can_use_merged(); @@ -1229,6 +1290,10 @@ typedef struct st_lex : public Query_tables_list { return context_stack.head(); } + /* + Restore the LEX and THD in case of a parse error. + */ + static void cleanup_lex_after_parse_error(THD *thd); void reset_n_backup_query_tables_list(Query_tables_list *backup); void restore_backup_query_tables_list(Query_tables_list *backup); @@ -1254,10 +1319,10 @@ struct st_lex_local: public st_lex extern void lex_init(void); extern void lex_free(void); -extern void lex_start(THD *thd, const uchar *buf, uint length); +extern void lex_start(THD *thd, const char *buf, uint length); extern void lex_end(LEX *lex); extern int MYSQLlex(void *arg, void *yythd); -extern const uchar *skip_rear_comments(const uchar *ubegin, const uchar *uend); +extern const char *skip_rear_comments(const char *ubegin, const char *uend); extern bool is_lex_native_function(const LEX_STRING *name); diff --git a/sql/sql_list.h b/sql/sql_list.h index d16fbaf2e50..ba61a931e04 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -23,7 +23,7 @@ class Sql_alloc { public: - static void *operator new(size_t size) + static void *operator new(size_t size) throw () { return (void*) sql_alloc((uint) size); } @@ -31,13 +31,15 @@ public: { return (void*) sql_alloc((uint) size); } - static void *operator new[](size_t size, MEM_ROOT *mem_root) + static void *operator new[](size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } - static void *operator new(size_t size, MEM_ROOT *mem_root) + static void *operator new(size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr, size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) { /* never called */ } + static void operator delete[](void *ptr, MEM_ROOT *mem_root) + { /* never called */ } static void operator delete[](void *ptr, size_t size) { TRASH(ptr, size); } #ifdef HAVE_purify bool dummy; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 364fda2c94b..7d2c2281bba 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -175,7 +175,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table is marked to be 'used for insert' in which case we should never mark this table as 'const table' (ie, one that has only one row). 
*/ - if (unique_table(thd, table_list, table_list->next_global)) + if (unique_table(thd, table_list, table_list->next_global, 0)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); DBUG_RETURN(TRUE); @@ -305,6 +305,15 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if ((stat_info.st_mode & S_IFIFO) == S_IFIFO) is_fifo = 1; #endif + + if (opt_secure_file_priv && + strncmp(opt_secure_file_priv, name, strlen(opt_secure_file_priv))) + { + /* Read only allowed from within dir specified by secure_file_priv */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); + DBUG_RETURN(TRUE); + } + } if ((file=my_open(name,O_RDONLY,MYF(MY_WME))) < 0) DBUG_RETURN(TRUE); @@ -316,7 +325,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, info.handle_duplicates=handle_duplicates; info.escape_char=escaped->length() ? (*escaped)[0] : INT_MAX; - READ_INFO read_info(file,tot_length,thd->variables.collation_database, + READ_INFO read_info(file,tot_length, + ex->cs ? ex->cs : thd->variables.collation_database, *field_term,*ex->line_start, *ex->line_term, *enclosed, info.escape_char, read_file_from_client, is_fifo); if (read_info.error) @@ -458,7 +468,6 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, } sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); - send_ok(thd,info.copied+info.deleted,0L,name); if (!transactional_table) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; @@ -494,6 +503,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (transactional_table) error=ha_autocommit_or_rollback(thd,error); + /* ok to client sent only after binlog write and engine commit */ + send_ok(thd, info.copied + info.deleted, 0L, name); err: table->file->ha_release_auto_increment(); if (thd->lock) @@ -501,6 +512,7 @@ err: mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + table->auto_increment_field_not_null= FALSE; thd->abort_on_warning= 0; DBUG_RETURN(error); } @@ -598,8 +610,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, { uint length; byte save_chr; - if (field == table->next_number_field) - table->auto_increment_field_not_null= TRUE; if ((length=(uint) (read_info.row_end-pos)) > field->field_length) length=field->field_length; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index b45859c4e28..e59c675a678 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -27,24 +27,7 @@ #include "sp_cache.h" #include "events.h" #include "event_data_objects.h" - -#ifdef HAVE_OPENSSL -/* - Without SSL the handshake consists of one packet. This packet - has both client capabilites and scrambled password. - With SSL the handshake might consist of two packets. If the first - packet (client capabilities) has CLIENT_SSL flag set, we have to - switch to SSL and read the second packet. The scrambled password - is in the second packet and client_capabilites field will be ignored. - Maybe it is better to accept flags other than CLIENT_SSL from the - second packet? -*/ -#define SSL_HANDSHAKE_SIZE 2 -#define NORMAL_HANDSHAKE_SIZE 6 -#define MIN_HANDSHAKE_SIZE 2 -#else -#define MIN_HANDSHAKE_SIZE 6 -#endif /* HAVE_OPENSSL */ +#include "sql_trigger.h" /* Used in error handling only */ #define SP_TYPE_STRING(LP) \ @@ -56,11 +39,6 @@ (LP)->sql_command == SQLCOM_DROP_FUNCTION ? 
\ "FUNCTION" : "PROCEDURE") -#ifndef NO_EMBEDDED_ACCESS_CHECKS -static void time_out_user_resource_limits(THD *thd, USER_CONN *uc); -static int check_for_max_user_connections(THD *thd, USER_CONN *uc); -static void decrease_user_connections(USER_CONN *uc); -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables); const char *any_db="*any*"; // Special symbol for check_access @@ -103,20 +81,6 @@ const char *xa_state_names[]={ "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED" }; -#ifdef __WIN__ -static void test_signal(int sig_ptr) -{ -#if !defined( DBUG_OFF) - MessageBox(NULL,"Test signal","DBUG",MB_OK); -#endif -} -static void init_signals(void) -{ - int signals[7] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGBREAK,SIGABRT } ; - for (int i=0 ; i < 7 ; i++) - signal( signals[i], test_signal) ; -} -#endif static void unlock_locked_tables(THD *thd) { @@ -160,6 +124,7 @@ bool end_active_trans(THD *thd) DBUG_RETURN(error); } + bool begin_trans(THD *thd) { int error=0; @@ -211,413 +176,6 @@ static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables) return 0; } -#ifndef NO_EMBEDDED_ACCESS_CHECKS -static HASH hash_user_connections; - -static int get_or_create_user_conn(THD *thd, const char *user, - const char *host, - USER_RESOURCES *mqh) -{ - int return_val= 0; - uint temp_len, user_len; - char temp_user[USER_HOST_BUFF_SIZE]; - struct user_conn *uc; - - DBUG_ASSERT(user != 0); - DBUG_ASSERT(host != 0); - - user_len= strlen(user); - temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1; - (void) pthread_mutex_lock(&LOCK_user_conn); - if (!(uc = (struct user_conn *) hash_search(&hash_user_connections, - (byte*) temp_user, temp_len))) - { - /* First connection for user; Create a user connection object */ - if (!(uc= ((struct user_conn*) - my_malloc(sizeof(struct user_conn) + temp_len+1, - MYF(MY_WME))))) - { - net_send_error(thd, 0, NullS); // Out of memory - return_val= 1; - goto end; - } - uc->user=(char*) (uc+1); - memcpy(uc->user,temp_user,temp_len+1); - uc->host= uc->user + user_len + 1; - uc->len= temp_len; - uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0; - uc->user_resources= *mqh; - uc->intime= thd->thr_create_time; - if (my_hash_insert(&hash_user_connections, (byte*) uc)) - { - my_free((char*) uc,0); - net_send_error(thd, 0, NullS); // Out of memory - return_val= 1; - goto end; - } - } - thd->user_connect=uc; - uc->connections++; -end: - (void) pthread_mutex_unlock(&LOCK_user_conn); - return return_val; - -} -#endif /* !NO_EMBEDDED_ACCESS_CHECKS */ - - -/* - Check if user exist and password supplied is correct. - - SYNOPSIS - check_user() - thd thread handle, thd->security_ctx->{host,user,ip} are used - command originator of the check: now check_user is called - during connect and change user procedures; used for - logging. - passwd scrambled password received from client - passwd_len length of scrambled password - db database name to connect to, may be NULL - check_count dont know exactly - - Note, that host, user and passwd may point to communication buffer. - Current implementation does not depend on that, but future changes - should be done with this in mind; 'thd' is INOUT, all other params - are 'IN'. 
- - RETURN VALUE - 0 OK; thd->security_ctx->user/master_access/priv_user/db_access and - thd->db are updated; OK is sent to client; - -1 access denied or handshake error; error is sent to client; - >0 error, not sent to client -*/ - -int check_user(THD *thd, enum enum_server_command command, - const char *passwd, uint passwd_len, const char *db, - bool check_count) -{ - DBUG_ENTER("check_user"); - -#ifdef NO_EMBEDDED_ACCESS_CHECKS - thd->main_security_ctx.master_access= GLOBAL_ACLS; // Full rights - /* Change database if necessary */ - if (db && db[0]) - { - /* - thd->db is saved in caller and needs to be freed by caller if this - function returns 0 - */ - thd->reset_db(NULL, 0); - if (mysql_change_db(thd, db, FALSE)) - { - /* Send the error to the client */ - net_send_error(thd); - DBUG_RETURN(-1); - } - } - send_ok(thd); - DBUG_RETURN(0); -#else - - my_bool opt_secure_auth_local; - pthread_mutex_lock(&LOCK_global_system_variables); - opt_secure_auth_local= opt_secure_auth; - pthread_mutex_unlock(&LOCK_global_system_variables); - - /* - If the server is running in secure auth mode, short scrambles are - forbidden. - */ - if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323) - { - net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); - general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); - DBUG_RETURN(-1); - } - if (passwd_len != 0 && - passwd_len != SCRAMBLE_LENGTH && - passwd_len != SCRAMBLE_LENGTH_323) - DBUG_RETURN(ER_HANDSHAKE_ERROR); - - /* - Clear thd->db as it points to something, that will be freed when - connection is closed. We don't want to accidentally free a wrong pointer - if connect failed. Also in case of 'CHANGE USER' failure, current - database will be switched to 'no database selected'. - */ - thd->reset_db(NULL, 0); - - USER_RESOURCES ur; - int res= acl_getroot(thd, &ur, passwd, passwd_len); -#ifndef EMBEDDED_LIBRARY - if (res == -1) - { - /* - This happens when client (new) sends password scrambled with - scramble(), but database holds old value (scrambled with - scramble_323()). Here we please client to send scrambled_password - in old format. - */ - NET *net= &thd->net; - if (opt_secure_auth_local) - { - net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip); - general_log_print(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip); - DBUG_RETURN(-1); - } - /* We have to read very specific packet size */ - if (send_old_password_request(thd) || - my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) - { - inc_host_errors(&thd->remote.sin_addr); - DBUG_RETURN(ER_HANDSHAKE_ERROR); - } - /* Final attempt to check the user based on reply */ - /* So as passwd is short, errcode is always >= 0 */ - res= acl_getroot(thd, &ur, (char *) net->read_pos, SCRAMBLE_LENGTH_323); - } -#endif /*EMBEDDED_LIBRARY*/ - /* here res is always >= 0 */ - if (res == 0) - { - if (!(thd->main_security_ctx.master_access & - NO_ACCESS)) // authentication is OK - { - DBUG_PRINT("info", - ("Capabilities: %lu packet_length: %ld Host: '%s' " - "Login user: '%s' Priv_user: '%s' Using password: %s " - "Access: %lu db: '%s'", - thd->client_capabilities, - thd->max_client_packet_length, - thd->main_security_ctx.host_or_ip, - thd->main_security_ctx.user, - thd->main_security_ctx.priv_user, - passwd_len ? "yes": "no", - thd->main_security_ctx.master_access, - (thd->db ? 
thd->db : "*none*"))); - - if (check_count) - { - VOID(pthread_mutex_lock(&LOCK_thread_count)); - bool count_ok= thread_count <= max_connections + delayed_insert_threads - || (thd->main_security_ctx.master_access & SUPER_ACL); - VOID(pthread_mutex_unlock(&LOCK_thread_count)); - if (!count_ok) - { // too many connections - net_send_error(thd, ER_CON_COUNT_ERROR); - DBUG_RETURN(-1); - } - } - - /* - Log the command before authentication checks, so that the user can - check the log for the tried login tried and also to detect - break-in attempts. - */ - general_log_print(thd, command, - (thd->main_security_ctx.priv_user == - thd->main_security_ctx.user ? - (char*) "%s@%s on %s" : - (char*) "%s@%s as anonymous on %s"), - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip, - db ? db : (char*) ""); - - /* - This is the default access rights for the current database. It's - set to 0 here because we don't have an active database yet (and we - may not have an active database to set. - */ - thd->main_security_ctx.db_access=0; - - /* Don't allow user to connect if he has done too many queries */ - if ((ur.questions || ur.updates || ur.conn_per_hour || ur.user_conn || - max_user_connections) && - get_or_create_user_conn(thd, - (opt_old_style_user_limits ? thd->main_security_ctx.user : - thd->main_security_ctx.priv_user), - (opt_old_style_user_limits ? thd->main_security_ctx.host_or_ip : - thd->main_security_ctx.priv_host), - &ur)) - DBUG_RETURN(-1); - if (thd->user_connect && - (thd->user_connect->user_resources.conn_per_hour || - thd->user_connect->user_resources.user_conn || - max_user_connections) && - check_for_max_user_connections(thd, thd->user_connect)) - DBUG_RETURN(-1); - - /* Change database if necessary */ - if (db && db[0]) - { - if (mysql_change_db(thd, db, FALSE)) - { - /* Send error to the client */ - net_send_error(thd); - if (thd->user_connect) - decrease_user_connections(thd->user_connect); - DBUG_RETURN(-1); - } - } - send_ok(thd); - thd->password= test(passwd_len); // remember for error messages - /* Ready to handle queries */ - DBUG_RETURN(0); - } - } - else if (res == 2) // client gave short hash, server has long hash - { - net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); - general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); - DBUG_RETURN(-1); - } - net_printf_error(thd, ER_ACCESS_DENIED_ERROR, - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip, - passwd_len ? ER(ER_YES) : ER(ER_NO)); - general_log_print(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip, - passwd_len ? ER(ER_YES) : ER(ER_NO)); - DBUG_RETURN(-1); -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ -} - -/* - Check for maximum allowable user connections, if the mysqld server is - started with corresponding variable that is greater then 0. 
-*/ - -extern "C" byte *get_key_conn(user_conn *buff, uint *length, - my_bool not_used __attribute__((unused))) -{ - *length=buff->len; - return (byte*) buff->user; -} - -extern "C" void free_user(struct user_conn *uc) -{ - my_free((char*) uc,MYF(0)); -} - -void init_max_user_conn(void) -{ -#ifndef NO_EMBEDDED_ACCESS_CHECKS - (void) hash_init(&hash_user_connections,system_charset_info,max_connections, - 0,0, - (hash_get_key) get_key_conn, (hash_free_key) free_user, - 0); -#endif -} - - -/* - check if user has already too many connections - - SYNOPSIS - check_for_max_user_connections() - thd Thread handle - uc User connect object - - NOTES - If check fails, we decrease user connection count, which means one - shouldn't call decrease_user_connections() after this function. - - RETURN - 0 ok - 1 error -*/ - -#ifndef NO_EMBEDDED_ACCESS_CHECKS - -static int check_for_max_user_connections(THD *thd, USER_CONN *uc) -{ - int error=0; - DBUG_ENTER("check_for_max_user_connections"); - - (void) pthread_mutex_lock(&LOCK_user_conn); - if (max_user_connections && !uc->user_resources.user_conn && - max_user_connections < (uint) uc->connections) - { - net_printf_error(thd, ER_TOO_MANY_USER_CONNECTIONS, uc->user); - error=1; - goto end; - } - time_out_user_resource_limits(thd, uc); - if (uc->user_resources.user_conn && - uc->user_resources.user_conn < uc->connections) - { - net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, - "max_user_connections", - (long) uc->user_resources.user_conn); - error= 1; - goto end; - } - if (uc->user_resources.conn_per_hour && - uc->user_resources.conn_per_hour <= uc->conn_per_hour) - { - net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, - "max_connections_per_hour", - (long) uc->user_resources.conn_per_hour); - error=1; - goto end; - } - uc->conn_per_hour++; - - end: - if (error) - uc->connections--; // no need for decrease_user_connections() here - (void) pthread_mutex_unlock(&LOCK_user_conn); - DBUG_RETURN(error); -} - -/* - Decrease user connection count - - SYNOPSIS - decrease_user_connections() - uc User connection object - - NOTES - If there is a n user connection object for a connection - (which only happens if 'max_user_connections' is defined or - if someone has created a resource grant for a user), then - the connection count is always incremented on connect. - - The user connect object is not freed if some users has - 'max connections per hour' defined as we need to be able to hold - count over the lifetime of the connection. 
-*/ - -static void decrease_user_connections(USER_CONN *uc) -{ - DBUG_ENTER("decrease_user_connections"); - (void) pthread_mutex_lock(&LOCK_user_conn); - DBUG_ASSERT(uc->connections); - if (!--uc->connections && !mqh_used) - { - /* Last connection for user; Delete it */ - (void) hash_delete(&hash_user_connections,(byte*) uc); - } - (void) pthread_mutex_unlock(&LOCK_user_conn); - DBUG_VOID_RETURN; -} - -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ - - -void free_max_user_conn(void) -{ -#ifndef NO_EMBEDDED_ACCESS_CHECKS - hash_free(&hash_user_connections); -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ -} - - /* Mark all commands that somehow changes a table @@ -701,402 +259,6 @@ bool is_update_query(enum enum_sql_command command) return (sql_command_flags[command] & CF_CHANGES_DATA) != 0; } -/* - Reset per-hour user resource limits when it has been more than - an hour since they were last checked - - SYNOPSIS: - time_out_user_resource_limits() - thd Thread handler - uc User connection details - - NOTE: - This assumes that the LOCK_user_conn mutex has been acquired, so it is - safe to test and modify members of the USER_CONN structure. -*/ - -#ifndef NO_EMBEDDED_ACCESS_CHECKS - -static void time_out_user_resource_limits(THD *thd, USER_CONN *uc) -{ - time_t check_time = thd->start_time ? thd->start_time : time(NULL); - DBUG_ENTER("time_out_user_resource_limits"); - - /* If more than a hour since last check, reset resource checking */ - if (check_time - uc->intime >= 3600) - { - uc->questions=1; - uc->updates=0; - uc->conn_per_hour=0; - uc->intime=check_time; - } - - DBUG_VOID_RETURN; -} - -/* - Check if maximum queries per hour limit has been reached - returns 0 if OK. -*/ - -static bool check_mqh(THD *thd, uint check_command) -{ - bool error= 0; - USER_CONN *uc=thd->user_connect; - DBUG_ENTER("check_mqh"); - DBUG_ASSERT(uc != 0); - - (void) pthread_mutex_lock(&LOCK_user_conn); - - time_out_user_resource_limits(thd, uc); - - /* Check that we have not done too many questions / hour */ - if (uc->user_resources.questions && - uc->questions++ >= uc->user_resources.questions) - { - net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", - (long) uc->user_resources.questions); - error=1; - goto end; - } - if (check_command < (uint) SQLCOM_END) - { - /* Check that we have not done too many updates / hour */ - if (uc->user_resources.updates && - (sql_command_flags[check_command] & CF_CHANGES_DATA) && - uc->updates++ >= uc->user_resources.updates) - { - net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", - (long) uc->user_resources.updates); - error=1; - goto end; - } - } -end: - (void) pthread_mutex_unlock(&LOCK_user_conn); - DBUG_RETURN(error); -} - -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ - - -static void reset_mqh(LEX_USER *lu, bool get_them= 0) -{ -#ifndef NO_EMBEDDED_ACCESS_CHECKS - (void) pthread_mutex_lock(&LOCK_user_conn); - if (lu) // for GRANT - { - USER_CONN *uc; - uint temp_len=lu->user.length+lu->host.length+2; - char temp_user[USER_HOST_BUFF_SIZE]; - - memcpy(temp_user,lu->user.str,lu->user.length); - memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length); - temp_user[lu->user.length]='\0'; temp_user[temp_len-1]=0; - if ((uc = (struct user_conn *) hash_search(&hash_user_connections, - (byte*) temp_user, temp_len))) - { - uc->questions=0; - get_mqh(temp_user,&temp_user[lu->user.length+1],uc); - uc->updates=0; - uc->conn_per_hour=0; - } - } - else - { - /* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */ - for (uint idx=0;idx < hash_user_connections.records; 
idx++) - { - USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections, - idx); - if (get_them) - get_mqh(uc->user,uc->host,uc); - uc->questions=0; - uc->updates=0; - uc->conn_per_hour=0; - } - } - (void) pthread_mutex_unlock(&LOCK_user_conn); -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ -} - -void thd_init_client_charset(THD *thd, uint cs_number) -{ - /* - Use server character set and collation if - - opt_character_set_client_handshake is not set - - client has not specified a character set - - client character set is the same as the servers - - client character set doesn't exists in server - */ - if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || - !my_strcasecmp(&my_charset_latin1, - global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) - { - thd->variables.character_set_client= - global_system_variables.character_set_client; - thd->variables.collation_connection= - global_system_variables.collation_connection; - thd->variables.character_set_results= - global_system_variables.character_set_results; - } - else - { - thd->variables.character_set_results= - thd->variables.collation_connection= - thd->variables.character_set_client; - } -} - - -/* - Perform handshake, authorize client and update thd ACL variables. - SYNOPSIS - check_connection() - thd thread handle - - RETURN - 0 success, OK is sent to user, thd is updated. - -1 error, which is sent to user - > 0 error code (not sent to user) -*/ - -#ifndef EMBEDDED_LIBRARY -static int check_connection(THD *thd) -{ - uint connect_errors= 0; - NET *net= &thd->net; - ulong pkt_len= 0; - char *end; - - DBUG_PRINT("info", - ("New connection received on %s", vio_description(net->vio))); -#ifdef SIGNAL_WITH_VIO_CLOSE - thd->set_active_vio(net->vio); -#endif - - if (!thd->main_security_ctx.host) // If TCP/IP connection - { - char ip[30]; - - if (vio_peer_addr(net->vio, ip, &thd->peer_port)) - return (ER_BAD_HOST_ERROR); - if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(0)))) - return (ER_OUT_OF_RESOURCES); - thd->main_security_ctx.host_or_ip= thd->main_security_ctx.ip; - vio_in_addr(net->vio,&thd->remote.sin_addr); - if (!(specialflag & SPECIAL_NO_RESOLVE)) - { - vio_in_addr(net->vio,&thd->remote.sin_addr); - thd->main_security_ctx.host= - ip_to_hostname(&thd->remote.sin_addr, &connect_errors); - /* Cut very long hostnames to avoid possible overflows */ - if (thd->main_security_ctx.host) - { - if (thd->main_security_ctx.host != my_localhost) - thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host), - HOSTNAME_LENGTH)]= 0; - thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; - } - if (connect_errors > max_connect_errors) - return(ER_HOST_IS_BLOCKED); - } - DBUG_PRINT("info",("Host: %s ip: %s", - (thd->main_security_ctx.host ? - thd->main_security_ctx.host : "unknown host"), - (thd->main_security_ctx.ip ? 
- thd->main_security_ctx.ip : "unknown ip"))); - if (acl_check_host(thd->main_security_ctx.host, thd->main_security_ctx.ip)) - return(ER_HOST_NOT_PRIVILEGED); - } - else /* Hostname given means that the connection was on a socket */ - { - DBUG_PRINT("info",("Host: %s", thd->main_security_ctx.host)); - thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; - thd->main_security_ctx.ip= 0; - /* Reset sin_addr */ - bzero((char*) &thd->remote, sizeof(thd->remote)); - } - vio_keepalive(net->vio, TRUE); - { - /* buff[] needs to big enough to hold the server_version variable */ - char buff[SERVER_VERSION_LENGTH + SCRAMBLE_LENGTH + 64]; - ulong client_flags = (CLIENT_LONG_FLAG | CLIENT_CONNECT_WITH_DB | - CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION); - - if (opt_using_transactions) - client_flags|=CLIENT_TRANSACTIONS; -#ifdef HAVE_COMPRESS - client_flags |= CLIENT_COMPRESS; -#endif /* HAVE_COMPRESS */ -#ifdef HAVE_OPENSSL - if (ssl_acceptor_fd) - client_flags |= CLIENT_SSL; /* Wow, SSL is available! */ -#endif /* HAVE_OPENSSL */ - - end= strnmov(buff, server_version, SERVER_VERSION_LENGTH) + 1; - int4store((uchar*) end, thd->thread_id); - end+= 4; - /* - So as check_connection is the only entry point to authorization - procedure, scramble is set here. This gives us new scramble for - each handshake. - */ - create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand); - /* - Old clients does not understand long scrambles, but can ignore packet - tail: that's why first part of the scramble is placed here, and second - part at the end of packet. - */ - end= strmake(end, thd->scramble, SCRAMBLE_LENGTH_323) + 1; - - int2store(end, client_flags); - /* write server characteristics: up to 16 bytes allowed */ - end[2]=(char) default_charset_info->number; - int2store(end+3, thd->server_status); - bzero(end+5, 13); - end+= 18; - /* write scramble tail */ - end= strmake(end, thd->scramble + SCRAMBLE_LENGTH_323, - SCRAMBLE_LENGTH - SCRAMBLE_LENGTH_323) + 1; - - /* At this point we write connection message and read reply */ - if (net_write_command(net, (uchar) protocol_version, "", 0, buff, - (uint) (end-buff)) || - (pkt_len= my_net_read(net)) == packet_error || - pkt_len < MIN_HANDSHAKE_SIZE) - { - inc_host_errors(&thd->remote.sin_addr); - return(ER_HANDSHAKE_ERROR); - } - } -#ifdef _CUSTOMCONFIG_ -#include "_cust_sql_parse.h" -#endif - if (connect_errors) - reset_host_errors(&thd->remote.sin_addr); - if (thd->packet.alloc(thd->variables.net_buffer_length)) - return(ER_OUT_OF_RESOURCES); - - thd->client_capabilities=uint2korr(net->read_pos); - if (thd->client_capabilities & CLIENT_PROTOCOL_41) - { - thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; - thd->max_client_packet_length= uint4korr(net->read_pos+4); - DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - thd_init_client_charset(thd, (uint) net->read_pos[8]); - thd->update_charset(); - end= (char*) net->read_pos+32; - } - else - { - thd->max_client_packet_length= uint3korr(net->read_pos+2); - end= (char*) net->read_pos+5; - } - - if (thd->client_capabilities & CLIENT_IGNORE_SPACE) - thd->variables.sql_mode|= MODE_IGNORE_SPACE; -#ifdef HAVE_OPENSSL - DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities)); - if (thd->client_capabilities & CLIENT_SSL) - { - /* Do the SSL layering. 
*/ - if (!ssl_acceptor_fd) - { - inc_host_errors(&thd->remote.sin_addr); - return(ER_HANDSHAKE_ERROR); - } - DBUG_PRINT("info", ("IO layer change in progress...")); - if (sslaccept(ssl_acceptor_fd, net->vio, net->read_timeout)) - { - DBUG_PRINT("error", ("Failed to accept new SSL connection")); - inc_host_errors(&thd->remote.sin_addr); - return(ER_HANDSHAKE_ERROR); - } - DBUG_PRINT("info", ("Reading user information over SSL layer")); - if ((pkt_len= my_net_read(net)) == packet_error || - pkt_len < NORMAL_HANDSHAKE_SIZE) - { - DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", - pkt_len)); - inc_host_errors(&thd->remote.sin_addr); - return(ER_HANDSHAKE_ERROR); - } - } -#endif - - if (end >= (char*) net->read_pos+ pkt_len +2) - { - inc_host_errors(&thd->remote.sin_addr); - return(ER_HANDSHAKE_ERROR); - } - - if (thd->client_capabilities & CLIENT_INTERACTIVE) - thd->variables.net_wait_timeout= thd->variables.net_interactive_timeout; - if ((thd->client_capabilities & CLIENT_TRANSACTIONS) && - opt_using_transactions) - net->return_status= &thd->server_status; - - char *user= end; - char *passwd= strend(user)+1; - uint user_len= passwd - user - 1; - char *db= passwd; - char db_buff[NAME_LEN + 1]; // buffer to store db in utf8 - char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 - uint dummy_errors; - - /* - Old clients send null-terminated string as password; new clients send - the size (1 byte) + string (not null-terminated). Hence in case of empty - password both send '\0'. - - This strlen() can't be easily deleted without changing protocol. - */ - uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? - *passwd++ : strlen(passwd); - db= thd->client_capabilities & CLIENT_CONNECT_WITH_DB ? - db + passwd_len + 1 : 0; - /* strlen() can't be easily deleted without changing protocol */ - uint db_len= db ? 
strlen(db) : 0; - - if (passwd + passwd_len + db_len > (char *)net->read_pos + pkt_len) - { - inc_host_errors(&thd->remote.sin_addr); - return ER_HANDSHAKE_ERROR; - } - - /* Since 4.1 all database names are stored in utf8 */ - if (db) - { - db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1, - system_charset_info, - db, db_len, - thd->charset(), &dummy_errors)]= 0; - db= db_buff; - } - - user_buff[user_len= copy_and_convert(user_buff, sizeof(user_buff)-1, - system_charset_info, user, user_len, - thd->charset(), &dummy_errors)]= '\0'; - user= user_buff; - - /* If username starts and ends in "'", chop them off */ - if (user_len > 1 && user[0] == '\'' && user[user_len - 1] == '\'') - { - user[user_len-1]= 0; - user++; - user_len-= 2; - } - - if (thd->main_security_ctx.user) - x_free(thd->main_security_ctx.user); - if (!(thd->main_security_ctx.user= my_strdup(user, MYF(0)))) - return (ER_OUT_OF_RESOURCES); - return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE); -} - void execute_init_command(THD *thd, sys_var_str *init_command_var, rw_lock_t *var_mutex) @@ -1129,153 +291,6 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var, } -pthread_handler_t handle_one_connection(void *arg) -{ - THD *thd=(THD*) arg; - uint launch_time = - (uint) ((thd->thr_create_time = time(NULL)) - thd->connect_time); - if (launch_time >= slow_launch_time) - statistic_increment(slow_launch_threads,&LOCK_status ); - - pthread_detach_this_thread(); - -#if !defined( __WIN__) // Win32 calls this in pthread_create - /* The following calls needs to be done before we call DBUG_ macros */ - if (!(test_flags & TEST_NO_THREADS) & my_thread_init()) - { - close_connection(thd, ER_OUT_OF_RESOURCES, 1); - statistic_increment(aborted_connects,&LOCK_status); - end_thread(thd,0); - return 0; - } -#endif - - /* - handle_one_connection() is the only way a thread would start - and would always be on top of the stack, therefore, the thread - stack always starts at the address of the first local variable - of handle_one_connection, which is thd. We need to know the - start of the stack so that we could check for stack overruns. 
- */ - DBUG_PRINT("info", ("handle_one_connection called by thread %lu\n", - thd->thread_id)); - /* now that we've called my_thread_init(), it is safe to call DBUG_* */ - -#if defined(__WIN__) - init_signals(); -#elif !defined(__NETWARE__) - sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use - VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); -#endif - thd->thread_stack= (char*) &thd; - if (thd->store_globals()) - { - close_connection(thd, ER_OUT_OF_RESOURCES, 1); - statistic_increment(aborted_connects,&LOCK_status); - end_thread(thd,0); - return 0; - } - - do - { - int error; - NET *net= &thd->net; - Security_context *sctx= thd->security_ctx; - net->no_send_error= 0; - - /* Use "connect_timeout" value during connection phase */ - net_set_read_timeout(net, connect_timeout); - net_set_write_timeout(net, connect_timeout); - - if ((error=check_connection(thd))) - { // Wrong permissions - if (error > 0) - net_printf_error(thd, error, sctx->host_or_ip); -#ifdef __NT__ - if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) - my_sleep(1000); /* must wait after eof() */ -#endif - statistic_increment(aborted_connects,&LOCK_status); - goto end_thread; - } -#ifdef __NETWARE__ - netware_reg_user(sctx->ip, sctx->user, "MySQL"); -#endif - if (thd->variables.max_join_size == HA_POS_ERROR) - thd->options |= OPTION_BIG_SELECTS; - if (thd->client_capabilities & CLIENT_COMPRESS) - net->compress=1; // Use compression - - thd->version= refresh_version; - thd->proc_info= 0; - thd->command= COM_SLEEP; - thd->set_time(); - thd->init_for_queries(); - - if (sys_init_connect.value_length && !(sctx->master_access & SUPER_ACL)) - { - execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect); - if (thd->query_error) - { - thd->killed= THD::KILL_CONNECTION; - sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), - thd->thread_id,(thd->db ? thd->db : "unconnected"), - sctx->user ? sctx->user : "unauthenticated", - sctx->host_or_ip, "init_connect command failed"); - sql_print_warning("%s", net->last_error); - } - thd->proc_info=0; - thd->set_time(); - thd->init_for_queries(); - } - - /* Connect completed, set read/write timeouts back to tdefault */ - net_set_read_timeout(net, thd->variables.net_read_timeout); - net_set_write_timeout(net, thd->variables.net_write_timeout); - - while (!net->error && net->vio != 0 && - !(thd->killed == THD::KILL_CONNECTION)) - { - net->no_send_error= 0; - if (do_command(thd)) - break; - } - if (thd->user_connect) - decrease_user_connections(thd->user_connect); - if (net->error && net->vio != 0 && net->report_error) - { - if (!thd->killed && thd->variables.log_warnings > 1) - sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), - thd->thread_id,(thd->db ? thd->db : "unconnected"), - sctx->user ? sctx->user : "unauthenticated", - sctx->host_or_ip, - (net->last_errno ? 
ER(net->last_errno) : - ER(ER_UNKNOWN_ERROR))); - net_send_error(thd, net->last_errno, NullS); - statistic_increment(aborted_threads,&LOCK_status); - } - else if (thd->killed) - { - statistic_increment(aborted_threads,&LOCK_status); - } - -end_thread: - close_connection(thd, 0, 1); - end_thread(thd,1); - /* - If end_thread returns, we are either running with --one-thread - or this thread has been schedule to handle the next query - */ - thd= current_thd; - thd->thread_stack= (char*) &thd; - } while (!(test_flags & TEST_NO_THREADS)); - /* The following is only executed if we are not using --one-thread */ - return(0); /* purecov: deadcode */ -} - -#endif /* EMBEDDED_LIBRARY */ - /* Execute commands from bootstrap_file. Used when creating the initial grant tables @@ -1302,11 +317,6 @@ pthread_handler_t handle_bootstrap(void *arg) #ifndef EMBEDDED_LIBRARY pthread_detach_this_thread(); thd->thread_stack= (char*) &thd; -#if !defined(__WIN__) && !defined(__NETWARE__) - sigset_t set; - VOID(sigemptyset(&set)); // Get mask in use - VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); -#endif #endif /* EMBEDDED_LIBRARY */ if (thd->variables.max_join_size == HA_POS_ERROR) @@ -1359,23 +369,35 @@ pthread_handler_t handle_bootstrap(void *arg) thd->query= thd->memdup_w_gap(buff, length+1, thd->db_length+1+QUERY_CACHE_FLAGS_SIZE); thd->query[length] = '\0'; + DBUG_PRINT("query",("%-.4096s",thd->query)); /* We don't need to obtain LOCK_thread_count here because in bootstrap mode we have only one thread. */ thd->query_id=next_query_id(); + thd->set_time(); mysql_parse(thd,thd->query,length); close_thread_tables(thd); // Free tables + if (thd->is_fatal_error) break; + + if (thd->net.report_error) + { + /* The query failed, send error to log and abort bootstrap */ + net_send_error(thd); + thd->fatal_error(); + break; + } + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); #ifdef USING_TRANSACTIONS free_root(&thd->transaction.mem_root,MYF(MY_KEEP_PREALLOC)); #endif } - /* thd->fatal_error should be set in case something went wrong */ end: + /* Remember the exit code of bootstrap */ bootstrap_error= thd->is_fatal_error; net_end(&thd->net); @@ -1667,7 +689,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, DBUG_ENTER("dispatch_command"); if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA) + { thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; + } thd->command=command; /* @@ -1958,7 +983,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; /* init structures for VIEW processing */ table_list.select_lex= &(thd->lex->select_lex); - mysql_init_query(thd, (uchar*)"", 0); + mysql_init_query(thd, "", 0); thd->lex-> select_lex.table_list.link_in_list((byte*) &table_list, (byte**) &table_list.next_local); @@ -2085,7 +1110,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, */ enum mysql_enum_shutdown_level level= (enum mysql_enum_shutdown_level) (uchar) packet[0]; - DBUG_PRINT("quit",("Got shutdown command for level %u", level)); if (level == SHUTDOWN_DEFAULT) level= SHUTDOWN_WAIT_ALL_BUFFERS; // soon default will be configurable else if (level != SHUTDOWN_WAIT_ALL_BUFFERS) @@ -2393,8 +1417,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, /* 'parent_lex' is used in init_query() so it must be before it. 
*/ sel->parent_lex= lex; sel->init_query(); - if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, - (List<String> *) 0, (List<String> *) 0)) + if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ)) DBUG_RETURN(1); lex->query_tables_last= query_tables_last; TABLE_LIST *table_list= (TABLE_LIST*) sel->table_list.first; @@ -2504,6 +1527,91 @@ static void reset_one_shot_variables(THD *thd) } +static +bool sp_process_definer(THD *thd) +{ + DBUG_ENTER("sp_process_definer"); + + LEX *lex= thd->lex; + + /* + If the definer is not specified, this means that CREATE-statement missed + DEFINER-clause. DEFINER-clause can be missed in two cases: + + - The user submitted a statement w/o the clause. This is a normal + case, we should assign CURRENT_USER as definer. + + - Our slave received an updated from the master, that does not + replicate definer for stored rountines. We should also assign + CURRENT_USER as definer here, but also we should mark this routine + as NON-SUID. This is essential for the sake of backward + compatibility. + + The problem is the slave thread is running under "special" user (@), + that actually does not exist. In the older versions we do not fail + execution of a stored routine if its definer does not exist and + continue the execution under the authorization of the invoker + (BUG#13198). And now if we try to switch to slave-current-user (@), + we will fail. + + Actually, this leads to the inconsistent state of master and + slave (different definers, different SUID behaviour), but it seems, + this is the best we can do. + */ + + if (!lex->definer) + { + Query_arena original_arena; + Query_arena *ps_arena= thd->activate_stmt_arena_if_needed(&original_arena); + + lex->definer= create_default_definer(thd); + + if (ps_arena) + thd->restore_active_arena(ps_arena, &original_arena); + + /* Error has been already reported. */ + if (lex->definer == NULL) + DBUG_RETURN(TRUE); + + if (thd->slave_thread) + lex->sphead->m_chistics->suid= SP_IS_NOT_SUID; + } + else + { + /* + If the specified definer differs from the current user, we + should check that the current user has SUPER privilege (in order + to create a stored routine under another user one must have + SUPER privilege). + */ + if ((strcmp(lex->definer->user.str, thd->security_ctx->priv_user) || + my_strcasecmp(system_charset_info, lex->definer->host.str, + thd->security_ctx->priv_host)) && + check_global_access(thd, SUPER_ACL)) + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + DBUG_RETURN(TRUE); + } + } + + /* Check that the specified definer exists. Emit a warning if not. */ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (!is_acl_user(lex->definer->host.str, lex->definer->user.str)) + { + push_warning_printf(thd, + MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_NO_SUCH_USER, + ER(ER_NO_SUCH_USER), + lex->definer->user.str, + lex->definer->host.str); + } +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + + DBUG_RETURN(FALSE); +} + + /* Execute command saved in thd and lex->sql_command @@ -2581,13 +1689,36 @@ mysql_execute_command(THD *thd) Don't reset warnings when executing a stored routine. */ if ((all_tables || &lex->select_lex != lex->all_selects_list || - lex->sroutines.records) && !thd->spcont || - lex->time_zone_tables_used) + lex->sroutines.records) && !thd->spcont) mysql_reset_errors(thd, 0); #ifdef HAVE_REPLICATION if (unlikely(thd->slave_thread)) { + if (lex->sql_command == SQLCOM_DROP_TRIGGER) + { + /* + When dropping a trigger, we need to load its table name + before checking slave filter rules. 
+ */ + add_table_for_trigger(thd, thd->lex->spname, 1, &all_tables); + + if (!all_tables) + { + /* + If table name cannot be loaded, + it means the trigger does not exists possibly because + CREATE TRIGGER was previously skipped for this trigger + according to slave filtering rules. + Returning success without producing any errors in this case. + */ + DBUG_RETURN(0); + } + + // force searching in slave.cc:tables_ok() + all_tables->updating= 1; + } + /* Check if statment should be skipped because of slave filtering rules @@ -3041,7 +2172,7 @@ mysql_execute_command(THD *thd) if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, create_table, select_tables))) + if ((duplicate= unique_table(thd, create_table, select_tables, 0))) { update_non_unique_table_error(create_table, "CREATE", duplicate); res= 1; @@ -3057,7 +2188,7 @@ mysql_execute_command(THD *thd) tab= tab->next_local) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, tab, select_tables))) + if ((duplicate= unique_table(thd, tab, select_tables, 0))) { update_non_unique_table_error(tab, "CREATE", duplicate); res= 1; @@ -3313,7 +2444,9 @@ end_with_restore_list: /* ! we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - /* Presumably, REPAIR and binlog writing doesn't require synchronization */ + /* + Presumably, REPAIR and binlog writing doesn't require synchronization + */ if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -3346,7 +2479,9 @@ end_with_restore_list: /* ! we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - /* Presumably, ANALYZE and binlog writing doesn't require synchronization */ + /* + Presumably, ANALYZE and binlog writing doesn't require synchronization + */ if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -3371,7 +2506,9 @@ end_with_restore_list: /* ! we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - /* Presumably, OPTIMIZE and binlog writing doesn't require synchronization */ + /* + Presumably, OPTIMIZE and binlog writing doesn't require synchronization + */ if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -3458,6 +2595,36 @@ end_with_restore_list: break; } case SQLCOM_REPLACE: +#ifndef DBUG_OFF + if (mysql_bin_log.is_open()) + { + /* + Generate an incident log event before writing the real event + to the binary log. We put this event is before the statement + since that makes it simpler to check that the statement was + not executed on the slave (since incidents usually stop the + slave). + + Observe that any row events that are generated will be + generated before. + + This is only for testing purposes and will not be present in a + release build. 
+ */ + + Incident incident= INCIDENT_NONE; + DBUG_PRINT("debug", ("Just before generate_incident()")); + DBUG_EXECUTE_IF("incident_database_resync_on_replace", + incident= INCIDENT_LOST_EVENTS;); + if (incident) + { + Incident_log_event ev(thd, incident); + mysql_bin_log.write(&ev); + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + } + DBUG_PRINT("debug", ("Just after generate_incident()")); + } +#endif case SQLCOM_INSERT: { DBUG_ASSERT(first_table == all_tables && first_table != 0); @@ -4007,6 +3174,11 @@ end_with_restore_list: "function calls as part of this statement"); break; } + + res= sp_process_definer(thd); + if (res) + break; + switch (lex->sql_command) { case SQLCOM_CREATE_EVENT: res= Events::get_instance()-> @@ -4061,8 +3233,7 @@ end_with_restore_list: if (!(res= Events::get_instance()->drop_event(thd, lex->spname->m_db, lex->spname->m_name, - lex->drop_if_exists, - FALSE))) + lex->drop_if_exists))) send_ok(thd); } break; @@ -4230,7 +3401,7 @@ end_with_restore_list: { if (!(user= get_current_user(thd, tmp_user))) goto error; - reset_mqh(user); + reset_mqh(user, 0); } } } @@ -4260,7 +3431,9 @@ end_with_restore_list: We WANT to write and we CAN write. ! we write after unlocking the table. */ - /* Presumably, RESET and binlog writing doesn't require synchronization */ + /* + Presumably, RESET and binlog writing doesn't require synchronization + */ if (!lex->no_write_to_binlog && write_to_binlog) { if (mysql_bin_log.is_open()) @@ -4502,83 +3675,8 @@ end_with_restore_list: } #endif - /* - If the definer is not specified, this means that CREATE-statement missed - DEFINER-clause. DEFINER-clause can be missed in two cases: - - - The user submitted a statement w/o the clause. This is a normal - case, we should assign CURRENT_USER as definer. - - - Our slave received an updated from the master, that does not - replicate definer for stored rountines. We should also assign - CURRENT_USER as definer here, but also we should mark this routine - as NON-SUID. This is essential for the sake of backward - compatibility. - - The problem is the slave thread is running under "special" user (@), - that actually does not exist. In the older versions we do not fail - execution of a stored routine if its definer does not exist and - continue the execution under the authorization of the invoker - (BUG#13198). And now if we try to switch to slave-current-user (@), - we will fail. - - Actually, this leads to the inconsistent state of master and - slave (different definers, different SUID behaviour), but it seems, - this is the best we can do. - */ - - if (!lex->definer) - { - bool local_res= FALSE; - Query_arena original_arena; - Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena); - - if (!(lex->definer= create_default_definer(thd))) - local_res= TRUE; - - if (ps_arena) - thd->restore_active_arena(ps_arena, &original_arena); - - /* Error has been already reported. */ - if (local_res) - goto create_sp_error; - - if (thd->slave_thread) - lex->sphead->m_chistics->suid= SP_IS_NOT_SUID; - } - - /* - If the specified definer differs from the current user, we should check - that the current user has SUPER privilege (in order to create a stored - routine under another user one must have SUPER privilege). 
- */ - - else if (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) || - my_strcasecmp(system_charset_info, - lex->definer->host.str, - thd->security_ctx->priv_host)) - { - if (check_global_access(thd, SUPER_ACL)) - { - my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); - goto create_sp_error; - } - } - - /* Check that the specified definer exists. Emit a warning if not. */ - -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (!is_acl_user(lex->definer->host.str, - lex->definer->user.str)) - { - push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, - ER_NO_SUCH_USER, - ER(ER_NO_SUCH_USER), - lex->definer->user.str, - lex->definer->host.str); - } -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + if (sp_process_definer(thd)) + goto create_sp_error; res= (sp_result= lex->sphead->create(thd)); switch (sp_result) { @@ -4618,9 +3716,6 @@ end_with_restore_list: clean up the environment. */ create_sp_error: - lex->unit.cleanup(); - delete lex->sphead; - lex->sphead= 0; if (sp_result != SP_OK ) goto error; send_ok(thd); @@ -4840,7 +3935,7 @@ create_sp_error: ER(ER_PROC_AUTO_REVOKE_FAIL)); } #endif - /* Conditionally writes to binlog */ + /* Conditionally writes to binlog */ if (lex->sql_command == SQLCOM_DROP_PROCEDURE) sp_result= sp_drop_procedure(thd, lex->spname); else @@ -4858,7 +3953,6 @@ create_sp_error: if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0, 0)) goto error; - /* Does NOT write to binlog */ if (!(res = mysql_drop_function(thd, &lex->spname->m_name))) { send_ok(thd); @@ -4996,9 +4090,6 @@ create_sp_error: /* Conditionally writes to binlog. */ res= mysql_create_or_drop_trigger(thd, all_tables, 1); - /* We don't care about trigger body after this point */ - delete lex->sphead; - lex->sphead= 0; break; } case SQLCOM_DROP_TRIGGER: @@ -5212,6 +4303,10 @@ create_sp_error: int error; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_CREATE_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((error= create_server(thd, &lex->server_options))) { DBUG_PRINT("info", ("problem creating server <%s>", @@ -5227,6 +4322,10 @@ create_sp_error: int error; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_ALTER_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((error= alter_server(thd, &lex->server_options))) { DBUG_PRINT("info", ("problem altering server <%s>", @@ -5242,9 +4341,13 @@ create_sp_error: int err_code; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_DROP_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((err_code= drop_server(thd, &lex->server_options))) { - if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_EXISTS) + if (! 
lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST) { DBUG_PRINT("info", ("problem dropping server %s", lex->server_options.server_name)); @@ -5644,7 +4747,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, { uint found=0; ulong found_access=0; -#ifndef EMBEDDED_LIBRARY +#ifndef NO_EMBEDDED_ACCESS_CHECKS TABLE_LIST *org_tables= tables; #endif TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table(); @@ -5676,9 +4779,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, */ tables->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL); if (tables->derived || tables->schema_table || - (tables->table && (int)tables->table->s->tmp_table) || - my_tz_check_n_skip_implicit_tables(&tables, - thd->lex->time_zone_tables_used)) + (tables->table && (int)tables->table->s->tmp_table)) continue; thd->security_ctx= sctx; if ((sctx->master_access & want_access) == @@ -5910,7 +5011,7 @@ bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize) ****************************************************************************/ void -mysql_init_query(THD *thd, uchar *buf, uint length) +mysql_init_query(THD *thd, const char *buf, uint length) { DBUG_ENTER("mysql_init_query"); lex_start(thd, buf, length); @@ -6122,6 +5223,7 @@ void mysql_init_multi_delete(LEX *lex) lex->query_tables_last= &lex->query_tables; } + /* When you modify mysql_parse(), you may need to mofify mysql_test_parse_for_slave() in this same file. @@ -6133,7 +5235,8 @@ void mysql_parse(THD *thd, char *inBuf, uint length) DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on();); - mysql_init_query(thd, (uchar*) inBuf, length); + mysql_init_query(thd, inBuf, length); + if (query_cache_send_result_to_client(thd, inBuf, length) <= 0) { LEX *lex= thd->lex; @@ -6152,12 +5255,7 @@ void mysql_parse(THD *thd, char *inBuf, uint length) else #endif { - if (thd->net.report_error) - { - delete lex->sphead; - lex->sphead= NULL; - } - else + if (! thd->net.report_error) { /* Binlog logs a string starting from thd->query and having length @@ -6177,7 +5275,6 @@ void mysql_parse(THD *thd, char *inBuf, uint length) query_cache_end_of_result(thd); } } - lex->unit.cleanup(); } else { @@ -6185,19 +5282,14 @@ void mysql_parse(THD *thd, char *inBuf, uint length) DBUG_PRINT("info",("Command aborted. Fatal_error: %d", thd->is_fatal_error)); - /* - The first thing we do after parse error is freeing sp_head to - ensure that we have restored original memroot. - */ - if (lex->sphead) - { - /* Clean up after failed stored procedure/function */ - delete lex->sphead; - lex->sphead= NULL; - } query_cache_abort(&thd->net); - lex->unit.cleanup(); } + if (thd->lex->sphead) + { + delete thd->lex->sphead; + thd->lex->sphead= 0; + } + lex->unit.cleanup(); thd->proc_info="freeing items"; thd->end_statement(); thd->cleanup_after_query(); @@ -6223,7 +5315,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) bool error= 0; DBUG_ENTER("mysql_test_parse_for_slave"); - mysql_init_query(thd, (uchar*) inBuf, length); + mysql_init_query(thd, inBuf, length); if (!MYSQLparse((void*) thd) && ! 
thd->is_fatal_error && all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first)) error= 1; /* Ignore question */ @@ -6408,8 +5500,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, LEX_STRING *alias, ulong table_options, thr_lock_type lock_type, - List<String> *use_index_arg, - List<String> *ignore_index_arg, + List<index_hint> *index_hints_arg, LEX_STRING *option) { register TABLE_LIST *ptr; @@ -6484,12 +5575,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, } ptr->select_lex= lex->current_select; ptr->cacheable_table= 1; - if (use_index_arg) - ptr->use_index=(List<String> *) thd->memdup((gptr) use_index_arg, - sizeof(*use_index_arg)); - if (ignore_index_arg) - ptr->ignore_index=(List<String> *) thd->memdup((gptr) ignore_index_arg, - sizeof(*ignore_index_arg)); + ptr->index_hints= index_hints_arg; ptr->option= option ? option->str : 0; /* check that used name is unique */ if (lock_type != TL_IGNORE) @@ -6580,6 +5666,7 @@ bool st_select_lex::init_nested_join(THD *thd) join_list->push_front(ptr); ptr->embedding= embedding; ptr->join_list= join_list; + ptr->alias= (char*) "(nested_join)"; embedding= ptr; join_list= &nested_join->join_list; join_list->empty(); @@ -6664,6 +5751,7 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd) ptr->embedding= embedding; ptr->join_list= join_list; + ptr->alias= (char*) "(nest_last_join)"; embedded_list= &nested_join->join_list; embedded_list->empty(); @@ -7158,7 +6246,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } #endif if (options & REFRESH_USER_RESOURCES) - reset_mqh((LEX_USER *) NULL); + reset_mqh((LEX_USER *) NULL, 0); /* purecov: inspected */ *write_to_binlog= tmp_write_to_binlog; return result; } @@ -7455,14 +6543,12 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables) /* Is there tables of subqueries? */ - if (&lex->select_lex != lex->all_selects_list || lex->time_zone_tables_used) + if (&lex->select_lex != lex->all_selects_list) { DBUG_PRINT("info",("Checking sub query list")); for (table= tables; table; table= table->next_global) { - if (!my_tz_check_n_skip_implicit_tables(&table, - lex->time_zone_tables_used) && - !table->table_in_first_from_clause) + if (!table->table_in_first_from_clause) { if (check_access(thd, SELECT_ACL, table->db, &table->grant.privilege, 0, 0, diff --git a/sql/sql_parse.cc.rej b/sql/sql_parse.cc.rej deleted file mode 100644 index 6e2bd03867d..00000000000 --- a/sql/sql_parse.cc.rej +++ /dev/null @@ -1,166 +0,0 @@ -*************** -*** 67,109 **** - static void decrease_user_connections(USER_CONN *uc); - #endif /* NO_EMBEDDED_ACCESS_CHECKS */ - static bool check_multi_update_lock(THD *thd); -- static void remove_escape(char *name); - static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables); - - const char *any_db="*any*"; // Special symbol for check_access - -! LEX_STRING command_name[]={ -! (char *)STRING_WITH_LEN("Sleep"), -! (char *)STRING_WITH_LEN("Quit"), -! (char *)STRING_WITH_LEN("Init DB"), -! (char *)STRING_WITH_LEN("Query"), -! (char *)STRING_WITH_LEN("Field List"), -! (char *)STRING_WITH_LEN("Create DB"), -! (char *)STRING_WITH_LEN("Drop DB"), -! (char *)STRING_WITH_LEN("Refresh"), -! (char *)STRING_WITH_LEN("Shutdown"), -! (char *)STRING_WITH_LEN("Statistics"), -! (char *)STRING_WITH_LEN("Processlist"), -! (char *)STRING_WITH_LEN("Connect"), -! (char *)STRING_WITH_LEN("Kill"), -! (char *)STRING_WITH_LEN("Debug"), -! (char *)STRING_WITH_LEN("Ping"), -! (char *)STRING_WITH_LEN("Time"), -! 
(char *)STRING_WITH_LEN("Delayed insert"), -! (char *)STRING_WITH_LEN("Change user"), -! (char *)STRING_WITH_LEN("Binlog Dump"), -! (char *)STRING_WITH_LEN("Table Dump"), -! (char *)STRING_WITH_LEN("Connect Out"), -! (char *)STRING_WITH_LEN("Register Slave"), -! (char *)STRING_WITH_LEN("Prepare"), -! (char *)STRING_WITH_LEN("Execute"), -! (char *)STRING_WITH_LEN("Long Data"), -! (char *)STRING_WITH_LEN("Close stmt"), -! (char *)STRING_WITH_LEN("Reset stmt"), -! (char *)STRING_WITH_LEN("Set option"), -! (char *)STRING_WITH_LEN("Fetch"), -! (char *)STRING_WITH_LEN("Daemon"), -! (char *)STRING_WITH_LEN("Error") // Last command number - }; - - const char *xa_state_names[]={ ---- 67,108 ---- - static void decrease_user_connections(USER_CONN *uc); - #endif /* NO_EMBEDDED_ACCESS_CHECKS */ - static bool check_multi_update_lock(THD *thd); - static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables); - - const char *any_db="*any*"; // Special symbol for check_access - -! const LEX_STRING command_name[]={ -! C_STRING_WITH_LEN("Sleep"), -! C_STRING_WITH_LEN("Quit"), -! C_STRING_WITH_LEN("Init DB"), -! C_STRING_WITH_LEN("Query"), -! C_STRING_WITH_LEN("Field List"), -! C_STRING_WITH_LEN("Create DB"), -! C_STRING_WITH_LEN("Drop DB"), -! C_STRING_WITH_LEN("Refresh"), -! C_STRING_WITH_LEN("Shutdown"), -! C_STRING_WITH_LEN("Statistics"), -! C_STRING_WITH_LEN("Processlist"), -! C_STRING_WITH_LEN("Connect"), -! C_STRING_WITH_LEN("Kill"), -! C_STRING_WITH_LEN("Debug"), -! C_STRING_WITH_LEN("Ping"), -! C_STRING_WITH_LEN("Time"), -! C_STRING_WITH_LEN("Delayed insert"), -! C_STRING_WITH_LEN("Change user"), -! C_STRING_WITH_LEN("Binlog Dump"), -! C_STRING_WITH_LEN("Table Dump"), -! C_STRING_WITH_LEN("Connect Out"), -! C_STRING_WITH_LEN("Register Slave"), -! C_STRING_WITH_LEN("Prepare"), -! C_STRING_WITH_LEN("Execute"), -! C_STRING_WITH_LEN("Long Data"), -! C_STRING_WITH_LEN("Close stmt"), -! C_STRING_WITH_LEN("Reset stmt"), -! C_STRING_WITH_LEN("Set option"), -! C_STRING_WITH_LEN("Fetch"), -! C_STRING_WITH_LEN("Daemon"), -! C_STRING_WITH_LEN("Error") // Last command number - }; - - const char *xa_state_names[]={ -*************** -*** 1738,1744 **** - password. New clients send the size (1 byte) + string (not null - terminated, so also '\0' for empty string). - */ -! char db_buff[NAME_LEN+1]; // buffer to store db in utf8 - char *db= passwd; - uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? - *passwd++ : strlen(passwd); ---- 1736,1742 ---- - password. New clients send the size (1 byte) + string (not null - terminated, so also '\0' for empty string). - */ -! char db_buff[NAME_LEN+1]; // buffer to store db in utf8 - char *db= passwd; - uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? 
- *passwd++ : strlen(passwd); -*************** -*** 2315,2321 **** - DBUG_RETURN(1); - } - db= lex->select_lex.db; -- remove_escape(db); // Fix escaped '_' - if (check_db_name(db)) - { - my_error(ER_WRONG_DB_NAME, MYF(0), db); ---- 2312,2317 ---- - DBUG_RETURN(1); - } - db= lex->select_lex.db; - if (check_db_name(db)) - { - my_error(ER_WRONG_DB_NAME, MYF(0), db); -*************** -*** 6310,6345 **** - } - - -- /* Fix escaping of _, % and \ in database and table names (for ODBC) */ -- -- static void remove_escape(char *name) -- { -- if (!*name) // For empty DB names -- return; -- char *to; -- #ifdef USE_MB -- char *strend=name+(uint) strlen(name); -- #endif -- for (to=name; *name ; name++) -- { -- #ifdef USE_MB -- int l; -- if (use_mb(system_charset_info) && -- (l = my_ismbchar(system_charset_info, name, strend))) -- { -- while (l--) -- *to++ = *name++; -- name--; -- continue; -- } -- #endif -- if (*name == '\\' && name[1]) -- name++; // Skip '\\' -- *to++= *name; -- } -- *to=0; -- } -- - /**************************************************************************** - ** save order by and tables in own lists - ****************************************************************************/ ---- 6296,6301 ---- - } - - - /**************************************************************************** - ** save order by and tables in own lists - ****************************************************************************/ diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index b5e9855be27..86068cc5c0d 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -3702,9 +3702,9 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, possible to retrace this given an item tree. */ -bool mysql_unpack_partition(THD *thd, const uchar *part_buf, - uint part_info_len, - uchar *part_state, uint part_state_len, +bool mysql_unpack_partition(THD *thd, + const char *part_buf, uint part_info_len, + const char *part_state, uint part_state_len, TABLE* table, bool is_create_table_ind, handlerton *default_db_type) { @@ -6733,7 +6733,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info, } } else - DBUG_ASSERT(0); + assert(0); /* Find minimum: Do special handling if the interval has left bound in form diff --git a/sql/sql_partition.h b/sql/sql_partition.h index 7ed43527688..e0c0f1c5bd3 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -77,9 +77,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info, const key_range *key_spec, part_id_range *part_spec); -bool mysql_unpack_partition(THD *thd, const uchar *part_buf, +bool mysql_unpack_partition(THD *thd, const char *part_buf, uint part_info_len, - uchar *part_state, uint part_state_len, + const char *part_state, uint part_state_len, TABLE *table, bool is_create_table_ind, handlerton *default_db_type); void make_used_partitions_str(partition_info *part_info, String *parts_str); diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index e3e24c1f375..70bc9ef23d5 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -943,8 +943,7 @@ my_bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name) table->use_all_columns(); table->field[0]->store(name->str, name->length, system_charset_info); if (! 
table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { int error; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index a75b98997eb..e97670ab2b1 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -34,6 +34,12 @@ When one prepares a statement: [Params meta info (stubs only for now)] (if Param_count > 0) [Columns meta info] (if Column_count > 0) + During prepare the tables used in a statement are opened, but no + locks are acquired. Table opening will block any DDL during the + operation, and we do not need any locks as we neither read nor + modify any data during prepare. Tables are closed after prepare + finishes. + When one executes a statement: - Server gets the command 'COM_STMT_EXECUTE' to execute the @@ -53,6 +59,10 @@ When one executes a statement: - Execute the query without re-parsing and send back the results to client + During execution of prepared statement tables are opened and locked + the same way they would for normal (non-prepared) statement + execution. Tables are unlocked and closed after the execution. + When one supplies long data for a placeholder: - Server gets the long data in pieces with command type @@ -83,11 +93,11 @@ When one supplies long data for a placeholder: /* A result class used to send cursor rows using the binary protocol. */ -class Select_fetch_protocol_prep: public select_send +class Select_fetch_protocol_binary: public select_send { - Protocol_prep protocol; + Protocol_binary protocol; public: - Select_fetch_protocol_prep(THD *thd); + Select_fetch_protocol_binary(THD *thd); virtual bool send_fields(List<Item> &list, uint flags); virtual bool send_data(List<Item> &items); virtual bool send_eof(); @@ -99,9 +109,12 @@ public: #endif }; -/****************************************************************************** - Prepared_statement: a statement that can contain placeholders -******************************************************************************/ +/****************************************************************************/ + +/** + @class Prepared_statement + @brief Prepared_statement: a statement that can contain placeholders +*/ class Prepared_statement: public Statement { @@ -112,7 +125,7 @@ public: }; THD *thd; - Select_fetch_protocol_prep result; + Select_fetch_protocol_binary result; Protocol *protocol; Item_param **param_array; uint param_count; @@ -141,6 +154,16 @@ public: bool execute(String *expanded_query, bool open_cursor); /* Destroy this statement */ bool deallocate(); +private: + /** + Store the parsed tree of a prepared statement here. + */ + LEX main_lex; + /** + The memory root to allocate parsed tree elements (instances of Item, + SELECT_LEX and other classes). + */ + MEM_ROOT main_mem_root; }; @@ -224,9 +247,9 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) */ DBUG_RETURN(my_net_write(net, buff, sizeof(buff)) || (stmt->param_count && - stmt->thd->protocol_simple.send_fields((List<Item> *) - &stmt->lex->param_list, - Protocol::SEND_EOF))); + stmt->thd->protocol_text.send_fields((List<Item> *) + &stmt->lex->param_list, + Protocol::SEND_EOF))); } #else static bool send_prep_stmt(Prepared_statement *stmt, @@ -668,7 +691,7 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, and generate a valid query for logging. 
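The comments added to sql_prepare.cc above spell out the two phases of server-side prepared statements: COM_STMT_PREPARE opens the referenced tables without taking locks and only produces metadata, while COM_STMT_EXECUTE opens and locks tables exactly as a normal statement would. A minimal client-side sketch of that two-phase flow using the libmysqlclient prepared-statement API; the connection setup is omitted, the table and column names are invented, and error handling is reduced to early returns.

/* Client-side view of the PREPARE/EXECUTE split described above.
   Assumes an already connected MYSQL* handle. */
#include <mysql.h>
#include <cstring>
#include <cstdio>

static int run_prepared(MYSQL *conn)
{
  MYSQL_STMT *stmt= mysql_stmt_init(conn);
  if (!stmt)
    return 1;

  /* COM_STMT_PREPARE: the server parses the statement, opens tables without
     locking them, and returns parameter/column metadata. */
  const char *query= "SELECT name FROM t1 WHERE id = ?";
  if (mysql_stmt_prepare(stmt, query, (unsigned long) std::strlen(query)))
  {
    std::fprintf(stderr, "prepare failed: %s\n", mysql_stmt_error(stmt));
    mysql_stmt_close(stmt);
    return 1;
  }

  /* Bind the single integer placeholder. */
  int id= 42;
  MYSQL_BIND param;
  std::memset(&param, 0, sizeof(param));
  param.buffer_type= MYSQL_TYPE_LONG;
  param.buffer= (void*) &id;

  /* COM_STMT_EXECUTE: now tables are opened *and* locked as for a normal
     (non-prepared) statement; results come back in the binary protocol. */
  if (mysql_stmt_bind_param(stmt, &param) || mysql_stmt_execute(stmt))
    std::fprintf(stderr, "execute failed: %s\n", mysql_stmt_error(stmt));

  mysql_stmt_close(stmt);           /* COM_STMT_CLOSE */
  return 0;
}

Result fetching (mysql_stmt_bind_result / mysql_stmt_fetch) is left out; the point is only the separation of the prepare and execute round trips that the server-side comments describe.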
NOTES - This function, along with other _withlog functions is called when one of + This function, along with other _with_log functions is called when one of binary, slow or general logs is open. Logging of prepared statements in all cases is performed by means of conventional queries: if parameter data was supplied from C API, each placeholder in the query is @@ -692,9 +715,9 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, 0 if success, 1 otherwise */ -static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, - uchar *read_pos, uchar *data_end, - String *query) +static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array, + uchar *read_pos, uchar *data_end, + String *query) { THD *thd= stmt->thd; Item_param **begin= stmt->param_array; @@ -702,7 +725,7 @@ static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, uint32 length= 0; String str; const String *res; - DBUG_ENTER("insert_params_withlog"); + DBUG_ENTER("insert_params_with_log"); if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); @@ -846,7 +869,8 @@ static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query) } -static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) +static bool emb_insert_params_with_log(Prepared_statement *stmt, + String *query) { THD *thd= stmt->thd; Item_param **it= stmt->param_array; @@ -857,7 +881,7 @@ static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) const String *res; uint32 length= 0; - DBUG_ENTER("emb_insert_params_withlog"); + DBUG_ENTER("emb_insert_params_with_log"); if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); @@ -1058,7 +1082,7 @@ static bool mysql_test_insert(Prepared_statement *stmt, if (mysql_prepare_insert(thd, table_list, table_list->table, fields, values, update_fields, update_values, - duplic, &unused_conds, FALSE)) + duplic, &unused_conds, FALSE, FALSE, FALSE)) goto error; value_count= values->elements; @@ -1116,32 +1140,20 @@ static int mysql_test_update(Prepared_statement *stmt, #ifndef NO_EMBEDDED_ACCESS_CHECKS uint want_privilege; #endif - bool need_reopen; DBUG_ENTER("mysql_test_update"); - if (update_precheck(thd, table_list)) + if (update_precheck(thd, table_list) || + open_tables(thd, &table_list, &table_count, 0)) goto error; - for ( ; ; ) + if (table_list->multitable_view) { - if (open_tables(thd, &table_list, &table_count, 0)) - goto error; - - if (table_list->multitable_view) - { - DBUG_ASSERT(table_list->view != 0); - DBUG_PRINT("info", ("Switch to multi-update")); - /* pass counter value */ - thd->lex->table_count= table_count; - /* convert to multiupdate */ - DBUG_RETURN(2); - } - - if (!lock_tables(thd, table_list, table_count, &need_reopen)) - break; - if (!need_reopen) - goto error; - close_tables_for_reopen(thd, &table_list); + DBUG_ASSERT(table_list->view != 0); + DBUG_PRINT("info", ("Switch to multi-update")); + /* pass counter value */ + thd->lex->table_count= table_count; + /* convert to multiupdate */ + DBUG_RETURN(2); } /* @@ -1208,7 +1220,7 @@ static bool mysql_test_delete(Prepared_statement *stmt, DBUG_ENTER("mysql_test_delete"); if (delete_precheck(thd, table_list) || - open_and_lock_tables(thd, table_list)) + open_normal_and_derived_tables(thd, table_list, 0)) goto error; if (!table_list->table) @@ -1267,7 +1279,7 @@ static int mysql_test_select(Prepared_statement *stmt, goto error; } - if (open_and_lock_tables(thd, tables)) + if 
(open_normal_and_derived_tables(thd, tables, 0)) goto error; thd->used_tables= 0; // Updated by setup_fields @@ -1328,7 +1340,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt, if (tables && check_table_access(thd, SELECT_ACL, tables, 0)) DBUG_RETURN(TRUE); - if (open_and_lock_tables(thd, tables)) + if (open_normal_and_derived_tables(thd, tables, 0)) DBUG_RETURN(TRUE); DBUG_RETURN(setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0)); } @@ -1358,7 +1370,7 @@ static bool mysql_test_set_fields(Prepared_statement *stmt, set_var_base *var; if (tables && check_table_access(thd, SELECT_ACL, tables, 0) || - open_and_lock_tables(thd, tables)) + open_normal_and_derived_tables(thd, tables, 0)) goto error; while ((var= it++)) @@ -1384,7 +1396,7 @@ error: NOTE This function won't directly open tables used in select. They should be opened either by calling function (and in this case you probably - should use select_like_stmt_test_with_open_n_lock()) or by + should use select_like_stmt_test_with_open()) or by "specific_prepare" call (like this happens in case of multi-update). RETURN VALUE @@ -1412,14 +1424,14 @@ static bool select_like_stmt_test(Prepared_statement *stmt, } /* - Check internal SELECT of the prepared command (with opening and - locking of used tables). + Check internal SELECT of the prepared command (with opening of used + tables). SYNOPSIS - select_like_stmt_test_with_open_n_lock() + select_like_stmt_test_with_open() stmt prepared statement - tables list of tables to be opened and locked - before calling specific_prepare function + tables list of tables to be opened before calling + specific_prepare function specific_prepare function of command specific prepare setup_tables_done_option options to be passed to LEX::unit.prepare() @@ -1429,19 +1441,20 @@ static bool select_like_stmt_test(Prepared_statement *stmt, */ static bool -select_like_stmt_test_with_open_n_lock(Prepared_statement *stmt, - TABLE_LIST *tables, - bool (*specific_prepare)(THD *thd), - ulong setup_tables_done_option) +select_like_stmt_test_with_open(Prepared_statement *stmt, + TABLE_LIST *tables, + bool (*specific_prepare)(THD *thd), + ulong setup_tables_done_option) { - DBUG_ENTER("select_like_stmt_test_with_open_n_lock"); + DBUG_ENTER("select_like_stmt_test_with_open"); /* - We should not call LEX::unit.cleanup() after this open_and_lock_tables() - call because we don't allow prepared EXPLAIN yet so derived tables will - clean up after themself. + We should not call LEX::unit.cleanup() after this + open_normal_and_derived_tables() call because we don't allow + prepared EXPLAIN yet so derived tables will clean up after + themself. 
*/ - if (open_and_lock_tables(stmt->thd, tables)) + if (open_normal_and_derived_tables(stmt->thd, tables, 0)) DBUG_RETURN(TRUE); DBUG_RETURN(select_like_stmt_test(stmt, specific_prepare, @@ -1480,7 +1493,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt) if (select_lex->item_list.elements) { select_lex->context.resolve_in_select_list= TRUE; - res= select_like_stmt_test_with_open_n_lock(stmt, tables, 0, 0); + res= select_like_stmt_test_with_open(stmt, tables, 0, 0); } /* put tables back for PS rexecuting */ @@ -1540,9 +1553,9 @@ static bool mysql_test_multidelete(Prepared_statement *stmt, } if (multi_delete_precheck(stmt->thd, tables) || - select_like_stmt_test_with_open_n_lock(stmt, tables, - &mysql_multi_delete_prepare, - OPTION_SETUP_TABLES_DONE)) + select_like_stmt_test_with_open(stmt, tables, + &mysql_multi_delete_prepare, + OPTION_SETUP_TABLES_DONE)) goto error; if (!tables->table) { @@ -1558,34 +1571,30 @@ error: /* Wrapper for mysql_insert_select_prepare, to make change of local tables - after open_and_lock_tables() call. + after open_normal_and_derived_tables() call. SYNOPSIS mysql_insert_select_prepare_tester() thd thread handle NOTE - We need to remove the first local table after open_and_lock_tables, - because mysql_handle_derived uses local tables lists. + We need to remove the first local table after + open_normal_and_derived_tables(), because mysql_handle_derived + uses local tables lists. */ static bool mysql_insert_select_prepare_tester(THD *thd) { - TABLE_LIST *first; - bool res; SELECT_LEX *first_select= &thd->lex->select_lex; + TABLE_LIST *second_table= ((TABLE_LIST*)first_select->table_list.first)-> + next_local; + /* Skip first table, which is the table we are inserting in */ - first_select->table_list.first= (byte*)(first= - ((TABLE_LIST*)first_select-> - table_list.first)->next_local); - res= mysql_insert_select_prepare(thd); - /* - insert/replace from SELECT give its SELECT_LEX for SELECT, - and item_list belong to SELECT - */ - thd->lex->select_lex.context.resolve_in_select_list= TRUE; - thd->lex->select_lex.context.table_list= first; - return res; + first_select->table_list.first= (byte *) second_table; + thd->lex->select_lex.context.table_list= + thd->lex->select_lex.context.first_name_resolution_table= second_table; + + return mysql_insert_select_prepare(thd); } @@ -1623,9 +1632,9 @@ static bool mysql_test_insert_select(Prepared_statement *stmt, DBUG_ASSERT(first_local_table != 0); res= - select_like_stmt_test_with_open_n_lock(stmt, tables, - &mysql_insert_select_prepare_tester, - OPTION_SETUP_TABLES_DONE); + select_like_stmt_test_with_open(stmt, tables, + &mysql_insert_select_prepare_tester, + OPTION_SETUP_TABLES_DONE); /* revert changes made by mysql_insert_select_prepare_tester */ lex->select_lex.table_list.first= (byte*) first_local_table; return res; @@ -1881,7 +1890,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) /* First of all clear possible warnings from the previous command */ mysql_reset_thd_for_next_command(thd); - if (! (stmt= new Prepared_statement(thd, &thd->protocol_prep))) + if (! 
(stmt= new Prepared_statement(thd, &thd->protocol_binary))) DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */ if (thd->stmt_map.insert(thd, stmt)) @@ -2053,7 +2062,8 @@ void mysql_sql_stmt_prepare(THD *thd) const char *query; uint query_len; DBUG_ENTER("mysql_sql_stmt_prepare"); - DBUG_ASSERT(thd->protocol == &thd->protocol_simple); + LINT_INIT(query_len); + DBUG_ASSERT(thd->protocol == &thd->protocol_text); if ((stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) { @@ -2066,7 +2076,7 @@ void mysql_sql_stmt_prepare(THD *thd) } if (! (query= get_dynamic_sql_string(lex, &query_len)) || - ! (stmt= new Prepared_statement(thd, &thd->protocol_simple))) + ! (stmt= new Prepared_statement(thd, &thd->protocol_text))) { DBUG_VOID_RETURN; /* out of memory */ } @@ -2077,6 +2087,7 @@ void mysql_sql_stmt_prepare(THD *thd) delete stmt; DBUG_VOID_RETURN; } + if (thd->stmt_map.insert(thd, stmt)) { /* The statement is deleted and an error is set if insert fails */ @@ -2242,7 +2253,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) { uchar *packet= (uchar*)packet_arg; // GCC 4.0.1 workaround ulong stmt_id= uint4korr(packet); - ulong flags= (ulong) ((uchar) packet[4]); + ulong flags= (ulong) packet[4]; /* Query text for binary, general or slow log, if any of them is open */ String expanded_query; #ifndef EMBEDDED_LIBRARY @@ -2618,14 +2629,14 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) /*************************************************************************** - Select_fetch_protocol_prep + Select_fetch_protocol_binary ****************************************************************************/ -Select_fetch_protocol_prep::Select_fetch_protocol_prep(THD *thd_arg) +Select_fetch_protocol_binary::Select_fetch_protocol_binary(THD *thd_arg) :protocol(thd_arg) {} -bool Select_fetch_protocol_prep::send_fields(List<Item> &list, uint flags) +bool Select_fetch_protocol_binary::send_fields(List<Item> &list, uint flags) { bool rc; Protocol *save_protocol= thd->protocol; @@ -2643,7 +2654,7 @@ bool Select_fetch_protocol_prep::send_fields(List<Item> &list, uint flags) return rc; } -bool Select_fetch_protocol_prep::send_eof() +bool Select_fetch_protocol_binary::send_eof() { Protocol *save_protocol= thd->protocol; @@ -2655,7 +2666,7 @@ bool Select_fetch_protocol_prep::send_eof() bool -Select_fetch_protocol_prep::send_data(List<Item> &fields) +Select_fetch_protocol_binary::send_data(List<Item> &fields) { Protocol *save_protocol= thd->protocol; bool rc; @@ -2671,32 +2682,44 @@ Select_fetch_protocol_prep::send_data(List<Item> &fields) ****************************************************************************/ Prepared_statement::Prepared_statement(THD *thd_arg, Protocol *protocol_arg) - :Statement(INITIALIZED, ++thd_arg->statement_id_counter, - thd_arg->variables.query_alloc_block_size, - thd_arg->variables.query_prealloc_size), + :Statement(&main_lex, &main_mem_root, + INITIALIZED, ++thd_arg->statement_id_counter), thd(thd_arg), result(thd_arg), protocol(protocol_arg), param_array(0), param_count(0), last_errno(0), - flags((uint) IS_IN_USE) + flags((uint) IS_IN_USE) { + init_alloc_root(&main_mem_root, thd_arg->variables.query_alloc_block_size, + thd_arg->variables.query_prealloc_size); *last_error= '\0'; } void Prepared_statement::setup_set_params() { - /* Setup binary logging */ + /* + Note: BUG#25843 applies here too (query cache lookup uses thd->db, not + db from "prepare" time). 
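With this change the Prepared_statement keeps its parsed tree on a statement-owned arena: the constructor sizes main_mem_root with the three-argument init_alloc_root() call shown above, using query_alloc_block_size and query_prealloc_size, and the destructor releases everything with a single free_root(). A small sketch of that allocate-as-you-parse, free-all-at-once lifecycle, using only the mysys calls visible in these hunks; the standalone my_init()/my_end() bootstrap and the build setup are assumptions, since this code normally runs inside mysqld.

/* Sketch of the per-statement MEM_ROOT lifecycle used by Prepared_statement. */
#include <my_global.h>
#include <my_sys.h>

struct Stmt_arena_demo
{
  MEM_ROOT mem_root;

  Stmt_arena_demo(uint block_size, uint prealloc)
  {
    /* Same call as the Prepared_statement constructor above. */
    init_alloc_root(&mem_root, block_size, prealloc);
  }

  char *copy_query(const char *s)
  {
    /* Allocations are never freed one by one... */
    return strdup_root(&mem_root, s);
  }

  ~Stmt_arena_demo()
  {
    /* ...the whole root goes away at once, as in ~Prepared_statement(). */
    free_root(&mem_root, MYF(0));
  }
};

int main()
{
  my_init();                         /* mysys bootstrap for a standalone build */
  {
    Stmt_arena_demo arena(1024, 0);
    (void) arena.copy_query("SELECT 1");
  }                                  /* destructor frees the arena */
  my_end(0);
  return 0;
}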
+ */ + if (query_cache_maybe_disabled(thd)) // we won't expand the query + lex->safe_to_cache_query= FALSE; // so don't cache it at Execution + + /* + Decide if we have to expand the query (because we must write it to logs or + because we want to look it up in the query cache) or not. + */ if (mysql_bin_log.is_open() && is_update_query(lex->sql_command) || - opt_log || opt_slow_log) + opt_log || opt_slow_log || + query_cache_is_cacheable_query(lex)) { set_params_from_vars= insert_params_from_vars_with_log; #ifndef EMBEDDED_LIBRARY - set_params= insert_params_withlog; + set_params= insert_params_with_log; #else - set_params_data= emb_insert_params_withlog; + set_params_data= emb_insert_params_with_log; #endif } else @@ -2731,6 +2754,7 @@ Prepared_statement::~Prepared_statement() */ free_items(); delete lex->result; + free_root(&main_mem_root, MYF(0)); DBUG_VOID_RETURN; } @@ -2746,6 +2770,7 @@ void Prepared_statement::cleanup_stmt() DBUG_ENTER("Prepared_statement::cleanup_stmt"); DBUG_PRINT("enter",("stmt: 0x%lx", (long) this)); + DBUG_ASSERT(lex->sphead == 0); /* The order is important */ lex->unit.cleanup(); cleanup_items(free_list); @@ -2825,25 +2850,13 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) old_stmt_arena= thd->stmt_arena; thd->stmt_arena= this; - lex_start(thd, (uchar*) thd->query, thd->query_length); + lex_start(thd, thd->query, thd->query_length); lex->stmt_prepare_mode= TRUE; error= MYSQLparse((void *)thd) || thd->is_fatal_error || thd->net.report_error || init_param_array(this); /* - The first thing we do after parse error is freeing sp_head to - ensure that we have restored original memroot. - */ - if (error && lex->sphead) - { - delete lex->sphead; - lex->sphead= NULL; - } - - lex->safe_to_cache_query= FALSE; - - /* While doing context analysis of the query (in check_prepared_statement) we allocate a lot of additional memory: for open tables, JOINs, derived tables, etc. Let's save a snapshot of current parse tree to the @@ -2868,17 +2881,35 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) if (error == 0) error= check_prepared_statement(this, name.str != 0); - /* Free sp_head if check_prepared_statement() failed. */ - if (error && lex->sphead) + /* + Currently CREATE PROCEDURE/TRIGGER/EVENT are prohibited in prepared + statements: ensure we have no memory leak here if by someone tries + to PREPARE stmt FROM "CREATE PROCEDURE ..." + */ + DBUG_ASSERT(lex->sphead == NULL || error != 0); + if (lex->sphead) { delete lex->sphead; lex->sphead= NULL; } + lex_end(lex); cleanup_stmt(); thd->restore_backup_statement(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; + if ((protocol->type() == Protocol::PROTOCOL_TEXT) && (param_count > 0)) + { + /* + This is a mysql_sql_stmt_prepare(); query expansion will insert user + variable references, and user variables are uncacheable, thus we have to + mark this statement as uncacheable. + This has to be done before setup_set_params(), as it may make expansion + unneeded. + */ + lex->safe_to_cache_query= FALSE; + } + if (error == 0) { setup_set_params(); @@ -2995,11 +3026,26 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) reinit_stmt_before_use(thd, lex); thd->protocol= protocol; /* activate stmt protocol */ - error= (open_cursor ? 
- mysql_open_cursor(thd, (uint) ALWAYS_MATERIALIZED_CURSOR, - &result, &cursor) : - mysql_execute_command(thd)); - thd->protocol= &thd->protocol_simple; /* use normal protocol */ + + if (open_cursor) + error= mysql_open_cursor(thd, (uint) ALWAYS_MATERIALIZED_CURSOR, + &result, &cursor); + else + { + /* + Try to find it in the query cache, if not, execute it. + Note that multi-statements cannot exist here (they are not supported in + prepared statements). + */ + if (query_cache_send_result_to_client(thd, thd->query, + thd->query_length) <= 0) + { + error= mysql_execute_command(thd); + query_cache_end_of_result(thd); + } + } + + thd->protocol= &thd->protocol_text; /* use normal protocol */ /* Assert that if an error, no cursor is open */ DBUG_ASSERT(! (error && cursor)); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 17163fb1940..debc9a7b572 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -23,7 +23,9 @@ int max_binlog_dump_events = 0; // unlimited my_bool opt_sporadic_binlog_dump_fail = 0; +#ifndef DBUG_OFF static int binlog_dump_count = 0; +#endif /* fake_rotate_event() builds a fake (=which does not exist physically in any @@ -884,12 +886,14 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) { + DBUG_ENTER("stop_slave"); + int slave_errno; if (!thd) thd = current_thd; if (check_access(thd, SUPER_ACL, any_db,0,0,0,0)) - return 1; + DBUG_RETURN(1); thd->proc_info = "Killing slave"; int thread_mask; lock_slave_threads(mi); @@ -923,12 +927,12 @@ int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) { if (net_report) my_message(slave_errno, ER(slave_errno), MYF(0)); - return 1; + DBUG_RETURN(1); } else if (net_report) send_ok(thd); - return 0; + DBUG_RETURN(0); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b9e02d33130..eb6f4efccc3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -163,13 +163,15 @@ static COND *make_cond_for_table(COND *cond,table_map table, static Item* part_of_refkey(TABLE *form,Field *field); uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, - ha_rows select_limit, bool no_changes); + ha_rows select_limit, bool no_changes, + key_map *map); static bool list_contains_unique_index(TABLE *table, bool (*find_func) (Field *, void *), void *data); static bool find_field_in_item_list (Field *field, void *data); static bool find_field_in_order_list (Field *field, void *data); static int create_sort_index(THD *thd, JOIN *join, ORDER *order, - ha_rows filesort_limit, ha_rows select_limit); + ha_rows filesort_limit, ha_rows select_limit, + bool is_order_by); static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields, Item *having); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, @@ -268,6 +270,70 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, /* + Fix fields referenced from inner selects. + + SYNOPSIS + fix_inner_refs() + thd Thread handle + all_fields List of all fields used in select + select Current select + ref_pointer_array Array of references to Items used in current select + + DESCRIPTION + The function fixes fields referenced from inner selects and + also fixes references (Item_ref objects) to these fields. Each field + is fixed as a usual hidden field of the current select - it is added + to the all_fields list and the pointer to it is saved in the + ref_pointer_array if latter is provided. 
+ After the field has been fixed we proceed with fixing references + (Item_ref objects) to this field from inner subqueries. If the + ref_pointer_array is provided then Item_ref objects is set to + reference element in that array with the pointer to the field. + + RETURN + TRUE an error occured + FALSE ok +*/ + +bool +fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, + Item **ref_pointer_array) +{ + Item_outer_ref *ref; + bool res= FALSE; + List_iterator<Item_outer_ref> ref_it(select->inner_refs_list); + while ((ref= ref_it++)) + { + Item_field *item= ref->outer_field; + /* + TODO: this field item already might be present in the select list. + In this case instead of adding new field item we could use an + existing one. The change will lead to less operations for copying fields, + smaller temporary tables and less data passed through filesort. + */ + if (ref_pointer_array) + { + int el= all_fields.elements; + ref_pointer_array[el]= (Item*)item; + /* Add the field item to the select list of the current select. */ + all_fields.push_front((Item*)item); + /* + If it's needed reset each Item_ref item that refers this field with + a new reference taken from ref_pointer_array. + */ + ref->ref= ref_pointer_array + el; + } + if (!ref->fixed && ref->fix_fields(thd, 0)) + { + res= TRUE; + break; + } + thd->used_tables|= item->used_tables(); + } + return res; +} + +/* Function to setup clauses without sum functions */ inline int setup_without_group(THD *thd, Item **ref_pointer_array, @@ -332,6 +398,7 @@ JOIN::prepare(Item ***rref_pointer_array, join_list= &select_lex->top_join_list; union_part= (unit_arg->first_select()->next_select() != 0); + thd->lex->current_select->is_item_list_lookup= 1; /* If we have already executed SELECT, then it have not sense to prevent its table from update (see unique_table()) @@ -341,12 +408,19 @@ JOIN::prepare(Item ***rref_pointer_array, /* Check that all tables, fields, conds and order are ok */ - if ((!(select_options & OPTION_SETUP_TABLES_DONE) && - setup_tables_and_check_access(thd, &select_lex->context, join_list, - tables_list, - &select_lex->leaf_tables, FALSE, - SELECT_ACL, SELECT_ACL)) || - setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || + if (!(select_options & OPTION_SETUP_TABLES_DONE) && + setup_tables_and_check_access(thd, &select_lex->context, join_list, + tables_list, &select_lex->leaf_tables, + FALSE, SELECT_ACL, SELECT_ACL)) + DBUG_RETURN(-1); + + TABLE_LIST *table_ptr; + for (table_ptr= select_lex->leaf_tables; + table_ptr; + table_ptr= table_ptr->next_leaf) + tables++; + + if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || select_lex->setup_ref_array(thd, og_num) || setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ, &all_fields, 1) || @@ -391,9 +465,24 @@ JOIN::prepare(Item ***rref_pointer_array, select_lex->fix_prepare_information(thd, &conds, &having); + if (order) + { + ORDER *ord; + for (ord= order; ord; ord= ord->next) + { + Item *item= *ord->item; + if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) + item->split_sum_func(thd, ref_pointer_array, all_fields); + } + } + if (having && having->with_sum_func) having->split_sum_func2(thd, ref_pointer_array, all_fields, &having, TRUE); + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array)) + DBUG_RETURN(-1); + if (select_lex->inner_sum_func_list) { Item_sum *end=select_lex->inner_sum_func_list; @@ -436,11 +525,6 @@ JOIN::prepare(Item 
***rref_pointer_array, DBUG_RETURN(-1); } } - TABLE_LIST *table_ptr; - for (table_ptr= select_lex->leaf_tables; - table_ptr; - table_ptr= table_ptr->next_leaf) - tables++; } { /* Caclulate the number of groups */ @@ -474,6 +558,9 @@ JOIN::prepare(Item ***rref_pointer_array, } } + if (!procedure && result && result->prepare(fields_list, unit_arg)) + goto err; /* purecov: inspected */ + /* Init join struct */ count_field_types(&tmp_table_param, all_fields, 0); ref_pointer_array_size= all_fields.elements*sizeof(Item*); @@ -487,9 +574,6 @@ JOIN::prepare(Item ***rref_pointer_array, goto err; } #endif - if (!procedure && result && result->prepare(fields_list, unit_arg)) - goto err; /* purecov: inspected */ - if (select_lex->olap == ROLLUP_TYPE && rollup_init()) goto err; if (alloc_func_list()) @@ -572,7 +656,7 @@ void JOIN::remove_subq_pushed_predicates(Item **where) static void save_index_subquery_explain_info(JOIN_TAB *join_tab, Item* where) { join_tab->packed_info= TAB_INFO_HAVE_VALUE; - if (join_tab->table->used_keys.is_set(join_tab->ref.key)) + if (join_tab->table->covering_keys.is_set(join_tab->ref.key)) join_tab->packed_info |= TAB_INFO_USING_INDEX; if (where) join_tab->packed_info |= TAB_INFO_USING_WHERE; @@ -670,7 +754,6 @@ JOIN::optimize() } { - Item::cond_result having_value; having= optimize_cond(this, having, join_list, &having_value); if (thd->net.report_error) { @@ -678,6 +761,10 @@ JOIN::optimize() DBUG_PRINT("error",("Error from optimize_cond")); DBUG_RETURN(1); } + if (select_lex->where) + select_lex->cond_value= cond_value; + if (select_lex->having) + select_lex->having_value= having_value; if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) @@ -838,6 +925,7 @@ JOIN::optimize() conds->update_used_tables(); DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal");); } + /* Permorm the the optimization on fields evaluation mentioned above for all on expressions. 
@@ -940,14 +1028,15 @@ JOIN::optimize() JOIN_TAB *tab= &join_tab[const_tables]; bool all_order_fields_used; if (order) - skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1); + skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1, + &tab->table->keys_in_use_for_order_by); if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array, order, fields_list, &all_order_fields_used))) { bool skip_group= (skip_sort_order && - test_if_skip_sort_order(tab, group_list, select_limit, - 1) != 0); + test_if_skip_sort_order(tab, group_list, select_limit, 1, + &tab->table->keys_in_use_for_group_by) != 0); if ((skip_group && all_order_fields_used) || select_limit == HA_POS_ERROR || (order && !skip_sort_order)) @@ -1145,7 +1234,9 @@ JOIN::optimize() ((group_list && (!simple_group || !test_if_skip_sort_order(&join_tab[const_tables], group_list, - unit->select_limit_cnt, 0))) || + unit->select_limit_cnt, 0, + &join_tab[const_tables].table-> + keys_in_use_for_group_by))) || select_distinct) && tmp_table_param.quick_group && !procedure) { @@ -1247,7 +1338,7 @@ JOIN::optimize() DBUG_PRINT("info",("Sorting for group")); thd->proc_info="Sorting for group"; if (create_sort_index(thd, this, group_list, - HA_POS_ERROR, HA_POS_ERROR) || + HA_POS_ERROR, HA_POS_ERROR, FALSE) || alloc_group_fields(this, group_list) || make_sum_func_list(all_fields, fields_list, 1) || setup_sum_funcs(thd, sum_funcs)) @@ -1264,7 +1355,7 @@ JOIN::optimize() DBUG_PRINT("info",("Sorting for order")); thd->proc_info="Sorting for order"; if (create_sort_index(thd, this, order, - HA_POS_ERROR, HA_POS_ERROR)) + HA_POS_ERROR, HA_POS_ERROR, TRUE)) DBUG_RETURN(1); order=0; } @@ -1291,7 +1382,9 @@ JOIN::optimize() { /* Should always succeed */ if (test_if_skip_sort_order(&join_tab[const_tables], - order, unit->select_limit_cnt, 0)) + order, unit->select_limit_cnt, 0, + &join_tab[const_tables].table-> + keys_in_use_for_order_by)) order=0; } } @@ -1484,7 +1577,9 @@ JOIN::exec() (const_tables == tables || ((simple_order || skip_sort_order) && test_if_skip_sort_order(&join_tab[const_tables], order, - select_limit, 0)))) + select_limit, 0, + &join_tab[const_tables].table-> + keys_in_use_for_order_by)))) order=0; having= tmp_having; select_describe(this, need_tmp, @@ -1507,7 +1602,7 @@ JOIN::exec() if ((curr_join->select_lex->options & OPTION_SCHEMA_TABLE) && !thd->lex->describe && - get_schema_tables_result(curr_join)) + get_schema_tables_result(curr_join, PROCESSED_BY_JOIN_EXEC)) { DBUG_VOID_RETURN; } @@ -1660,7 +1755,7 @@ JOIN::exec() DBUG_VOID_RETURN; } if (create_sort_index(thd, curr_join, curr_join->group_list, - HA_POS_ERROR, HA_POS_ERROR) || + HA_POS_ERROR, HA_POS_ERROR, FALSE) || make_group_fields(this, curr_join)) { DBUG_VOID_RETURN; @@ -1876,7 +1971,8 @@ JOIN::exec() curr_join->group_list : curr_join->order, curr_join->select_limit, (select_options & OPTION_FOUND_ROWS ? - HA_POS_ERROR : unit->select_limit_cnt))) + HA_POS_ERROR : unit->select_limit_cnt), + curr_join->group_list ? TRUE : FALSE)) DBUG_VOID_RETURN; sortorder= curr_join->sortorder; if (curr_join->const_tables != curr_join->tables && @@ -2884,15 +2980,9 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, /* We can't use indexes if the effective collation of the operation differ from the field collation. 
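The sql_select.cc hunks here rename table->used_keys to table->covering_keys; where a covering index is available the join code switches to index-only reads (table->key_read=1, HA_EXTRA_KEYREAD) and, for full index scans, picks the shortest covering key via find_shortest_key(). A stand-in sketch of that selection step, written against a plain bitset and an invented per-key length array rather than the server's key_map/KEY structures.

/* Illustrative only: choose the shortest key among those flagged as covering,
   mirroring the covering_keys / find_shortest_key() pattern in these hunks. */
#include <bitset>
#include <cstdint>
#include <cstddef>

const std::size_t MAX_KEYS= 64;     /* stand-in for the server's key_map width */

std::size_t shortest_covering_key(const std::bitset<MAX_KEYS> &covering_keys,
                                  const uint16_t *key_length,  /* bytes per key */
                                  std::size_t key_count)
{
  std::size_t best= MAX_KEYS;       /* MAX_KEYS means "no covering key" */
  uint16_t best_len= UINT16_MAX;
  for (std::size_t i= 0; i < key_count; i++)
  {
    if (covering_keys.test(i) && key_length[i] < best_len)
    {
      best= i;
      best_len= key_length[i];
    }
  }
  return best;                      /* caller falls back to a full scan if none */
}

In the server the usable-key set is further filtered (keys disabled by ALTER TABLE ... DISABLE KEYS, FORCE INDEX handling elsewhere in these hunks); the sketch keeps only the covering/shortest choice.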
- - We also cannot use index on a text column, as the column may - contain 'x' 'x\t' 'x ' and 'read_next_same' will stop after - 'x' when searching for WHERE col='x ' */ if (field->cmp_type() == STRING_RESULT && - (((Field_str*)field)->charset() != cond->compare_collation() || - ((*value)->type() != Item::NULL_ITEM && - (field->flags & BLOB_FLAG) && !field->binary()))) + ((Field_str*)field)->charset() != cond->compare_collation()) return; } } @@ -3886,7 +3976,7 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ tmp= records; set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys.is_set(key)) + if (table->covering_keys.is_set(key)) { /* we can use only index tree */ uint keys_per_block= table->file->stats.block_size/2/ @@ -4053,7 +4143,7 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys.is_set(key)) + if (table->covering_keys.is_set(key)) { /* we can use only index tree */ uint keys_per_block= table->file->stats.block_size/2/ @@ -4112,7 +4202,7 @@ best_access_path(JOIN *join, !(s->quick && best_key && s->quick->index == best_key->key && // (2) best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2) !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3) - ! s->table->used_keys.is_clear_all() && best_key) && // (3) + ! s->table->covering_keys.is_clear_all() && best_key) && // (3) !(s->table->force_index && best_key && !s->quick)) // (4) { // Check full join ha_rows rnd_records= s->found_records; @@ -5287,13 +5377,15 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, key_part->length, keyuse->val); } - else if (keyuse->val->type() == Item::FIELD_ITEM) + else if (keyuse->val->type() == Item::FIELD_ITEM || + (keyuse->val->type() == Item::REF_ITEM && + ((Item_ref*)keyuse->val)->ref_type() == Item_ref::OUTER_REF) ) return new store_key_field(thd, key_part->field, key_buff + maybe_null, maybe_null ? 
key_buff : 0, key_part->length, - ((Item_field*) keyuse->val)->field, + ((Item_field*) keyuse->val->real_item())->field, keyuse->val->full_name()); return new store_key_item(thd, key_part->field, @@ -6016,10 +6108,7 @@ make_join_readinfo(JOIN *join, ulonglong options) */ if (!ordered_set && (table == join->sort_by_table && - (!join->order || join->skip_sort_order || - test_if_skip_sort_order(tab, join->order, join->select_limit, - 1)) - ) || + (!join->order || join->skip_sort_order)) || (join->sort_by_table == (TABLE *) 1 && i != join->const_tables)) ordered_set= 1; @@ -6035,7 +6124,7 @@ make_join_readinfo(JOIN *join, ulonglong options) table->status=STATUS_NO_RECORD; tab->read_first_record= join_read_const; tab->read_record.read_record= join_no_more_records; - if (table->used_keys.is_set(tab->ref.key) && + if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; @@ -6053,7 +6142,7 @@ make_join_readinfo(JOIN *join, ulonglong options) tab->quick=0; tab->read_first_record= join_read_key; tab->read_record.read_record= join_no_more_records; - if (table->used_keys.is_set(tab->ref.key) && + if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; @@ -6070,7 +6159,7 @@ make_join_readinfo(JOIN *join, ulonglong options) } delete tab->quick; tab->quick=0; - if (table->used_keys.is_set(tab->ref.key) && + if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; @@ -6156,15 +6245,15 @@ make_join_readinfo(JOIN *join, ulonglong options) { if (tab->select && tab->select->quick && tab->select->quick->index != MAX_KEY && //not index_merge - table->used_keys.is_set(tab->select->quick->index)) + table->covering_keys.is_set(tab->select->quick->index)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } - else if (!table->used_keys.is_clear_all() && + else if (!table->covering_keys.is_clear_all() && !(tab->select && tab->select->quick)) { // Only read index tree - tab->index=find_shortest_key(table, & table->used_keys); + tab->index=find_shortest_key(table, & table->covering_keys); tab->read_first_record= join_read_first; tab->type=JT_NEXT; // Read with index_first / index_next } @@ -6548,7 +6637,8 @@ static void update_depend_map(JOIN *join, ORDER *order) order->item[0]->update_used_tables(); order->depend_map=depend_map=order->item[0]->used_tables(); // Not item_sum(), RAND() and no reference to table outside of sub select - if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))) + if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) + && !order->item[0]->with_sum_func) { for (JOIN_TAB **tab=join->map2table; depend_map ; @@ -7030,6 +7120,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item, SYNOPSIS check_row_equality() + thd thread handle left_row left term of the row equality to be processed right_row right term of the row equality to be processed cond_equal multiple equalities that must hold together with the predicate @@ -7050,7 +7141,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item, FALSE otherwise */ -static bool check_row_equality(Item *left_row, Item_row *right_row, +static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row, COND_EQUAL *cond_equal, List<Item>* eq_list) { uint n= left_row->cols(); @@ -7061,13 +7152,21 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, Item *right_item= right_row->element_index(i); if (left_item->type() == Item::ROW_ITEM && right_item->type() == 
Item::ROW_ITEM) - is_converted= check_row_equality((Item_row *) left_item, - (Item_row *) right_item, - cond_equal, eq_list); - else + { + is_converted= check_row_equality(thd, + (Item_row *) left_item, + (Item_row *) right_item, + cond_equal, eq_list); + if (!is_converted) + thd->lex->current_select->cond_count++; + } + else + { is_converted= check_simple_equality(left_item, right_item, 0, cond_equal); - - if (!is_converted) + thd->lex->current_select->cond_count++; + } + + if (!is_converted) { Item_func_eq *eq_item; if (!(eq_item= new Item_func_eq(left_item, right_item))) @@ -7086,6 +7185,7 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, SYNOPSIS check_equality() + thd thread handle item predicate to process cond_equal multiple equalities that must hold together with the predicate eq_list results of conversions of row equalities that are not simple @@ -7110,7 +7210,7 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, or, if the procedure fails by a fatal error. */ -static bool check_equality(Item *item, COND_EQUAL *cond_equal, +static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal, List<Item> *eq_list) { if (item->type() == Item::FUNC_ITEM && @@ -7121,9 +7221,13 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, if (left_item->type() == Item::ROW_ITEM && right_item->type() == Item::ROW_ITEM) - return check_row_equality((Item_row *) left_item, + { + thd->lex->current_select->cond_count--; + return check_row_equality(thd, + (Item_row *) left_item, (Item_row *) right_item, cond_equal, eq_list); + } else return check_simple_equality(left_item, right_item, item, cond_equal); } @@ -7136,6 +7240,7 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, SYNOPSIS build_equal_items_for_cond() + thd thread handle cond condition(expression) where to make replacement inherited path to all inherited multiple equality items @@ -7198,7 +7303,7 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, pointer to the transformed condition */ -static COND *build_equal_items_for_cond(COND *cond, +static COND *build_equal_items_for_cond(THD *thd, COND *cond, COND_EQUAL *inherited) { Item_equal *item_equal; @@ -7231,7 +7336,7 @@ static COND *build_equal_items_for_cond(COND *cond, structure here because it's restored before each re-execution of any prepared statement/stored procedure. */ - if (check_equality(item, &cond_equal, &eq_list)) + if (check_equality(thd, item, &cond_equal, &eq_list)) li.remove(); } @@ -7266,7 +7371,7 @@ static COND *build_equal_items_for_cond(COND *cond, while ((item= li++)) { Item *new_item; - if ((new_item = build_equal_items_for_cond(item, inherited))!= item) + if ((new_item= build_equal_items_for_cond(thd, item, inherited)) != item) { /* This replacement happens only for standalone equalities */ /* @@ -7296,7 +7401,7 @@ static COND *build_equal_items_for_cond(COND *cond, for WHERE a=b AND c=d AND (b=c OR d=5) b=c is replaced by =(a,b,c,d). 
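The comment above describes how chains of simple equalities are folded into one multiple equality, e.g. a=b AND c=d AND b=c becoming =(a,b,c,d). The server builds Item_equal objects for this; as a stand-in, a union-find over column ids shows the same grouping effect. Names here are invented for illustration.

/* Stand-in for the multiple-equality grouping described above. */
#include <vector>
#include <numeric>
#include <cstdio>

struct EqualitySets
{
  std::vector<int> parent;
  explicit EqualitySets(int n) : parent(n)
  { std::iota(parent.begin(), parent.end(), 0); }
  int find(int x) { return parent[x] == x ? x : parent[x]= find(parent[x]); }
  void add_equality(int a, int b) { parent[find(a)]= find(b); }
};

int main()
{
  enum { a, b, c, d, COLS };
  EqualitySets eq(COLS);
  eq.add_equality(a, b);            /* a=b */
  eq.add_equality(c, d);            /* c=d */
  eq.add_equality(b, c);            /* b=c: all four collapse into =(a,b,c,d) */
  std::printf("same class: %d\n", eq.find(a) == eq.find(d));   /* prints 1 */
  return 0;
}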
*/ - if (check_equality(cond, &cond_equal, &eq_list)) + if (check_equality(thd, cond, &cond_equal, &eq_list)) { int n= cond_equal.current_level.elements + eq_list.elements; if (n == 0) @@ -7359,7 +7464,7 @@ static COND *build_equal_items_for_cond(COND *cond, SYNOPSIS build_equal_items() - thd Thread handler + thd thread handle cond condition to build the multiple equalities for inherited path to all inherited multiple equality items join_list list of join tables to which the condition refers to @@ -7420,7 +7525,7 @@ static COND *build_equal_items(THD *thd, COND *cond, if (cond) { - cond= build_equal_items_for_cond(cond, inherited); + cond= build_equal_items_for_cond(thd, cond, inherited); cond->update_used_tables(); if (cond->type() == Item::COND_ITEM && ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) @@ -7708,6 +7813,10 @@ static COND* substitute_for_best_equal_field(COND *cond, break; } } + if (cond->type() == Item::COND_ITEM && + !((Item_cond*)cond)->argument_list()->elements) + cond= new Item_int((int32)cond->val_bool()); + } else if (cond->type() == Item::FUNC_ITEM && ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) @@ -8102,9 +8211,14 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) */ expr= simplify_joins(join, &nested_join->join_list, expr, FALSE); - table->on_expr= expr; - if (!table->prep_on_expr) + + if (!table->prep_on_expr || expr != table->on_expr) + { + DBUG_ASSERT(expr); + + table->on_expr= expr; table->prep_on_expr= expr->copy_andor_structure(join->thd); + } } nested_join->used_tables= (table_map) 0; nested_join->not_null_tables=(table_map) 0; @@ -8114,7 +8228,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) } else { - if (!(table->prep_on_expr)) + if (!table->prep_on_expr) table->prep_on_expr= table->on_expr; used_tables= table->table->map; if (conds) @@ -8264,7 +8378,7 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list, */ if (nested_join->join_list.elements != 1) { - nested_join->nj_map= 1 << first_unused++; + nested_join->nj_map= (nested_join_map) 1 << first_unused++; first_unused= build_bitmap_for_nested_joins(&nested_join->join_list, first_unused); } @@ -8606,7 +8720,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) if ((new_cond= new Item_func_eq(args[0], new Item_int("last_insert_id()", thd->read_first_successful_insert_id_in_prev_stmt(), - 21)))) + MY_INT64_NUM_DECIMAL_DIGITS)))) { cond=new_cond; /* @@ -8871,7 +8985,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, break; case INT_RESULT: /* Select an integer type with the minimal fit precision */ - if (item->max_length > 11) + if (item->max_length > MY_INT32_NUM_DECIMAL_DIGITS) new_field=new Field_longlong(item->max_length, maybe_null, item->name, item->unsigned_flag); else @@ -8883,19 +8997,19 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, enum enum_field_types type; /* - DATE/TIME fields have STRING_RESULT result type. To preserve - type they needed to be handled separately. + DATE/TIME and GEOMETRY fields have STRING_RESULT result type. + To preserve type they needed to be handled separately. 
*/ if ((type= item->field_type()) == MYSQL_TYPE_DATETIME || - type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE) + type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE || + type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_GEOMETRY) new_field= item->tmp_table_field_from_field_type(table, 1); /* Make sure that the blob fits into a Field_varstring which has 2-byte lenght. */ else if (item->max_length/item->collation.collation->mbmaxlen > 255 && - item->max_length/item->collation.collation->mbmaxlen < UINT_MAX16 - && convert_blob_length) + convert_blob_length < UINT_MAX16 && convert_blob_length) new_field= new Field_varstring(convert_blob_length, maybe_null, item->name, table->s, item->collation.collation); @@ -9003,8 +9117,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, Item *orig_item= 0; if (type != Item::FIELD_ITEM && - item->real_item()->type() == Item::FIELD_ITEM && - !((Item_ref *) item)->depended_from) + item->real_item()->type() == Item::FIELD_ITEM) { orig_item= item; item= item->real_item(); @@ -9166,7 +9279,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, uint hidden_null_count, hidden_null_pack_length, hidden_field_count; uint blob_count,group_null_items, string_count; uint temp_pool_slot=MY_BIT_NONE; - ulong reclength, string_total_length, fieldnr= 0; + uint fieldnr= 0; + ulong reclength, string_total_length; bool using_unique_constraint= 0; bool use_packed_rows= 0; bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS); @@ -9293,7 +9407,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->copy_blobs= 1; table->in_use= thd; table->quick_keys.init(); - table->used_keys.init(); + table->covering_keys.init(); table->keys_in_use_for_query.init(); table->s= share; @@ -9322,13 +9436,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, { if (item->with_sum_func && type != Item::SUM_FUNC_ITEM) { - /* - Mark that the we have ignored an item that refers to a summary - function. We need to know this if someone is going to use - DISTINCT on the result. - */ - param->using_indirect_summary_function=1; - continue; + if (item->used_tables() & OUTER_REF_TABLE_BIT) + item->update_used_tables(); + if (type == Item::SUBSELECT_ITEM || + (item->used_tables() & ~OUTER_REF_TABLE_BIT)) + { + /* + Mark that the we have ignored an item that refers to a summary + function. We need to know this if someone is going to use + DISTINCT on the result. 
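The create_tmp_field_from_item() changes above adjust how a temporary-table column type is chosen: integer items wider than MY_INT32_NUM_DECIMAL_DIGITS (the "> 11" this diff replaces) become Field_longlong, DATE/TIME/TIMESTAMP/GEOMETRY items keep their type via tmp_table_field_from_field_type(), and long string items become a Field_varstring capped at convert_blob_length when that cap fits the 2-byte length prefix. A reduced decision sketch with hypothetical names, not the server's Field hierarchy.

/* Hypothetical reduction of the tmp-table column choice in
   create_tmp_field_from_item(); the enum and function names are invented. */
#include <cstdint>

enum class TmpField { Int32, Int64, KeepFieldType, VarString, LongString };

const unsigned INT32_DIGITS= 11;    /* stand-in for MY_INT32_NUM_DECIMAL_DIGITS */

TmpField choose_tmp_field(bool is_integer, bool is_temporal_or_geometry,
                          unsigned max_chars, unsigned convert_blob_length)
{
  if (is_integer)                           /* INT_RESULT branch above */
    return max_chars > INT32_DIGITS ? TmpField::Int64 : TmpField::Int32;

  if (is_temporal_or_geometry)              /* preserve DATE/TIME/GEOMETRY type */
    return TmpField::KeepFieldType;

  /* STRING_RESULT: long values become a VARCHAR capped at convert_blob_length
     if that still fits a 2-byte length prefix, otherwise a longer
     string/blob representation. */
  if (max_chars > 255 && convert_blob_length && convert_blob_length < UINT16_MAX)
    return TmpField::VarString;
  return TmpField::LongString;
}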
+ */ + param->using_indirect_summary_function=1; + continue; + } } if (item->const_item() && (int) hidden_field_count <= 0) continue; // We don't have to store this @@ -9513,6 +9633,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, share->default_values= table->record[1]+alloc_length; } copy_func[0]=0; // End marker + param->func_count= copy_func - param->items_to_copy; setup_tmp_table_column_bitmaps(table, bitmaps); @@ -10332,7 +10453,6 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) else { DBUG_ASSERT(join->tables); - DBUG_ASSERT(join_tab); error= sub_select(join,join_tab,0); if (error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS) error= sub_select(join,join_tab,1); @@ -10889,7 +11009,8 @@ int safe_index_read(JOIN_TAB *tab) TABLE *table= tab->table; if ((error=table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length, HA_READ_KEY_EXACT))) + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT))) return report_error(table, error); return 0; } @@ -10919,7 +11040,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) } else { - if (!table->key_read && table->used_keys.is_set(tab->ref.key) && + if (!table->key_read && table->covering_keys.is_set(tab->ref.key) && !table->no_keyread && (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY) { @@ -11027,7 +11148,8 @@ join_read_const(JOIN_TAB *tab) { error=table->file->index_read_idx(table->record[0],tab->ref.key, (byte*) tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); } if (error) { @@ -11070,7 +11192,8 @@ join_read_key(JOIN_TAB *tab) } error=table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); } @@ -11098,7 +11221,8 @@ join_read_always_key(JOIN_TAB *tab) return -1; if ((error=table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT))) + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -11124,8 +11248,7 @@ join_read_last_key(JOIN_TAB *tab) if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref)) return -1; if ((error=table->file->index_read_last(table->record[0], - tab->ref.key_buff, - tab->ref.key_length))) + tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts)))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -11226,7 +11349,7 @@ join_read_first(JOIN_TAB *tab) { int error; TABLE *table=tab->table; - if (!table->key_read && table->used_keys.is_set(tab->index) && + if (!table->key_read && table->covering_keys.is_set(tab->index) && !table->no_keyread) { table->key_read=1; @@ -11265,7 +11388,7 @@ join_read_last(JOIN_TAB *tab) { TABLE *table=tab->table; int error; - if (!table->key_read && table->used_keys.is_set(tab->index) && + if (!table->key_read && table->covering_keys.is_set(tab->index) && !table->no_keyread) { table->key_read=1; @@ -11666,7 +11789,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), group->buff[-1]= (char) group->field->is_null(); } if (!table->file->index_read(table->record[1], - join->tmp_table_param.group_buff,0, + join->tmp_table_param.group_buff, HA_WHOLE_KEY, 
HA_READ_KEY_EXACT)) { /* Update old record */ restore_record(table,record[1]); @@ -12289,7 +12412,7 @@ find_field_in_item_list (Field *field, void *data) static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, - bool no_changes) + bool no_changes, key_map *map) { int ref_key; uint ref_key_parts; @@ -12299,14 +12422,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, DBUG_ENTER("test_if_skip_sort_order"); LINT_INIT(ref_key_parts); - /* Check which keys can be used to resolve ORDER BY. */ - usable_keys= table->keys_in_use_for_query; - /* Keys disabled by ALTER TABLE ... DISABLE KEYS should have already been taken into account. */ - DBUG_ASSERT(usable_keys.is_subset(table->s->keys_in_use)); + usable_keys= *map; for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next) { @@ -12364,8 +12484,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, If using index only read, only consider other possible index only keys */ - if (table->used_keys.is_set(ref_key)) - usable_keys.intersect(table->used_keys); + if (table->covering_keys.is_set(ref_key)) + usable_keys.intersect(table->covering_keys); if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts, &usable_keys)) < MAX_KEY) { @@ -12480,7 +12600,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, if (select_limit >= table->file->stats.records) { keys= *table->file->keys_to_use_for_scanning(); - keys.merge(table->used_keys); + keys.merge(table->covering_keys); /* We are adding here also the index specified in FORCE INDEX clause, @@ -12508,7 +12628,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, tab->read_first_record= (flag > 0 ? join_read_first: join_read_last); tab->type=JT_NEXT; // Read with index_first(), index_next() - if (table->used_keys.is_set(nr)) + if (table->covering_keys.is_set(nr)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); @@ -12534,6 +12654,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, filesort_limit Max number of rows that needs to be sorted select_limit Max number of rows in final output Used to decide if we should use index or not + is_order_by true if we are sorting on ORDER BY, false if GROUP BY + Used to decide if we should use index or not IMPLEMENTATION @@ -12552,7 +12674,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, static int create_sort_index(THD *thd, JOIN *join, ORDER *order, - ha_rows filesort_limit, ha_rows select_limit) + ha_rows filesort_limit, ha_rows select_limit, + bool is_order_by) { uint length= 0; ha_rows examined_rows; @@ -12573,7 +12696,9 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, */ if ((order != join->group_list || !(join->select_options & SELECT_BIG_RESULT)) && - test_if_skip_sort_order(tab,order,select_limit,0)) + test_if_skip_sort_order(tab,order,select_limit,0, + is_order_by ? 
&table->keys_in_use_for_order_by : + &table->keys_in_use_for_group_by)) DBUG_RETURN(0); for (ORDER *ord= join->order; ord; ord= ord->next) length++; @@ -12621,7 +12746,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, /* Fill schema tables with data before filesort if it's necessary */ if ((join->select_lex->options & OPTION_SCHEMA_TABLE) && !thd->lex->describe && - get_schema_tables_result(join)) + get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX)) goto err; if (table->s->tmp_table) @@ -12995,15 +13120,15 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length, for (;order;order=order->next,pos++) { - pos->field=0; pos->item=0; - if (order->item[0]->type() == Item::FIELD_ITEM) - pos->field= ((Item_field*) (*order->item))->field; - else if (order->item[0]->type() == Item::SUM_FUNC_ITEM && - !order->item[0]->const_item()) - pos->field= ((Item_sum*) order->item[0])->get_tmp_table_field(); - else if (order->item[0]->type() == Item::COPY_STR_ITEM) + Item *item= order->item[0]->real_item(); + pos->field= 0; pos->item= 0; + if (item->type() == Item::FIELD_ITEM) + pos->field= ((Item_field*) item)->field; + else if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) + pos->field= ((Item_sum*) item)->get_tmp_table_field(); + else if (item->type() == Item::COPY_STR_ITEM) { // Blob patch - pos->item= ((Item_copy_string*) (*order->item))->item; + pos->item= ((Item_copy_string*) item)->item; } else pos->item= *order->item; @@ -13343,7 +13468,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, Item **select_item; /* The corresponding item from the SELECT clause. */ Field *from_field; /* The corresponding field from the FROM clause. */ uint counter; - bool unaliased; + enum_resolution_type resolution; /* Local SP variables may be int but are expressions, not positions. @@ -13366,7 +13491,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, } /* Lookup the current GROUP/ORDER field in the SELECT clause. */ select_item= find_item_in_list(order_item, fields, &counter, - REPORT_EXCEPT_NOT_FOUND, &unaliased); + REPORT_EXCEPT_NOT_FOUND, &resolution); if (!select_item) return TRUE; /* The item is not unique, or some other error occured. */ @@ -13380,7 +13505,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, original field name, we should additionaly check if we have conflict for this name (in case if we would perform lookup in all tables). */ - if (unaliased && !order_item->fixed && + if (resolution == RESOLVED_BEHIND_ALIAS && !order_item->fixed && order_item->fix_fields(thd, order->item)) return TRUE; @@ -13450,16 +13575,11 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, We check order_item->fixed because Item_func_group_concat can put arguments for which fix_fields already was called. */ - thd->lex->current_select->is_item_list_lookup= 1; if (!order_item->fixed && (order_item->fix_fields(thd, order->item) || (order_item= *order->item)->check_cols(1) || thd->is_fatal_error)) - { - thd->lex->current_select->is_item_list_lookup= 0; return TRUE; /* Wrong field. */ - } - thd->lex->current_select->is_item_list_lookup= 0; uint el= all_fields.elements; all_fields.push_front(order_item); /* Add new field to field list. 
*/ @@ -13611,7 +13731,7 @@ setup_new_fields(THD *thd, List<Item> &fields, { Item **item; uint counter; - bool not_used; + enum_resolution_type not_used; DBUG_ENTER("setup_new_fields"); thd->mark_used_columns= MARK_COLUMNS_READ; // Not really needed, but... @@ -13719,10 +13839,8 @@ count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, param->quick_group=1; while ((field=li++)) { - Item::Type type=field->type(); Item::Type real_type= field->real_item()->type(); - if (type == Item::FIELD_ITEM || (real_type == Item::FIELD_ITEM && - !((Item_ref *) field)->depended_from)) + if (real_type == Item::FIELD_ITEM) param->field_count++; else if (real_type == Item::SUM_FUNC_ITEM) { @@ -13732,6 +13850,7 @@ count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, if (!sum_item->quick_group) param->quick_group=0; // UDF SUM function param->sum_func_count++; + param->func_count++; for (uint i=0 ; i < sum_item->arg_count ; i++) { @@ -15188,7 +15307,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, examined_rows=(ha_rows)join->best_positions[i].records_read; item_list.push_back(new Item_int((longlong) (ulonglong) examined_rows, - 21)); + MY_INT64_NUM_DECIMAL_DIGITS)); /* Add "filtered" field to item_list. */ if (join->thd->lex->describe & DESCRIBE_EXTENDED) @@ -15196,7 +15315,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, Item_float *filtered; float f; if (examined_rows) - f= 100.0 * join->best_positions[i].records_read / examined_rows; + f= (float) (100.0 * join->best_positions[i].records_read / + examined_rows); else f= 0.0; item_list.push_back((filtered= new Item_float(f))); @@ -15207,7 +15327,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, /* Build "Extra" field and add it to item_list. */ my_bool key_read=table->key_read; if ((tab->type == JT_NEXT || tab->type == JT_CONST) && - table->used_keys.is_set(tab->index)) + table->covering_keys.is_set(tab->index)) key_read=1; if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT && !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row) @@ -15572,10 +15692,13 @@ void st_select_lex::print(THD *thd, String *str) Item *cur_where= where; if (join) cur_where= join->conds; - if (cur_where) + if (cur_where || cond_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" where ")); - cur_where->print(str); + if (cur_where) + cur_where->print(str); + else + str->append(cond_value != Item::COND_FALSE ? "1" : "0"); } // group by & olap @@ -15601,10 +15724,13 @@ void st_select_lex::print(THD *thd, String *str) if (join) cur_having= join->having; - if (cur_having) + if (cur_having || having_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" having ")); - cur_having->print(str); + if (cur_having) + cur_having->print(str); + else + str->append(having_value != Item::COND_FALSE ? 
"1" : "0"); } if (order_list.elements) diff --git a/sql/sql_select.h b/sql/sql_select.h index 1d1fa666c60..ca37c7bd274 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -192,7 +192,7 @@ typedef struct st_join_table { JOIN *join; /* Bitmap of nested joins this table is part of */ nested_join_map embedding_map; - + void cleanup(); inline bool is_using_loose_index_scan() { @@ -331,7 +331,7 @@ public: bool need_tmp, hidden_group_fields; DYNAMIC_ARRAY keyuse; - Item::cond_result cond_value; + Item::cond_result cond_value, having_value; List<Item> all_fields; // to store all fields that used in query //Above list changed to use temporary table List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3; @@ -527,15 +527,11 @@ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); class store_key :public Sql_alloc { - protected: - Field *to_field; // Store data here - char *null_ptr; - char err; public: bool null_key; /* TRUE <=> the value of the key has a null part */ enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) - :null_ptr(null), err(0), null_key(0) + :null_key(0), null_ptr(null), err(0) { if (field_arg->type() == MYSQL_TYPE_BLOB) { @@ -550,8 +546,35 @@ public: ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ - virtual enum store_key_result copy()=0; virtual const char *name() const=0; + + /** + @brief sets ignore truncation warnings mode and calls the real copy method + + @details this function makes sure truncation warnings when preparing the + key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). + */ + enum store_key_result copy() + { + enum store_key_result result; + enum_check_fields saved_count_cuted_fields= + to_field->table->in_use->count_cuted_fields; + + to_field->table->in_use->count_cuted_fields= CHECK_FIELD_IGNORE; + + result= copy_inner(); + + to_field->table->in_use->count_cuted_fields= saved_count_cuted_fields; + + return result; + } + + protected: + Field *to_field; // Store data here + char *null_ptr; + char err; + + virtual enum store_key_result copy_inner()=0; }; @@ -571,7 +594,10 @@ class store_key_field: public store_key copy_field.set(to_field,from_field,0); } } - enum store_key_result copy() + const char *name() const { return field_name; } + + protected: + enum store_key_result copy_inner() { TABLE *table= copy_field.to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -581,7 +607,6 @@ class store_key_field: public store_key null_key= to_field->is_null(); return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } - const char *name() const { return field_name; } }; @@ -596,7 +621,10 @@ public: null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? &err : NullS, length), item(item_arg) {} - enum store_key_result copy() + const char *name() const { return "func"; } + + protected: + enum store_key_result copy_inner() { TABLE *table= to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -606,7 +634,6 @@ public: null_key= to_field->is_null() || item->null_value; return (err != 0 || res > 2 ? 
STORE_KEY_FATAL : (store_key_result) res); } - const char *name() const { return "func"; } }; @@ -622,7 +649,10 @@ public: &err : NullS, length, item_arg), inited(0) { } - enum store_key_result copy() + const char *name() const { return "const"; } + +protected: + enum store_key_result copy_inner() { int res; if (!inited) @@ -637,7 +667,6 @@ public: null_key= to_field->is_null() || item->null_value; return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } - const char *name() const { return "const"; } }; bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref); diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 5fa97dc5c2b..a62ce98850b 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -16,6 +16,21 @@ /* The servers are saved in the system table "servers" + + Currently, when the user performs an ALTER SERVER or a DROP SERVER + operation, it will cause all open tables which refer to the named + server connection to be flushed. This may cause some undesirable + behaviour with regard to currently running transactions. It is + expected that the DBA knows what s/he is doing when s/he performs + the ALTER SERVER or DROP SERVER operation. + + TODO: + It is desirable for us to implement a callback mechanism instead where + callbacks can be registered for specific server protocols. The callback + will be fired when such a server name has been created/altered/dropped + or when statistics are to be gathered such as how many actual connections. + Storage engines etc will be able to make use of the callback so that + currently running transactions etc will not be disrupted. */ #include "mysql_priv.h" @@ -25,14 +40,43 @@ #include "sp_head.h" #include "sp.h" -HASH servers_cache; -pthread_mutex_t servers_cache_mutex; // To init the hash -uint servers_cache_initialised=FALSE; -/* Version of server table. incremented by servers_load */ -static uint servers_version=0; +/* + We only use 1 mutex to guard the data structures - THR_LOCK_servers. + Read locked when only reading data and write-locked for all other access. 
+*/ + +static HASH servers_cache; static MEM_ROOT mem; static rw_lock_t THR_LOCK_servers; +static bool get_server_from_table_to_cache(TABLE *table); + +/* insert functions */ +static int insert_server(THD *thd, FOREIGN_SERVER *server_options); +static int insert_server_record(TABLE *table, FOREIGN_SERVER *server); +static int insert_server_record_into_cache(FOREIGN_SERVER *server); +static void prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *server); +/* drop functions */ +static int delete_server_record(TABLE *table, + char *server_name, + int server_name_length); +static int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options); + +/* update functions */ +static void prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +static int update_server(THD *thd, FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +static int update_server_record(TABLE *table, FOREIGN_SERVER *server); +static int update_server_record_in_cache(FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +/* utility functions */ +static void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); + + + static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, my_bool not_used __attribute__((unused))) { @@ -45,6 +89,7 @@ static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, DBUG_RETURN((byte*) server->server_name); } + /* Initialize structures responsible for servers used in federated server scheme information for them from the server @@ -64,35 +109,27 @@ static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, 1 Could not initialize servers */ -my_bool servers_init(bool dont_read_servers_table) +bool servers_init(bool dont_read_servers_table) { THD *thd; - my_bool return_val= 0; + bool return_val= FALSE; DBUG_ENTER("servers_init"); /* init the mutex */ - if (pthread_mutex_init(&servers_cache_mutex, MY_MUTEX_INIT_FAST)) - DBUG_RETURN(1); - if (my_rwlock_init(&THR_LOCK_servers, NULL)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* initialise our servers cache */ if (hash_init(&servers_cache, system_charset_info, 32, 0, 0, (hash_get_key) servers_cache_get_key, 0, 0)) { - return_val= 1; /* we failed, out of memory? */ + return_val= TRUE; /* we failed, out of memory? */ goto end; } /* Initialize the mem root for data */ init_alloc_root(&mem, ACL_ALLOC_BLOCK_SIZE, 0); - /* - at this point, the cache is initialised, let it be known - */ - servers_cache_initialised= TRUE; - if (dont_read_servers_table) goto end; @@ -100,7 +137,7 @@ my_bool servers_init(bool dont_read_servers_table) To be able to run this from boot, we allocate a temporary THD */ if (!(thd=new THD)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); thd->thread_stack= (char*) &thd; thd->store_globals(); /* @@ -130,19 +167,13 @@ end: TRUE Error */ -static my_bool servers_load(THD *thd, TABLE_LIST *tables) +static bool servers_load(THD *thd, TABLE_LIST *tables) { TABLE *table; READ_RECORD read_record_info; - my_bool return_val= TRUE; + bool return_val= TRUE; DBUG_ENTER("servers_load"); - if (!servers_cache_initialised) - DBUG_RETURN(0); - - /* need to figure out how to utilise this variable */ - servers_version++; /* servers updated */ - /* first, send all cached rows to sleep with the fishes, oblivion! 
I expect this crappy comment replaced */ free_root(&mem, MYF(MY_MARK_BLOCKS_FREE)); @@ -156,7 +187,7 @@ static my_bool servers_load(THD *thd, TABLE_LIST *tables) goto end; } - return_val=0; + return_val= FALSE; end: end_read_record(&read_record_info); @@ -183,10 +214,10 @@ end: TRUE Failure */ -my_bool servers_reload(THD *thd) +bool servers_reload(THD *thd) { TABLE_LIST tables[1]; - my_bool return_val= 1; + bool return_val= TRUE; DBUG_ENTER("servers_reload"); if (thd->locked_tables) @@ -196,10 +227,9 @@ my_bool servers_reload(THD *thd) close_thread_tables(thd); } - /* - To avoid deadlocks we should obtain table locks before - obtaining servers_cache->lock mutex. - */ + DBUG_PRINT("info", ("locking servers_cache")); + rw_wrlock(&THR_LOCK_servers); + bzero((char*) tables, sizeof(tables)); tables[0].alias= tables[0].table_name= (char*) "servers"; tables[0].db= (char*) "mysql"; @@ -207,17 +237,11 @@ my_bool servers_reload(THD *thd) if (simple_open_n_lock_tables(thd, tables)) { - sql_print_error("Fatal error: Can't open and lock privilege tables: %s", + sql_print_error("Can't open and lock privilege tables: %s", thd->net.last_error); goto end; } - DBUG_PRINT("info", ("locking servers_cache")); - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - //old_servers_cache= servers_cache; - //old_mem=mem; - if ((return_val= servers_load(thd, tables))) { // Error. Revert to old list /* blast, for now, we have no servers, discuss later way to preserve */ @@ -226,14 +250,14 @@ my_bool servers_reload(THD *thd) servers_free(); } - DBUG_PRINT("info", ("unlocking servers_cache")); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); - end: close_thread_tables(thd); + DBUG_PRINT("info", ("unlocking servers_cache")); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(return_val); } + /* Initialize structures responsible for servers used in federated server scheme information for them from the server @@ -260,7 +284,8 @@ end: 1 could not insert server struct into global servers cache */ -my_bool get_server_from_table_to_cache(TABLE *table) +static bool +get_server_from_table_to_cache(TABLE *table) { /* alloc a server struct */ char *ptr; @@ -308,69 +333,6 @@ my_bool get_server_from_table_to_cache(TABLE *table) DBUG_RETURN(FALSE); } -/* - SYNOPSIS - server_exists_in_table() - THD *thd - thread pointer - LEX_SERVER_OPTIONS *server_options - pointer to Lex->server_options - - NOTES - This function takes a LEX_SERVER_OPTIONS struct, which is very much the - same type of structure as a FOREIGN_SERVER, it contains the values parsed - in any one of the [CREATE|DELETE|DROP] SERVER statements. Using the - member "server_name", index_read_idx either founds the record and returns - 1, or doesn't find the record, and returns 0 - - RETURN VALUES - 0 record not found - 1 record found -*/ - -my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) -{ - int result= 1; - int error= 0; - TABLE_LIST tables; - TABLE *table; - DBUG_ENTER("server_exists"); - - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.alias= tables.table_name= (char*) "servers"; - - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ - if (! 
(table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - - table->use_all_columns(); - - rw_wrlock(&THR_LOCK_servers); - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - /* set the field that's the PK to the value we're looking for */ - table->field[0]->store(server_options->server_name, - server_options->server_name_length, - system_charset_info); - - if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT))) - { - if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { - table->file->print_error(error, MYF(0)); - result= -1; - } - result= 0; - DBUG_PRINT("info",("record for server '%s' not found!", - server_options->server_name)); - } - - VOID(pthread_mutex_unlock(&servers_cache_mutex)); - rw_unlock(&THR_LOCK_servers); - DBUG_RETURN(result); -} /* SYNOPSIS @@ -382,15 +344,18 @@ my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) This function takes a server object that is has all members properly prepared, ready to be inserted both into the mysql.servers table and the servers cache. + + THR_LOCK_servers must be write locked. RETURN VALUES 0 - no error other - error code */ -int insert_server(THD *thd, FOREIGN_SERVER *server) +static int +insert_server(THD *thd, FOREIGN_SERVER *server) { - int error= 0; + int error= -1; TABLE_LIST tables; TABLE *table; @@ -402,13 +367,7 @@ int insert_server(THD *thd, FOREIGN_SERVER *server) /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ if (! (table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - - /* lock mutex to make sure no changes happen */ - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - /* lock table */ - rw_wrlock(&THR_LOCK_servers); + goto end; /* insert the server into the table */ if ((error= insert_server_record(table, server))) @@ -419,12 +378,10 @@ int insert_server(THD *thd, FOREIGN_SERVER *server) goto end; end: - /* unlock the table */ - rw_unlock(&THR_LOCK_servers); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); DBUG_RETURN(error); } + /* SYNOPSIS int insert_server_record_into_cache() @@ -434,13 +391,16 @@ end: This function takes a FOREIGN_SERVER pointer to an allocated (root mem) and inserts it into the global servers cache + THR_LOCK_servers must be write locked. 
+ RETURN VALUE 0 - no error >0 - error code */ -int insert_server_record_into_cache(FOREIGN_SERVER *server) +static int +insert_server_record_into_cache(FOREIGN_SERVER *server) { int error=0; DBUG_ENTER("insert_server_record_into_cache"); @@ -461,6 +421,7 @@ int insert_server_record_into_cache(FOREIGN_SERVER *server) DBUG_RETURN(error); } + /* SYNOPSIS store_server_fields() @@ -478,7 +439,8 @@ int insert_server_record_into_cache(FOREIGN_SERVER *server) */ -void store_server_fields(TABLE *table, FOREIGN_SERVER *server) +static void +store_server_fields(TABLE *table, FOREIGN_SERVER *server) { table->use_all_columns(); @@ -539,12 +501,15 @@ void store_server_fields(TABLE *table, FOREIGN_SERVER *server) */ +static int insert_server_record(TABLE *table, FOREIGN_SERVER *server) { int error; DBUG_ENTER("insert_server_record"); table->use_all_columns(); + empty_record(table); + /* set the field that's the PK to the value we're looking for */ table->field[0]->store(server->server_name, server->server_name_length, @@ -552,8 +517,7 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) /* read index until record is that specified in server_name */ if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT))) { /* if not found, err */ @@ -604,9 +568,11 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error= 0; + int error; TABLE_LIST tables; TABLE *table; + LEX_STRING name= { server_options->server_name, + server_options->server_name_length }; DBUG_ENTER("drop_server"); DBUG_PRINT("info", ("server name server->server_name %s", @@ -616,28 +582,35 @@ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) tables.db= (char*) "mysql"; tables.alias= tables.table_name= (char*) "servers"; - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ - if (! (table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - rw_wrlock(&THR_LOCK_servers); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + /* hit the memory hit first */ + if ((error= delete_server_record_in_cache(server_options))) + goto end; - if ((error= delete_server_record(table, - server_options->server_name, - server_options->server_name_length))) + if (! 
(table= open_ltable(thd, &tables, TL_WRITE))) + { + error= my_errno; goto end; + } + error= delete_server_record(table, name.str, name.length); - if ((error= delete_server_record_in_cache(server_options))) - goto end; + /* close the servers table before we call closed_cached_connection_tables */ + close_thread_tables(thd); + + if (close_cached_connection_tables(thd, TRUE, &name)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, "Server connection in use"); + } end: - VOID(pthread_mutex_unlock(&servers_cache_mutex)); rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + + /* SYNOPSIS @@ -656,10 +629,10 @@ end: */ -int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) +static int +delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) { - - int error= 0; + int error= ER_FOREIGN_SERVER_DOESNT_EXIST; FOREIGN_SERVER *server; DBUG_ENTER("delete_server_record_in_cache"); @@ -675,7 +648,7 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) DBUG_PRINT("info", ("server_name %s length %d not found!", server_options->server_name, server_options->server_name_length)); - // what should be done if not found in the cache? + goto end; } /* We succeded in deletion of the server to the table, now delete @@ -685,14 +658,15 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) server->server_name, server->server_name_length)); - if (server) - VOID(hash_delete(&servers_cache, (byte*) server)); - - servers_version++; /* servers updated */ + VOID(hash_delete(&servers_cache, (byte*) server)); + + error= 0; +end: DBUG_RETURN(error); } + /* SYNOPSIS @@ -712,6 +686,8 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) table for the particular server via the call to update_server_record, and in the servers_cache via update_server_record_in_cache. + THR_LOCK_servers must be write locked. + RETURN VALUE 0 - no error >0 - error code @@ -720,7 +696,7 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) { - int error= 0; + int error; TABLE *table; TABLE_LIST tables; DBUG_ENTER("update_server"); @@ -730,19 +706,26 @@ int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) tables.alias= tables.table_name= (char*)"servers"; if (!(table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(1); + { + error= my_errno; + goto end; + } - rw_wrlock(&THR_LOCK_servers); if ((error= update_server_record(table, altered))) goto end; - update_server_record_in_cache(existing, altered); + error= update_server_record_in_cache(existing, altered); + + /* + Perform a reload so we don't have a 'hole' in our mem_root + */ + servers_load(thd, &tables); end: - rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -759,6 +742,8 @@ end: HASH, then the updated record inserted, in essence replacing the old record. + THR_LOCK_servers must be write locked. 
+ RETURN VALUE 0 - no error 1 - error @@ -789,13 +774,13 @@ int update_server_record_in_cache(FOREIGN_SERVER *existing, { DBUG_PRINT("info", ("had a problem inserting server %s at %lx", altered->server_name, (long unsigned int) altered)); - error= 1; + error= ER_OUT_OF_RESOURCES; } - servers_version++; /* servers updated */ DBUG_RETURN(error); } + /* SYNOPSIS @@ -828,9 +813,9 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) to->password= strdup_root(&mem, from->password); if (to->port == -1) to->port= from->port; - if (!to->socket) + if (!to->socket && from->socket) to->socket= strdup_root(&mem, from->socket); - if (!to->scheme) + if (!to->scheme && from->scheme) to->scheme= strdup_root(&mem, from->scheme); if (!to->owner) to->owner= strdup_root(&mem, from->owner); @@ -838,6 +823,7 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) DBUG_VOID_RETURN; } + /* SYNOPSIS @@ -860,7 +846,9 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) */ -int update_server_record(TABLE *table, FOREIGN_SERVER *server) + +static int +update_server_record(TABLE *table, FOREIGN_SERVER *server) { int error=0; DBUG_ENTER("update_server_record"); @@ -871,15 +859,11 @@ int update_server_record(TABLE *table, FOREIGN_SERVER *server) system_charset_info); if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, ~(longlong)0, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { table->file->print_error(error, MYF(0)); - error= 1; - } DBUG_PRINT("info",("server not found!")); error= ER_FOREIGN_SERVER_DOESNT_EXIST; } @@ -899,6 +883,7 @@ end: DBUG_RETURN(error); } + /* SYNOPSIS @@ -914,11 +899,11 @@ end: */ -int delete_server_record(TABLE *table, - char *server_name, - int server_name_length) +static int +delete_server_record(TABLE *table, + char *server_name, int server_name_length) { - int error= 0; + int error; DBUG_ENTER("delete_server_record"); table->use_all_columns(); @@ -926,15 +911,11 @@ int delete_server_record(TABLE *table, table->field[0]->store(server_name, server_name_length, system_charset_info); if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { table->file->print_error(error, MYF(0)); - error= 1; - } DBUG_PRINT("info",("server not found!")); error= ER_FOREIGN_SERVER_DOESNT_EXIST; } @@ -963,28 +944,35 @@ int delete_server_record(TABLE *table, int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error; + int error= ER_FOREIGN_SERVER_EXISTS; FOREIGN_SERVER *server; DBUG_ENTER("create_server"); DBUG_PRINT("info", ("server_options->server_name %s", server_options->server_name)); + rw_wrlock(&THR_LOCK_servers); + + /* hit the memory first */ + if (hash_search(&servers_cache, (byte*) server_options->server_name, + server_options->server_name_length)) + goto end; + server= (FOREIGN_SERVER *)alloc_root(&mem, sizeof(FOREIGN_SERVER)); - if ((error= prepare_server_struct_for_insert(server_options, server))) - goto end; + prepare_server_struct_for_insert(server_options, server); - if ((error= insert_server(thd, server))) - goto end; + error= insert_server(thd, server); DBUG_PRINT("info", ("error returned %d", error)); end: + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ 
-1001,37 +989,44 @@ end: int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error= 0; + int error= ER_FOREIGN_SERVER_DOESNT_EXIST; FOREIGN_SERVER *altered, *existing; + LEX_STRING name= { server_options->server_name, + server_options->server_name_length }; DBUG_ENTER("alter_server"); DBUG_PRINT("info", ("server_options->server_name %s", server_options->server_name)); + rw_wrlock(&THR_LOCK_servers); + + if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache, + (byte*) name.str, + name.length))) + goto end; + altered= (FOREIGN_SERVER *)alloc_root(&mem, sizeof(FOREIGN_SERVER)); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + prepare_server_struct_for_update(server_options, existing, altered); - if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache, - (byte*) server_options->server_name, - server_options->server_name_length))) - { - error= ER_FOREIGN_SERVER_DOESNT_EXIST; - goto end; - } + error= update_server(thd, existing, altered); - if ((error= prepare_server_struct_for_update(server_options, existing, altered))) - goto end; + /* close the servers table before we call closed_cached_connection_tables */ + close_thread_tables(thd); - if ((error= update_server(thd, existing, altered))) - goto end; + if (close_cached_connection_tables(thd, FALSE, &name)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, "Server connection in use"); + } end: DBUG_PRINT("info", ("error returned %d", error)); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -1042,19 +1037,17 @@ end: NOTES RETURN VALUE - 0 - no error + none */ -int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *server) +static void +prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *server) { - int error; char *unset_ptr= (char*)""; DBUG_ENTER("prepare_server_struct"); - error= 0; - /* these two MUST be set */ server->server_name= strdup_root(&mem, server_options->server_name); server->server_name_length= server_options->server_name_length; @@ -1084,7 +1077,7 @@ int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, server->owner= server_options->owner ? strdup_root(&mem, server_options->owner) : unset_ptr; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } /* @@ -1100,13 +1093,12 @@ int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, */ -int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered) +static void +prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered) { - int error; DBUG_ENTER("prepare_server_struct_for_update"); - error= 0; altered->server_name= strdup_root(&mem, server_options->server_name); altered->server_name_length= server_options->server_name_length; @@ -1157,7 +1149,7 @@ int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, (strcmp(server_options->owner, existing->owner))) ? 
strdup_root(&mem, server_options->owner) : 0; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } /* @@ -1176,16 +1168,65 @@ int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, void servers_free(bool end) { DBUG_ENTER("servers_free"); - if (!servers_cache_initialised) + if (!hash_inited(&servers_cache)) + DBUG_VOID_RETURN; + if (!end) + { + free_root(&mem, MYF(MY_MARK_BLOCKS_FREE)); + my_hash_reset(&servers_cache); DBUG_VOID_RETURN; - VOID(pthread_mutex_destroy(&servers_cache_mutex)); - servers_cache_initialised=0; + } + rwlock_destroy(&THR_LOCK_servers); free_root(&mem,MYF(0)); hash_free(&servers_cache); DBUG_VOID_RETURN; } +/* + SYNOPSIS + + clone_server(MEM_ROOT *mem_root, FOREIGN_SERVER *orig, FOREIGN_SERVER *buff) + + Create a clone of FOREIGN_SERVER. If the supplied mem_root is of + thd->mem_root then the copy is automatically disposed at end of statement. + + NOTES + + ARGS + MEM_ROOT pointer (strings are copied into this mem root) + FOREIGN_SERVER pointer (made a copy of) + FOREIGN_SERVER buffer (if not-NULL, this pointer is returned) + + RETURN VALUE + FOREIGN_SEVER pointer (copy of one supplied FOREIGN_SERVER) +*/ + +static FOREIGN_SERVER *clone_server(MEM_ROOT *mem, const FOREIGN_SERVER *server, + FOREIGN_SERVER *buffer) +{ + DBUG_ENTER("sql_server.cc:clone_server"); + + if (!buffer) + buffer= (FOREIGN_SERVER *) alloc_root(mem, sizeof(FOREIGN_SERVER)); + + buffer->server_name= strmake_root(mem, server->server_name, + server->server_name_length); + buffer->port= server->port; + buffer->server_name_length= server->server_name_length; + + /* TODO: We need to examine which of these can really be NULL */ + buffer->db= server->db ? strdup_root(mem, server->db) : NULL; + buffer->scheme= server->scheme ? strdup_root(mem, server->scheme) : NULL; + buffer->username= server->username? strdup_root(mem, server->username): NULL; + buffer->password= server->password? strdup_root(mem, server->password): NULL; + buffer->socket= server->socket ? strdup_root(mem, server->socket) : NULL; + buffer->owner= server->owner ? strdup_root(mem, server->owner) : NULL; + buffer->host= server->host ? strdup_root(mem, server->host) : NULL; + + DBUG_RETURN(buffer); +} + /* @@ -1200,11 +1241,11 @@ void servers_free(bool end) */ -FOREIGN_SERVER *get_server_by_name(const char *server_name) +FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name, + FOREIGN_SERVER *buff) { - ulong error_num=0; uint server_name_length; - FOREIGN_SERVER *server= 0; + FOREIGN_SERVER *server; DBUG_ENTER("get_server_by_name"); DBUG_PRINT("info", ("server_name %s", server_name)); @@ -1213,12 +1254,11 @@ FOREIGN_SERVER *get_server_by_name(const char *server_name) if (! 
server_name || !strlen(server_name)) { DBUG_PRINT("info", ("server_name not defined!")); - error_num= 1; DBUG_RETURN((FOREIGN_SERVER *)NULL); } DBUG_PRINT("info", ("locking servers_cache")); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + rw_rdlock(&THR_LOCK_servers); if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache, (byte*) server_name, server_name_length))) @@ -1227,8 +1267,12 @@ FOREIGN_SERVER *get_server_by_name(const char *server_name) server_name, server_name_length)); server= (FOREIGN_SERVER *) NULL; } + /* otherwise, make copy of server */ + else + server= clone_server(mem, server, buff); + DBUG_PRINT("info", ("unlocking servers_cache")); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(server); } diff --git a/sql/sql_servers.h b/sql/sql_servers.h index 23b8cefd5bb..63c691893d1 100644 --- a/sql/sql_servers.h +++ b/sql/sql_servers.h @@ -25,40 +25,19 @@ typedef struct st_federated_server } FOREIGN_SERVER; /* cache handlers */ -my_bool servers_init(bool dont_read_server_table); -my_bool servers_reload(THD *thd); -my_bool get_server_from_table_to_cache(TABLE *table); +bool servers_init(bool dont_read_server_table); +bool servers_reload(THD *thd); void servers_free(bool end=0); /* insert functions */ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int insert_server(THD *thd, FOREIGN_SERVER *server_options); -int insert_server_record(TABLE *table, FOREIGN_SERVER *server); -int insert_server_record_into_cache(FOREIGN_SERVER *server); -void store_server_fields_for_insert(TABLE *table, FOREIGN_SERVER *server); -void store_server_fields_for_insert(TABLE *table, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *server); /* drop functions */ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int delete_server_record(TABLE *table, - char *server_name, - int server_name_length); -int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options); /* update functions */ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered); -int update_server_record(TABLE *table, FOREIGN_SERVER *server); -int update_server_record_in_cache(FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -/* utility functions */ -void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); -FOREIGN_SERVER *get_server_by_name(const char *server_name); -my_bool server_exists_in_table(THD *thd, char *server_name); + +/* lookup functions */ +FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name, + FOREIGN_SERVER *server_buffer); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 2ca64b9d5ed..445890adedb 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -38,6 +38,7 @@ enum enum_i_s_events_fields ISE_EVENT_SCHEMA, ISE_EVENT_NAME, ISE_DEFINER, + ISE_TIME_ZONE, ISE_EVENT_BODY, ISE_EVENT_DEFINITION, ISE_EVENT_TYPE, @@ -52,7 +53,8 @@ enum enum_i_s_events_fields ISE_CREATED, ISE_LAST_ALTERED, ISE_LAST_EXECUTED, - ISE_EVENT_COMMENT + ISE_EVENT_COMMENT, + ISE_ORIGINATOR }; @@ -424,7 +426,8 @@ bool mysqld_show_column_types(THD *thd) DBUG_ENTER("mysqld_show_column_types"); field_list.push_back(new Item_empty_string("Type",30)); - field_list.push_back(new Item_int("Size",(longlong) 1,21)); + 
field_list.push_back(new Item_int("Size",(longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); field_list.push_back(new Item_empty_string("Min_Value",20)); field_list.push_back(new Item_empty_string("Max_Value",20)); field_list.push_back(new Item_return_int("Prec", 4, MYSQL_TYPE_SHORT)); @@ -1221,7 +1224,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (key_part->field && (key_part->length != table->field[key_part->fieldnr-1]->key_length() && - !(key_info->flags & HA_FULLTEXT))) + !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL)))) { char *end; buff[0] = '('; @@ -1620,7 +1623,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_list_processes"); - field_list.push_back(new Item_int("Id",0,11)); + field_list.push_back(new Item_int("Id", 0, MY_INT32_NUM_DECIMAL_DIGITS)); field_list.push_back(new Item_empty_string("User",16)); field_list.push_back(new Item_empty_string("Host",LIST_PROCESS_HOST_LEN)); field_list.push_back(field=new Item_empty_string("db",NAME_LEN)); @@ -1687,10 +1690,6 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) if (mysys_var) pthread_mutex_unlock(&mysys_var->mutex); -#if !defined(DONT_USE_THR_ALARM) && ! defined(SCO) - if (pthread_kill(tmp->real_id,0)) - tmp->proc_info="*** DEAD ***"; // This shouldn't happen -#endif #ifdef EXTRA_DEBUG thd_info->start_time= tmp->time_after_lock; #else @@ -2270,8 +2269,7 @@ int make_table_list(THD *thd, SELECT_LEX *sel, ident_table.length= strlen(table); table_ident= new Table_ident(thd, ident_db, ident_table, 1); sel->init_query(); - if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, - (List<String> *) 0, (List<String> *) 0)) + if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ)) return 1; return 0; } @@ -2536,7 +2534,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) ST_SCHEMA_TABLE *schema_table= tables->schema_table; SELECT_LEX sel; INDEX_FIELD_VALUES idx_field_vals; - char path[FN_REFLEN], *end, *base_name, *orig_base_name, *file_name; + char path[FN_REFLEN], *base_name, *orig_base_name, *file_name; uint len; bool with_i_schema; enum enum_schema_tables schema_table_idx; @@ -2554,7 +2552,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) #endif DBUG_ENTER("get_all_tables"); - LINT_INIT(end); LINT_INIT(len); lex->view_prepare_mode= TRUE; @@ -2646,7 +2643,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) else { len= build_table_filename(path, sizeof(path), base_name, "", "", 0); - end= path + len; len= FN_LEN - len; find_files_result res= find_files(thd, &files, base_name, path, idx_field_vals.table_value, 0); @@ -2696,7 +2692,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) } else { - my_snprintf(end, len, "/%s%s", file_name, reg_ext); + build_table_filename(path, sizeof(path), + base_name, file_name, reg_ext, 0); + switch (mysql_frm_type(thd, path, ¬_used)) { case FRMTYPE_ERROR: table->field[3]->store(STRING_WITH_LEN("ERROR"), @@ -2942,20 +2940,21 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, if (file->stats.create_time) { thd->variables.time_zone->gmt_sec_to_TIME(&time, - file->stats.create_time); + (my_time_t) file->stats.create_time); table->field[14]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[14]->set_notnull(); } if (file->stats.update_time) { thd->variables.time_zone->gmt_sec_to_TIME(&time, - file->stats.update_time); + (my_time_t) file->stats.update_time); 
table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[15]->set_notnull(); } if (file->stats.check_time) { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->stats.check_time); + thd->variables.time_zone->gmt_sec_to_TIME(&time, + (my_time_t) file->stats.check_time); table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[16]->set_notnull(); } @@ -3514,7 +3513,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) err: proc_table->file->ha_index_end(); - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(res); } @@ -3984,20 +3983,21 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table, if (stat_info.create_time) { thd->variables.time_zone->gmt_sec_to_TIME(&time, - stat_info.create_time); + (my_time_t)stat_info.create_time); table->field[18]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[18]->set_notnull(); } if (stat_info.update_time) { thd->variables.time_zone->gmt_sec_to_TIME(&time, - stat_info.update_time); + (my_time_t)stat_info.update_time); table->field[19]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[19]->set_notnull(); } if (stat_info.check_time) { - thd->variables.time_zone->gmt_sec_to_TIME(&time, stat_info.check_time); + thd->variables.time_zone->gmt_sec_to_TIME(&time, + (my_time_t)stat_info.check_time); table->field[20]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[20]->set_notnull(); } @@ -4312,7 +4312,7 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) restore_record(sch_table, s->default_values); - if (et.load_from_row(event_table)) + if (et.load_from_row(thd, event_table)) { my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0)); DBUG_RETURN(1); @@ -4339,6 +4339,9 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) store(et.name.str, et.name.length, scs); sch_table->field[ISE_DEFINER]-> store(et.definer.str, et.definer.length, scs); + const String *tz_name= et.time_zone->get_name(); + sch_table->field[ISE_TIME_ZONE]-> + store(tz_name->ptr(), tz_name->length(), scs); sch_table->field[ISE_EVENT_BODY]-> store(STRING_WITH_LEN("SQL"), scs); sch_table->field[ISE_EVENT_DEFINITION]-> @@ -4355,6 +4358,8 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) store((const char*)sql_mode_str, sql_mode_len, scs); } + int not_used=0; + if (et.expression) { String show_str; @@ -4374,15 +4379,17 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[ISE_INTERVAL_FIELD]->store(ival->str, ival->length, scs); /* starts & ends . 
STARTS is always set - see sql_yacc.yy */ + et.time_zone->gmt_sec_to_TIME(&time, et.starts); sch_table->field[ISE_STARTS]->set_notnull(); sch_table->field[ISE_STARTS]-> - store_time(&et.starts, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); if (!et.ends_null) { + et.time_zone->gmt_sec_to_TIME(&time, et.ends); sch_table->field[ISE_ENDS]->set_notnull(); sch_table->field[ISE_ENDS]-> - store_time(&et.ends, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } } else @@ -4390,16 +4397,30 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) /* type */ sch_table->field[ISE_EVENT_TYPE]->store(STRING_WITH_LEN("ONE TIME"), scs); + et.time_zone->gmt_sec_to_TIME(&time, et.execute_at); sch_table->field[ISE_EXECUTE_AT]->set_notnull(); sch_table->field[ISE_EXECUTE_AT]-> - store_time(&et.execute_at, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } /* status */ - if (et.status == Event_timed::ENABLED) - sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs); - else - sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs); + + switch (et.status) + { + case Event_timed::ENABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs); + break; + case Event_timed::SLAVESIDE_DISABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("SLAVESIDE_DISABLED"), + scs); + break; + case Event_timed::DISABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs); + break; + default: + DBUG_ASSERT(0); + } + sch_table->field[ISE_ORIGINATOR]->store(et.originator, TRUE); /* on_completion */ if (et.on_completion == Event_timed::ON_COMPLETION_DROP) @@ -4409,7 +4430,6 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[ISE_ON_COMPLETION]-> store(STRING_WITH_LEN("PRESERVE"), scs); - int not_used=0; number_to_datetime(et.created, &time, 0, &not_used); DBUG_ASSERT(not_used==0); sch_table->field[ISE_CREATED]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); @@ -4419,11 +4439,12 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[ISE_LAST_ALTERED]-> store_time(&time, MYSQL_TIMESTAMP_DATETIME); - if (et.last_executed.year) + if (et.last_executed) { + et.time_zone->gmt_sec_to_TIME(&time, et.last_executed); sch_table->field[ISE_LAST_EXECUTED]->set_notnull(); sch_table->field[ISE_LAST_EXECUTED]-> - store_time(&et.last_executed, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } sch_table->field[ISE_EVENT_COMMENT]-> @@ -4938,9 +4959,7 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("mysql_schema_table"); if (!(table= table_list->schema_table->create_table(thd, table_list))) - { DBUG_RETURN(1); - } table->s->tmp_table= SYSTEM_TMP_TABLE; table->grant.privilege= SELECT_ACL; /* @@ -5035,8 +5054,7 @@ int make_schema_select(THD *thd, SELECT_LEX *sel, strlen(schema_table->table_name), 0); if (schema_table->old_format(thd, schema_table) || /* Handle old syntax */ !sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0), - 0, 0, TL_READ, (List<String> *) 0, - (List<String> *) 0)) + 0, 0, TL_READ)) { DBUG_RETURN(1); } @@ -5050,13 +5068,15 @@ int make_schema_select(THD *thd, SELECT_LEX *sel, SYNOPSIS get_schema_tables_result() join join which use schema tables + executed_place place where I_S table processed RETURN FALSE success TRUE error */ -bool get_schema_tables_result(JOIN
*join, + enum enum_schema_table_state executed_place) { JOIN_TAB *tmp_join_tab= join->join_tab+join->tables; THD *thd= join->thd; @@ -5076,14 +5096,24 @@ bool get_schema_tables_result(JOIN *join) bool is_subselect= (&lex->unit != lex->current_select->master_unit() && lex->current_select->master_unit()->item); /* - The schema table is already processed and - the statement is not a subselect. - So we don't need to handle this table again. + If schema table is already processed and + the statement is not a subselect then + we don't need to fill this table again. + If schema table is already processed and + schema_table_state != executed_place then + table is already processed and + we should skip second data processing. */ - if (table_list->is_schema_table_processed && !is_subselect) + if (table_list->schema_table_state && + (!is_subselect || table_list->schema_table_state != executed_place)) continue; - if (is_subselect) // is subselect + /* + if table is used in a subselect and + table has been processed earlier with the same + 'executed_place' value then we should refresh the table. + */ + if (table_list->schema_table_state && is_subselect) { table_list->table->file->extra(HA_EXTRA_NO_CACHE); table_list->table->file->extra(HA_EXTRA_RESET_STATE); @@ -5100,10 +5130,10 @@ bool get_schema_tables_result(JOIN *join) { result= 1; join->error= 1; - table_list->is_schema_table_processed= TRUE; + table_list->schema_table_state= executed_place; break; } - table_list->is_schema_table_processed= TRUE; + table_list->schema_table_state= executed_place; } } thd->no_warnings_for_error= 0; @@ -5326,20 +5356,25 @@ ST_FIELD_INFO tables_fields_info[]= {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"ENGINE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Engine"}, - {"VERSION", 21 , MYSQL_TYPE_LONG, 0, 1, "Version"}, + {"VERSION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Version"}, {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format"}, - {"TABLE_ROWS", 21 , MYSQL_TYPE_LONG, 0, 1, "Rows"}, - {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Avg_row_length"}, - {"DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_length"}, - {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Max_data_length"}, - {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Index_length"}, - {"DATA_FREE", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_free"}, - {"AUTO_INCREMENT", 21 , MYSQL_TYPE_LONG, 0, 1, "Auto_increment"}, + {"TABLE_ROWS", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Rows"}, + {"AVG_ROW_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Avg_row_length"}, + {"DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Data_length"}, + {"MAX_DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Max_data_length"}, + {"INDEX_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Index_length"}, + {"DATA_FREE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Data_free"}, + {"AUTO_INCREMENT", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Auto_increment"}, {"CREATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Create_time"}, {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Update_time"}, {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Check_time"}, {"TABLE_COLLATION", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, - {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, "Checksum"}, + {"CHECKSUM", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Checksum"}, {"CREATE_OPTIONS", 255, MYSQL_TYPE_STRING, 0, 1, "Create_options"}, 
{"TABLE_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} @@ -5352,14 +5387,15 @@ ST_FIELD_INFO columns_fields_info[]= {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Field"}, - {"ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 0, 0}, + {"ORDINAL_POSITION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 0, 0}, {"COLUMN_DEFAULT", MAX_FIELD_VARCHARLENGTH, MYSQL_TYPE_STRING, 0, 1, "Default"}, {"IS_NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, {"DATA_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"CHARACTER_MAXIMUM_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, - {"CHARACTER_OCTET_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, - {"NUMERIC_PRECISION", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, - {"NUMERIC_SCALE", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, + {"CHARACTER_MAXIMUM_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + 0}, + {"CHARACTER_OCTET_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_PRECISION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_SCALE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 1, 0}, {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, {"COLUMN_TYPE", 65535, MYSQL_TYPE_STRING, 0, 0, "Type"}, @@ -5385,7 +5421,7 @@ ST_FIELD_INFO collation_fields_info[]= { {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Collation"}, {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Charset"}, - {"ID", 11, MYSQL_TYPE_LONG, 0, 0, "Id"}, + {"ID", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Id"}, {"IS_DEFAULT", 3, MYSQL_TYPE_STRING, 0, 0, "Default"}, {"IS_COMPILED", 3, MYSQL_TYPE_STRING, 0, 0, "Compiled"}, {"SORTLEN", 3 ,MYSQL_TYPE_LONG, 0, 0, "Sortlen"}, @@ -5411,6 +5447,7 @@ ST_FIELD_INFO events_fields_info[]= {"EVENT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, {"EVENT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"}, + {"TIME_ZONE", 64, MYSQL_TYPE_STRING, 0, 0, "Time zone"}, {"EVENT_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0}, {"EVENT_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, {"EVENT_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"}, @@ -5420,13 +5457,14 @@ ST_FIELD_INFO events_fields_info[]= {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, {"STARTS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Starts"}, {"ENDS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Ends"}, - {"STATUS", 8, MYSQL_TYPE_STRING, 0, 0, "Status"}, + {"STATUS", 18, MYSQL_TYPE_STRING, 0, 0, "Status"}, {"ON_COMPLETION", 12, MYSQL_TYPE_STRING, 0, 0, 0}, {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0}, {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0}, {"LAST_EXECUTED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0}, {"EVENT_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} + {"ORIGINATOR", 10, MYSQL_TYPE_LONG, 0, 0, "Originator"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5476,7 +5514,7 @@ ST_FIELD_INFO stat_fields_info[]= {"SEQ_IN_INDEX", 2, MYSQL_TYPE_LONG, 0, 0, "Seq_in_index"}, {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Column_name"}, {"COLLATION", 1, MYSQL_TYPE_STRING, 0, 1, "Collation"}, - {"CARDINALITY", 21, MYSQL_TYPE_LONG, 0, 1, "Cardinality"}, + {"CARDINALITY", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 1, "Cardinality"}, {"SUB_PART", 3, MYSQL_TYPE_LONG, 0, 1, "Sub_part"}, {"PACKED", 10, MYSQL_TYPE_STRING, 0, 1, "Packed"}, {"NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, diff 
--git a/sql/sql_table.cc b/sql/sql_table.cc index bbf510a7437..3eb47ebae6e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -41,7 +41,7 @@ static int copy_data_between_tables(TABLE *from,TABLE *to, static bool prepare_blob_field(THD *thd, create_field *sql_field); static bool check_engine(THD *thd, const char *table_name, - HA_CREATE_INFO *create_info); + HA_CREATE_INFO *create_info); static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, List<create_field> *fields, List<Key> *keys, bool tmp_table, @@ -460,10 +460,10 @@ static bool write_ddl_log_header() global_ddl_log.num_entries); const_var= FN_LEN; int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS], - const_var); + (ulong) const_var); const_var= IO_SIZE; int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS], - const_var); + (ulong) const_var); if (write_ddl_log_file_entry(0UL)) { sql_print_error("Error writing ddl log header"); @@ -2791,6 +2791,12 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { column->length*= sql_field->charset->mbmaxlen; + if (key->type == Key::SPATIAL && column->length) + { + my_error(ER_WRONG_SUB_KEY, MYF(0)); + DBUG_RETURN(-1); + } + if (f_is_blob(sql_field->pack_flag) || (f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL)) { @@ -2884,6 +2890,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } else if (!f_is_geom(sql_field->pack_flag) && (column->length > length || + !Field::type_can_have_key_part (sql_field->sql_type) || ((f_is_packed(sql_field->pack_flag) || ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) && (key_info->flags & HA_NOSAME))) && @@ -3416,16 +3423,11 @@ bool mysql_create_table_internal(THD *thd, { #ifdef FN_DEVCHAR /* check if the table name contains FN_DEVCHAR when defined */ - const char *start= alias; - while (*start != '\0') + if (strchr(alias, FN_DEVCHAR)) { - if (*start == FN_DEVCHAR) - { - my_error(ER_WRONG_TABLE_NAME, MYF(0), alias); - DBUG_RETURN(TRUE); - } - start++; - } + my_error(ER_WRONG_TABLE_NAME, MYF(0), alias); + DBUG_RETURN(TRUE); + } #endif path_length= build_table_filename(path, sizeof(path), db, alias, reg_ext, internal_tmp_table ? 
FN_IS_TMP : 0); @@ -4602,7 +4604,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, Table_ident *table_ident) { TABLE *tmp_table; - char src_path[FN_REFLEN], dst_path[FN_REFLEN], tmp_path[FN_REFLEN]; + char src_path[FN_REFLEN], dst_path[FN_REFLEN]; char src_table_name_buff[FN_REFLEN], src_db_name_buff[FN_REFLEN]; uint dst_path_length; char *db= table->db; @@ -4613,7 +4615,9 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, bool res= TRUE, unlock_dst_table= FALSE; enum legacy_db_type not_used; HA_CREATE_INFO *create_info; - +#ifdef WITH_PARTITION_STORAGE_ENGINE + char tmp_path[FN_REFLEN]; +#endif TABLE_LIST src_tables_list, dst_tables_list; DBUG_ENTER("mysql_create_like_table"); @@ -4821,7 +4825,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, else unlock_dst_table= TRUE; - int result= store_create_info(thd, table, &query, create_info); + IF_DBUG(int result=) store_create_info(thd, table, &query, + create_info); DBUG_ASSERT(result == 0); // store_create_info() always return 0 write_bin_log(thd, TRUE, query.ptr(), query.length()); @@ -5861,6 +5866,8 @@ view_err: */ if (!Field::type_can_have_key_part(cfield->field->type()) || !Field::type_can_have_key_part(cfield->sql_type) || + /* spatial keys can't have sub-key length */ + (key_info->flags & HA_SPATIAL) || (cfield->field->field_length == key_part_length && !f_is_blob(key_part->key_type)) || (cfield->length && (cfield->length < key_part_length / @@ -6648,7 +6655,8 @@ view_err: thd->query, thd->query_length, db, table_name); - DBUG_ASSERT(!(mysql_bin_log.is_open() && thd->current_stmt_binlog_row_based && + DBUG_ASSERT(!(mysql_bin_log.is_open() && + thd->current_stmt_binlog_row_based && (create_info->options & HA_LEX_CREATE_TMP_TABLE))); write_bin_log(thd, TRUE, thd->query, thd->query_length); @@ -6830,7 +6838,9 @@ copy_data_between_tables(TABLE *from,TABLE *to, copy_ptr->do_copy(copy_ptr); } prev_insert_id= to->file->next_insert_id; - if ((error=to->file->ha_write_row((byte*) to->record[0]))) + error=to->file->write_row((byte*) to->record[0]); + to->auto_increment_field_not_null= FALSE; + if (error) { if (!ignore || to->file->is_fatal_error(error, HA_CHECK_DUP)) @@ -6940,7 +6950,8 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); item->maybe_null= 1; - field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); + field_list.push_back(item= new Item_int("Checksum", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); item->maybe_null= 1; if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 4fc5bde8fdc..9e30cf5878c 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -73,17 +73,18 @@ void print_cached_tables(void) uint idx,count,unused; TABLE *start_link,*lnk; + /* purecov: begin tested */ VOID(pthread_mutex_lock(&LOCK_open)); - puts("DB Table Version Thread L.thread Open Lock"); + puts("DB Table Version Thread Open Lock"); for (idx=unused=0 ; idx < open_cache.records ; idx++) { TABLE *entry=(TABLE*) hash_element(&open_cache,idx); - printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n", - entry->s->db.str, entry->s->table_name.str, entry->s->version, + printf("%-14.14s %-32s%6ld%8ld%6d %s\n", + entry->s->db.str, entry->s->table_name.str, entry->s->version, entry->in_use ? entry->in_use->thread_id : 0L, - entry->in_use ? entry->in_use->dbug_thread_id : 0L, - entry->db_stat ? 1 : 0, entry->in_use ? 
lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use"); + entry->db_stat ? 1 : 0, + entry->in_use ? lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use"); if (!entry->in_use) unused++; } @@ -110,6 +111,7 @@ void print_cached_tables(void) printf("Error: File hash table is corrupted\n"); fflush(stdout); VOID(pthread_mutex_unlock(&LOCK_open)); + /* purecov: end */ return; } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index df363c3c21c..66132efb8e4 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -106,10 +106,6 @@ const LEX_STRING trg_event_type_names[]= }; -static int -add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists, - TABLE_LIST ** table); - class Handle_old_incorrect_sql_modes_hook: public Unknown_key_hook { private: @@ -340,7 +336,7 @@ end: tables - table list containing one open table for which the trigger is created. stmt_query - [OUT] after successful return, this string contains - well-formed statement for creation this trigger. + well-formed statement for creating this trigger. NOTE - Assumes that trigger name is fully qualified. @@ -380,7 +376,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, } /* We don't allow creation of several triggers of the same type yet */ - if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time]) + if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time] != 0) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "multiple triggers with the same action time" @@ -673,7 +669,7 @@ static bool save_trigger_file(Table_triggers_list *triggers, const char *db, tables - table list containing one open table for which trigger is dropped. stmt_query - [OUT] after successful return, this string contains - well-formed statement for creation this trigger. + well-formed statement for deleting this trigger. RETURN VALUE False - success @@ -980,16 +976,13 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, LEX_STRING *trg_definer= it_definer++; thd->variables.sql_mode= (ulong)*trg_sql_mode; - lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length); + lex_start(thd, trg_create_str->str, trg_create_str->length); thd->spcont= 0; if (MYSQLparse((void *)thd) || thd->is_fatal_error) { - /* - Free lex associated resources. - QQ: Do we really need all this stuff here ? - */ - delete lex.sphead; + /* Currently sphead is always deleted in case of a parse error */ + DBUG_ASSERT(lex.sphead == 0); goto err_with_lex_cleanup; } @@ -1180,7 +1173,7 @@ bool Table_triggers_list::get_trigger_info(THD *thd, trg_event_type event, 1 Error */ -static int +int add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists, TABLE_LIST **table) { diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index 75dda6be1cf..707fcc4e380 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -137,3 +137,7 @@ private: extern const LEX_STRING trg_action_time_type_names[]; extern const LEX_STRING trg_event_type_names[]; + +int +add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists, + TABLE_LIST **table); diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 7dec58d9b6e..da5c1b0bc66 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -403,6 +403,13 @@ int mysql_create_function(THD *thd,udf_func *udf) DBUG_RETURN(1); } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for CREATE FUNCTION command. 
+ */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + rw_wrlock(&THR_LOCK_udf); if ((hash_search(&udf_hash,(byte*) udf->name.str, udf->name.length))) { @@ -466,6 +473,15 @@ int mysql_create_function(THD *thd,udf_func *udf) goto err; } rw_unlock(&THR_LOCK_udf); + + /* Binlog the create function. */ + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + DBUG_RETURN(0); err: @@ -484,11 +500,20 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) char *exact_name_str; uint exact_name_len; DBUG_ENTER("mysql_drop_function"); + if (!initialized) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } + + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for DROP FUNCTION command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + rw_wrlock(&THR_LOCK_udf); if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name->str, (uint) udf_name->length))) @@ -514,8 +539,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) table->use_all_columns(); table->field[0]->store(exact_name_str, exact_name_len, &my_charset_bin); if (!table->file->index_read_idx(table->record[0], 0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, + (byte*) table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { int error; @@ -524,7 +548,16 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) } close_thread_tables(thd); - rw_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); + + /* Binlog the drop function. */ + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + DBUG_RETURN(0); err: rw_unlock(&THR_LOCK_udf); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 9fe20c502d6..c3635e24407 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -147,6 +147,8 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd_arg) fake_select_lex->table_list.link_in_list((byte *)&result_table_list, (byte **) &result_table_list.next_local); + fake_select_lex->context.table_list= fake_select_lex->context.first_name_resolution_table= + fake_select_lex->get_table_list(); for (ORDER *order= (ORDER *)global_parameters->order_list.first; order; order=order->next) @@ -621,6 +623,12 @@ bool st_select_lex_unit::cleanup() join->tables= 0; } error|= fake_select_lex->cleanup(); + if (fake_select_lex->order_list.elements) + { + ORDER *ord; + for (ord= (ORDER*)fake_select_lex->order_list.first; ord; ord= ord->next) + (*ord->item)->cleanup(); + } } DBUG_RETURN(error); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index baccb3358f7..0b4632edfbe 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -26,7 +26,7 @@ /* Return 0 if row hasn't changed */ -static bool compare_record(TABLE *table) +bool compare_record(TABLE *table) { if (table->s->blob_fields + table->s->varchar_fields == 0) return cmp_record(table,record[1]); @@ -126,13 +126,14 @@ int mysql_update(THD *thd, #endif uint table_count= 0; ha_rows updated, found; - key_map old_used_keys; + key_map old_covering_keys; TABLE *table; SQL_SELECT *select; READ_RECORD info; SELECT_LEX *select_lex= &thd->lex->select_lex; bool need_reopen; ulonglong id; + List<Item> all_fields; DBUG_ENTER("mysql_update"); for ( ; ; ) @@ -164,8 +165,8 @@ int mysql_update(THD *thd, 
thd->proc_info="init"; table= table_list->table; - /* Calculate "table->used_keys" based on the WHERE */ - table->used_keys= table->s->keys_in_use; + /* Calculate "table->covering_keys" based on the WHERE */ + table->covering_keys= table->s->keys_in_use; table->quick_keys.clear_all(); #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -175,7 +176,7 @@ int mysql_update(THD *thd, if (mysql_prepare_update(thd, table_list, &conds, order_num, order)) DBUG_RETURN(1); - old_used_keys= table->used_keys; // Keys used in WHERE + old_covering_keys= table->covering_keys; // Keys used in WHERE /* Check the fields we are going to modify */ #ifndef NO_EMBEDDED_ACCESS_CHECKS table_list->grant.want_privilege= table->grant.want_privilege= want_privilege; @@ -216,6 +217,10 @@ int mysql_update(THD *thd, DBUG_RETURN(1); /* purecov: inspected */ } + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array)) + DBUG_RETURN(-1); + if (conds) { Item::cond_result cond_value; @@ -224,7 +229,7 @@ int mysql_update(THD *thd, limit= 0; // Impossible WHERE } // Don't count on usage of 'only index' when calculating which key to use - table->used_keys.clear_all(); + table->covering_keys.clear_all(); #ifdef WITH_PARTITION_STORAGE_ENGINE if (prune_partitions(thd, table, conds)) @@ -299,7 +304,7 @@ int mysql_update(THD *thd, We can't update table directly; We must first search after all matching rows before updating the table! */ - if (used_index < MAX_KEY && old_used_keys.is_set(used_index)) + if (used_index < MAX_KEY && old_covering_keys.is_set(used_index)) { table->key_read=1; table->mark_columns_used_by_index(used_index); @@ -756,7 +761,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, /* Check that we are not using table that we are updating in a sub select */ { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0))) { update_non_unique_table_error(table_list, "UPDATE", duplicate); my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); @@ -902,7 +907,7 @@ reopen_tables: tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ; tl->updating= 0; /* Update TABLE::lock_type accordingly. 
*/ - if (!tl->placeholder() && !tl->schema_table && !using_lock_tables) + if (!tl->placeholder() && !using_lock_tables) tl->table->reginfo.lock_type= tl->lock_type; } } @@ -982,7 +987,7 @@ reopen_tables: tl->lock_type != TL_READ_NO_INSERT) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, tl, table_list))) + if ((duplicate= unique_table(thd, tl, table_list, 0))) { update_non_unique_table_error(table_list, "UPDATE", duplicate); DBUG_RETURN(TRUE); @@ -1087,7 +1092,7 @@ int multi_update::prepare(List<Item> ¬_used_values, } /* - We have to check values after setup_tables to get used_keys right in + We have to check values after setup_tables to get covering_keys right in reference tables */ @@ -1114,7 +1119,7 @@ int multi_update::prepare(List<Item> ¬_used_values, update.link_in_list((byte*) tl, (byte**) &tl->next_local); tl->shared= table_count++; table->no_keyread=1; - table->used_keys.clear_all(); + table->covering_keys.clear_all(); table->pos_in_table_list= tl; } } @@ -1198,7 +1203,7 @@ static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab, TABLE_LIST *table_ref, TABLE_LIST *all_tables) { TABLE *table= join_tab->table; - if (unique_table(thd, table_ref, all_tables)) + if (unique_table(thd, table_ref, all_tables, 0)) return 0; switch (join_tab->type) { case JT_SYSTEM: @@ -1569,6 +1574,15 @@ int multi_update::do_updates(bool from_send_error) if (!can_compare_record || compare_record(table)) { + int error; + if ((error= cur_table->view_check_option(thd, ignore)) != + VIEW_CHECK_OK) + { + if (error == VIEW_CHECK_SKIP) + continue; + else if (error == VIEW_CHECK_ERROR) + goto err; + } if ((local_error=table->file->ha_update_row(table->record[1], table->record[0]))) { diff --git a/sql/sql_view.cc b/sql/sql_view.cc index eb3de565d9f..f84847f2f9c 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -224,6 +224,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, { LEX *lex= thd->lex; bool link_to_local; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + bool definer_check_is_needed= mode != VIEW_ALTER || lex->definer; +#endif /* first table in list is target VIEW name => cut off it */ TABLE_LIST *view= lex->unlink_first_table(&link_to_local); TABLE_LIST *tables= lex->query_tables; @@ -256,8 +259,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, /* DEFINER-clause is missing; we have to create default definer in persistent arena to be PS/SP friendly. + If this is an ALTER VIEW then the current user should be set as + the definer. 
*/ - Query_arena original_arena; Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena); @@ -277,11 +281,11 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, - same as current user - current user has SUPER_ACL */ - if (strcmp(lex->definer->user.str, - thd->security_ctx->priv_user) != 0 || - my_strcasecmp(system_charset_info, - lex->definer->host.str, - thd->security_ctx->priv_host) != 0) + if (definer_check_is_needed && + (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) != 0 || + my_strcasecmp(system_charset_info, + lex->definer->host.str, + thd->security_ctx->priv_host) != 0)) { if (!(thd->security_ctx->master_access & SUPER_ACL)) { @@ -675,7 +679,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, char md5[MD5_BUFF_LENGTH]; bool can_be_merged; char dir_buff[FN_REFLEN], path_buff[FN_REFLEN]; - const uchar *endp; + const char *endp; LEX_STRING dir, file, path; DBUG_ENTER("mysql_register_view"); @@ -713,7 +717,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, File_parser *parser; path.str= path_buff; - fn_format(path_buff, file.str, dir.str, 0, MY_UNPACK_FILENAME); + fn_format(path_buff, file.str, dir.str, "", MY_UNPACK_FILENAME); path.length= strlen(path_buff); if (!access(path.str, F_OK)) @@ -759,9 +763,9 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, view->query.str= (char*)str.ptr(); view->query.length= str.length()-1; // we do not need last \0 view->source.str= thd->query + thd->lex->create_view_select_start; - endp= (uchar*) view->source.str; - endp= skip_rear_comments(endp, (uchar*) (thd->query + thd->query_length)); - view->source.length= endp - (uchar*) view->source.str; + endp= view->source.str; + endp= skip_rear_comments(endp, thd->query + thd->query_length); + view->source.length= endp - view->source.str; view->file_version= 1; view->calc_md5(md5); view->md5.str= md5; @@ -970,7 +974,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, now Lex placed in statement memory */ table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local; - lex_start(thd, (uchar*)table->query.str, table->query.length); + lex_start(thd, table->query.str, table->query.length); view_select= &lex->select_lex; view_select->select_number= ++thd->select_number; { @@ -1272,6 +1276,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, but it will not be included to SELECT_LEX tree, because it will not be executed */ + table->select_lex->order_list.push_back(&lex->select_lex.order_list); goto ok; } @@ -1301,8 +1306,6 @@ ok: (st_select_lex_node**)&old_lex->all_selects_list; ok2: - if (!old_lex->time_zone_tables_used && thd->lex->time_zone_tables_used) - old_lex->time_zone_tables_used= thd->lex->time_zone_tables_used; DBUG_ASSERT(lex == thd->lex); thd->lex= old_lex; // Needed for prepare_security result= !table->prelocking_placeholder && table->prepare_security(thd); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 9fdb1874e1d..7e57c772ef3 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -47,11 +47,18 @@ const LEX_STRING null_lex_str={0,0}; #define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if (my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }} -#define YYERROR_UNLESS(A) \ +#define MYSQL_YYABORT \ + do \ + { \ + LEX::cleanup_lex_after_parse_error(YYTHD);\ + YYABORT; \ + } while (0) + +#define MYSQL_YYABORT_UNLESS(A) \ if (!(A)) \ - { \ - yyerror(ER(ER_SYNTAX_ERROR)); \ - YYABORT; \ + { \ + my_parse_error(ER(ER_SYNTAX_ERROR));\ + 
MYSQL_YYABORT; \ } /* @@ -76,19 +83,6 @@ const LEX_STRING null_lex_str={0,0}; #define __attribute__(X) #endif -/* Helper for parsing "IS [NOT] truth_value" */ -inline Item *is_truth_value(THD *thd, Item *A, bool v1, bool v2) -{ - Item *v1_t= new (thd->mem_root) Item_int((char *) (v1 ? "TRUE" : "FALSE"), - v1, 1); - Item *v1_f= new (thd->mem_root) Item_int((char *) (v1 ? "FALSE" : "TRUE"), - !v1, 1); - Item *v2_t= new (thd->mem_root) Item_int((char *) (v2 ? "TRUE" : "FALSE"), - v2, 1); - Item *ifnull= new (thd->mem_root) Item_func_ifnull(A, v2_t); - - return new (thd->mem_root) Item_func_if(ifnull, v1_t, v1_f); -} #ifndef DBUG_OFF #define YYDEBUG 1 @@ -96,6 +90,66 @@ inline Item *is_truth_value(THD *thd, Item *A, bool v1, bool v2) #define YYDEBUG 0 #endif +/** + @brief Push an error message into MySQL error stack with line + and position information. + + This function provides semantic action implementers with a way + to push the famous "You have a syntax error near..." error + message into the error stack, which is normally produced only if + a parse error is discovered internally by the Bison generated + parser. +*/ + +void my_parse_error(const char *s) +{ + THD *thd= current_thd; + + char *yytext= (char*) thd->lex->tok_start; + /* Push an error into the error stack */ + my_printf_error(ER_PARSE_ERROR, ER(ER_PARSE_ERROR), MYF(0), s, + (yytext ? (char*) yytext : ""), + thd->lex->yylineno); +} + +/** + @brief Bison callback to report a syntax/OOM error + + This function is invoked by the bison-generated parser + when a syntax error, a parse error or an out-of-memory + condition occurs. This function is not invoked when the + parser is requested to abort by semantic action code + by means of YYABORT or YYACCEPT macros. This is why these + macros should not be used (use MYSQL_YYABORT/MYSQL_YYACCEPT + instead). + + The parser will abort immediately after invoking this callback. + + This function is not for use in semantic actions and is internal to + the parser, as it performs some pre-return cleanup. + In semantic actions, please use my_parse_error or my_error to + push an error into the error stack and MYSQL_YYABORT + to abort from the parser. +*/ + +void MYSQLerror(const char *s) +{ + THD *thd= current_thd; + + /* + Restore the original LEX if it was replaced when parsing + a stored procedure. We must ensure that a parsing error + does not leave any side effects in the THD. + */ + LEX::cleanup_lex_after_parse_error(thd); + + /* "parse error" changed into "syntax error" between bison 1.75 and 1.875 */ + if (strcmp(s,"parse error") == 0 || strcmp(s,"syntax error") == 0) + s= ER(ER_SYNTAX_ERROR); + my_parse_error(s); +} + + #ifndef DBUG_OFF void turn_parser_debug_on() { @@ -310,6 +364,81 @@ void case_stmt_action_end_case(LEX *lex, bool simple) lex->sphead->do_cont_backpatch(); } +/** + Helper to resolve the SQL:2003 Syntax exception 1) in <in predicate>. + See SQL:2003, Part 2, section 8.4 <in predicate>, Note 184, page 383. + This function returns the proper item for the SQL expression + <code>left [NOT] IN ( expr )</code> + @param thd the current thread + @param left the in predicand + @param equal true for IN predicates, false for NOT IN predicates + @param expr first and only expression of the in value list + @return an expression representing the IN predicate. 
+*/ +Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal, + Item *expr) +{ + /* + Relevant references for this issue: + - SQL:2003, Part 2, section 8.4 <in predicate>, page 383, + - SQL:2003, Part 2, section 7.2 <row value expression>, page 296, + - SQL:2003, Part 2, section 6.3 <value expression primary>, page 174, + - SQL:2003, Part 2, section 7.15 <subquery>, page 370, + - SQL:2003 Feature F561, "Full value expressions". + + The exception in SQL:2003 Note 184 means: + Item_singlerow_subselect, which corresponds to a <scalar subquery>, + should be re-interpreted as an Item_in_subselect, which corresponds + to a <table subquery> when used inside an <in predicate>. + + Our reading of Note 184 is reccursive, so that all: + - IN (( <subquery> )) + - IN ((( <subquery> ))) + - IN '('^N <subquery> ')'^N + - etc + should be interpreted as a <table subquery>, no matter how deep in the + expression the <subquery> is. + */ + + Item *result; + + DBUG_ENTER("handle_sql2003_note184_exception"); + + if (expr->type() == Item::SUBSELECT_ITEM) + { + Item_subselect *expr2 = (Item_subselect*) expr; + + if (expr2->substype() == Item_subselect::SINGLEROW_SUBS) + { + Item_singlerow_subselect *expr3 = (Item_singlerow_subselect*) expr2; + st_select_lex *subselect; + + /* + Implement the mandated change, by altering the semantic tree: + left IN Item_singlerow_subselect(subselect) + is modified to + left IN (subselect) + which is represented as + Item_in_subselect(left, subselect) + */ + subselect= expr3->invalidate_and_restore_select_lex(); + result= new (thd->mem_root) Item_in_subselect(left, subselect); + + if (! equal) + result = negate_expression(thd, result); + + DBUG_RETURN(result); + } + } + + if (equal) + result= new (thd->mem_root) Item_func_eq(left, expr); + else + result= new (thd->mem_root) Item_func_ne(left, expr); + + DBUG_RETURN(result); +} + %} %union { int num; @@ -352,6 +481,7 @@ void case_stmt_action_end_case(LEX *lex, bool simple) struct st_lex *lex; sp_head *sphead; struct p_elem_val *p_elem_value; + enum index_hint_type index_hint; } %{ @@ -359,6 +489,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %} %pure_parser /* We have threads */ +/* + Currently there is 287 shift/reduce conflict. We should not introduce + new conflicts any more. +*/ +%expect 286 /* Comments for TOKENS. 
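A minimal SQL sketch of the behaviour the handle_sql2003_note184_exception helper above implements, assuming a hypothetical table t1 with an integer column a: per SQL:2003 Note 184, a parenthesized scalar subquery on the right-hand side of IN is re-interpreted as a table subquery, at any nesting depth, so the three statements below are read the same way.

    SELECT 1 IN (SELECT a FROM t1);     -- plain <table subquery>
    SELECT 1 IN ((SELECT a FROM t1));   -- Note 184: still a <table subquery>
    SELECT 1 IN (((SELECT a FROM t1))); -- same, however deep the parentheses

Only when the right-hand side is not a subquery does the helper fall back to a plain equality (or inequality, for NOT IN) comparison.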
@@ -556,7 +691,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token GLOBAL_SYM /* SQL-2003-R */ %token GRANT /* SQL-2003-R */ %token GRANTS -%token GROUP /* SQL-2003-R */ +%token GROUP_SYM /* SQL-2003-R */ %token GROUP_CONCAT_SYM %token GT_SYM /* OPERATOR */ %token HANDLER_SYM @@ -813,6 +948,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SIGNED_SYM %token SIMPLE_SYM /* SQL-2003-N */ %token SLAVE +%token SLAVESIDE_DISABLE_SYM %token SMALLINT /* SQL-2003-R */ %token SNAPSHOT_SYM %token SOCKET_SYM @@ -1028,7 +1164,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); btree_or_rtree %type <string_list> - key_usage_list using_list + using_list %type <key_part> key_part @@ -1043,7 +1179,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <interval_time_st> interval_time_st -%type <db_type> storage_engines +%type <db_type> storage_engines known_storage_engines %type <row_type> row_types @@ -1067,6 +1203,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); old_or_new_charset_name_or_default collation_name collation_name_or_default + opt_load_data_charset %type <variable> internal_variable_name @@ -1098,7 +1235,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); opt_column_list grant_privileges grant_ident grant_list grant_option object_privilege object_privilege_list user_list rename_list clear_privileges flush_options flush_option - equal optional_braces opt_key_definition key_usage_list2 + equal optional_braces opt_mi_check_type opt_to mi_check_types normal_join db_to_db table_to_table_list table_to_table opt_table_list opt_as handler_rkey_function handler_read_or_scan @@ -1111,7 +1248,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); statement sp_suid sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec - definer view_replace_or_algorithm view_replace view_algorithm_opt + definer view_replace_or_algorithm view_replace view_algorithm view_or_trigger_or_sp_or_event view_or_trigger_or_sp_or_event_tail view_suid view_tail view_list_opt view_list view_select @@ -1134,6 +1271,8 @@ END_OF_INPUT %type <spblock> sp_decls sp_decl %type <lex> sp_cursor_stmt %type <spname> sp_name +%type <index_hint> index_hint_type +%type <num> index_hint_clause %type <NONE> '-' '+' '*' '/' '%' '(' ')' @@ -1150,7 +1289,7 @@ query: (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) { my_message(ER_EMPTY_QUERY, ER(ER_EMPTY_QUERY), MYF(0)); - YYABORT; + MYSQL_YYABORT; } else { @@ -1226,8 +1365,8 @@ deallocate: LEX *lex= thd->lex; if (lex->stmt_prepare_mode) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_DEALLOCATE_PREPARE; lex->prepared_stmt_name= $3; @@ -1246,8 +1385,8 @@ prepare: LEX *lex= thd->lex; if (lex->stmt_prepare_mode) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_PREPARE; lex->prepared_stmt_name= $2; @@ -1276,8 +1415,8 @@ execute: LEX *lex= thd->lex; if (lex->stmt_prepare_mode) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_EXECUTE; lex->prepared_stmt_name= $2; @@ -1301,7 +1440,7 @@ execute_var_ident: '@' ident_or_text LEX *lex=Lex; LEX_STRING *lexstr= (LEX_STRING*)sql_memdup(&$2, sizeof(LEX_STRING)); if (!lexstr || 
lex->prepared_stmt_params.push_back(lexstr)) - YYABORT; + MYSQL_YYABORT; } ; @@ -1313,7 +1452,7 @@ help: if (Lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "HELP"); - YYABORT; + MYSQL_YYABORT; } } ident_or_text @@ -1440,7 +1579,7 @@ create: (using_update_log ? TL_READ_NO_INSERT: TL_READ))) - YYABORT; + MYSQL_YYABORT; lex->create_list.empty(); lex->key_list.empty(); lex->col_list.empty(); @@ -1463,7 +1602,7 @@ create: if (!lex->current_select->add_table_to_list(lex->thd, $7, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; lex->create_list.empty(); lex->key_list.empty(); lex->col_list.empty(); @@ -1474,8 +1613,8 @@ create: LEX *lex=Lex; if ($2 != Key::FULLTEXT && lex->key_create_info.parser_name.str) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->key_list.push_back(new Key($2, $4.str, &lex->key_create_info, 0, lex->col_list)); @@ -1505,7 +1644,7 @@ create: { Lex->sql_command = SQLCOM_CREATE_USER; } - | CREATE LOGFILE_SYM GROUP logfile_group_info + | CREATE LOGFILE_SYM GROUP_SYM logfile_group_info { Lex->alter_tablespace_info->ts_cmd_type= CREATE_LOGFILE_GROUP; } @@ -1579,7 +1718,7 @@ event_tail: Lex->create_info.options= $2; if (!(Lex->event_parse_data= Event_parse_data::new_instance(YYTHD))) - YYABORT; + MYSQL_YYABORT; Lex->event_parse_data->identifier= $3; /* @@ -1632,12 +1771,17 @@ ev_schedule_time: EVERY_SYM expr interval opt_ev_status: /* empty */ { $$= 0; } | ENABLE_SYM { - Lex->event_parse_data->status= Event_parse_data::ENABLED; + Lex->event_parse_data->status= Event_basic::ENABLED; + $$= 1; + } + | DISABLE_SYM ON SLAVE + { + Lex->event_parse_data->status= Event_basic::SLAVESIDE_DISABLED; $$= 1; } | DISABLE_SYM { - Lex->event_parse_data->status= Event_parse_data::DISABLED; + Lex->event_parse_data->status= Event_basic::DISABLED; $$= 1; } ; @@ -1667,13 +1811,13 @@ ev_on_completion: ON COMPLETION_SYM PRESERVE_SYM { Lex->event_parse_data->on_completion= - Event_parse_data::ON_COMPLETION_PRESERVE; + Event_basic::ON_COMPLETION_PRESERVE; $$= 1; } | ON COMPLETION_SYM NOT_SYM PRESERVE_SYM { Lex->event_parse_data->on_completion= - Event_parse_data::ON_COMPLETION_DROP; + Event_basic::ON_COMPLETION_DROP; $$= 1; } ; @@ -1709,12 +1853,12 @@ ev_sql_stmt: */ if (lex->sphead) { - my_error(ER_EVENT_RECURSIVITY_FORBIDDEN, MYF(0)); - YYABORT; + my_error(ER_EVENT_RECURSION_FORBIDDEN, MYF(0)); + MYSQL_YYABORT; } if (!(lex->sphead= new sp_head())) - YYABORT; + MYSQL_YYABORT; lex->sphead->reset_thd_mem_root(YYTHD); lex->sphead->init(lex); @@ -1780,12 +1924,12 @@ sp_name: if (!$1.str || check_db_name(&$1)) { my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } if (check_routine_name($3)) { my_error(ER_SP_WRONG_NAME, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } $$= new sp_name($1, $3); $$->init_qname(YYTHD); @@ -1797,10 +1941,10 @@ sp_name: if (check_routine_name($1)) { my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } if (thd->copy_db_to(&db.str, &db.length)) - YYABORT; + MYSQL_YYABORT; $$= new sp_name(db, $1); if ($$) $$->init_qname(YYTHD); @@ -1821,13 +1965,13 @@ create_function_tail: and is considered a parsing error. 
*/ my_error(ER_WRONG_USAGE, MYF(0), "SONAME", "DEFINER"); - YYABORT; + MYSQL_YYABORT; } if (is_native_function(thd, & lex->spname->m_name)) { my_error(ER_NATIVE_FCT_NAME_COLLISION, MYF(0), lex->spname->m_name.str); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_CREATE_FUNCTION; lex->udf.name = lex->spname->m_name; @@ -1846,13 +1990,13 @@ create_function_tail: if (lex->udf.type == UDFTYPE_AGGREGATE) { my_error(ER_SP_NO_AGGREGATE, MYF(0)); - YYABORT; + MYSQL_YYABORT; } if (lex->sphead) { my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "FUNCTION"); - YYABORT; + MYSQL_YYABORT; } /* Order is important here: new - reset - init */ sp= new sp_head(); @@ -1898,13 +2042,13 @@ create_function_tail: && (lex->type & BINCMP_FLAG)) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "return value collation"); - YYABORT; + MYSQL_YYABORT; } if (sp->fill_field_definition(YYTHD, lex, (enum enum_field_types) $8, &sp->m_return_field_def)) - YYABORT; + MYSQL_YYABORT; bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); } @@ -1922,14 +2066,14 @@ create_function_tail: sp_head *sp= lex->sphead; if (sp->is_not_allowed_in_function("function")) - YYABORT; + MYSQL_YYABORT; lex->sql_command= SQLCOM_CREATE_SPFUNCTION; sp->init_strings(thd, lex); if (!(sp->m_flags & sp_head::HAS_RETURN)) { my_error(ER_SP_NORETURN, MYF(0), sp->m_qname.str); - YYABORT; + MYSQL_YYABORT; } if (is_native_function(thd, & sp->m_name)) { @@ -2093,7 +2237,7 @@ sp_fdparam: if (spc->find_variable(&$1, TRUE)) { my_error(ER_SP_DUP_PARAM, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } sp_variable_t *spvar= spc->push_variable(&$1, (enum enum_field_types)$3, @@ -2103,7 +2247,7 @@ sp_fdparam: (enum enum_field_types) $3, &spvar->field_def)) { - YYABORT; + MYSQL_YYABORT; } spvar->field_def.field_name= spvar->name.str; spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL; @@ -2130,7 +2274,7 @@ sp_pdparam: if (spc->find_variable(&$3, TRUE)) { my_error(ER_SP_DUP_PARAM, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } sp_variable_t *spvar= spc->push_variable(&$3, (enum enum_field_types)$4, @@ -2140,7 +2284,7 @@ sp_pdparam: (enum enum_field_types) $4, &spvar->field_def)) { - YYABORT; + MYSQL_YYABORT; } spvar->field_def.field_name= spvar->name.str; spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL; @@ -2179,13 +2323,13 @@ sp_decls: { /* Variable or condition following cursor or handler */ my_message(ER_SP_VARCOND_AFTER_CURSHNDLR, ER(ER_SP_VARCOND_AFTER_CURSHNDLR), MYF(0)); - YYABORT; + MYSQL_YYABORT; } if ($2.curs && $1.hndlrs) { /* Cursor following handler */ my_message(ER_SP_CURSOR_AFTER_HANDLER, ER(ER_SP_CURSOR_AFTER_HANDLER), MYF(0)); - YYABORT; + MYSQL_YYABORT; } $$.vars= $1.vars + $2.vars; $$.conds= $1.conds + $2.conds; @@ -2223,7 +2367,7 @@ sp_decl: sp_variable_t *spvar= pctx->find_variable(var_idx); if (!spvar) - YYABORT; + MYSQL_YYABORT; spvar->type= var_type; spvar->dflt= dflt_value_item; @@ -2231,7 +2375,7 @@ sp_decl: if (lex->sphead->fill_field_definition(YYTHD, lex, var_type, &spvar->field_def)) { - YYABORT; + MYSQL_YYABORT; } spvar->field_def.field_name= spvar->name.str; @@ -2259,7 +2403,7 @@ sp_decl: if (spc->find_cond(&$2, TRUE)) { my_error(ER_SP_DUP_COND, MYF(0), $2.str); - YYABORT; + MYSQL_YYABORT; } YYTHD->lex->spcont->push_cond(&$2, $5); $$.vars= $$.hndlrs= $$.curs= 0; @@ -2269,6 +2413,9 @@ sp_decl: { LEX *lex= Lex; sp_head *sp= lex->sphead; + + lex->spcont= lex->spcont->push_context(LABEL_HANDLER_SCOPE); + sp_pcontext *ctx= lex->spcont; sp_instr_hpush_jump *i= new sp_instr_hpush_jump(sp->instructions(), ctx, $2, @@ -2276,7 +2423,6 @@ sp_decl: 
sp->add_instr(i); sp->push_backpatch(i, ctx->push_label((char *)"", 0)); - sp->m_flags|= sp_head::IN_HANDLER; } sp_hcond_list sp_proc_stmt { @@ -2300,10 +2446,12 @@ sp_decl: sp->push_backpatch(i, lex->spcont->last_label()); /* Block end */ } lex->sphead->backpatch(hlab); - sp->m_flags&= ~sp_head::IN_HANDLER; + + lex->spcont= ctx->pop_context(); + $$.vars= $$.conds= $$.curs= 0; $$.hndlrs= $6; - ctx->add_handlers($6); + lex->spcont->add_handlers($6); } | DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt { @@ -2317,7 +2465,7 @@ sp_decl: { my_error(ER_SP_DUP_CURS, MYF(0), $2.str); delete $5; - YYABORT; + MYSQL_YYABORT; } i= new sp_instr_cpush(sp->instructions(), ctx, $5, ctx->current_cursor_count()); @@ -2348,13 +2496,13 @@ sp_cursor_stmt: { my_message(ER_SP_BAD_CURSOR_QUERY, ER(ER_SP_BAD_CURSOR_QUERY), MYF(0)); - YYABORT; + MYSQL_YYABORT; } if (lex->result) { my_message(ER_SP_BAD_CURSOR_SELECT, ER(ER_SP_BAD_CURSOR_SELECT), MYF(0)); - YYABORT; + MYSQL_YYABORT; } lex->sp_lex_in_use= TRUE; $$= lex; @@ -2369,16 +2517,23 @@ sp_handler_type: ; sp_hcond_list: + sp_hcond_element + { $$= 1; } + | sp_hcond_list ',' sp_hcond_element + { $$+= 1; } + ; + +sp_hcond_element: sp_hcond { LEX *lex= Lex; sp_head *sp= lex->sphead; - sp_pcontext *ctx= lex->spcont; + sp_pcontext *ctx= lex->spcont->parent_context(); if (ctx->find_handler($1)) { my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0)); - YYABORT; + MYSQL_YYABORT; } else { @@ -2387,28 +2542,6 @@ sp_hcond_list: i->add_condition($1); ctx->push_handler($1); - $$= 1; - } - } - | sp_hcond_list ',' sp_hcond - { - LEX *lex= Lex; - sp_head *sp= lex->sphead; - sp_pcontext *ctx= lex->spcont; - - if (ctx->find_handler($3)) - { - my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0)); - YYABORT; - } - else - { - sp_instr_hpush_jump *i= - (sp_instr_hpush_jump *)sp->last_instruction(); - - i->add_condition($3); - ctx->push_handler($3); - $$= $1 + 1; } } ; @@ -2425,7 +2558,7 @@ sp_cond: if (!sp_cond_check(&$3)) { my_error(ER_SP_BAD_SQLSTATE, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); $$->type= sp_cond_type_t::state; @@ -2450,7 +2583,7 @@ sp_hcond: if ($$ == NULL) { my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } } | SQLWARNING_SYM /* SQLSTATEs 01??? */ @@ -2481,7 +2614,7 @@ sp_decl_idents: if (spc->find_variable(&$1, TRUE)) { my_error(ER_SP_DUP_VAR, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } spc->push_variable(&$1, (enum_field_types)0, sp_param_in); $$= 1; @@ -2496,7 +2629,7 @@ sp_decl_idents: if (spc->find_variable(&$3, TRUE)) { my_error(ER_SP_DUP_VAR, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } spc->push_variable(&$3, (enum_field_types)0, sp_param_in); $$= $1 + 1; @@ -2544,7 +2677,7 @@ sp_proc_stmt_statement: if (lex->sql_command == SQLCOM_CHANGE_DB) { /* "USE db" doesn't work in a procedure */ my_error(ER_SP_BADSTATEMENT, MYF(0), "USE"); - YYABORT; + MYSQL_YYABORT; } /* Don't add an instruction for SET statements, since all @@ -2568,7 +2701,7 @@ sp_proc_stmt_statement: else i->m_query.length= lex->tok_end - sp->m_tmp_query; i->m_query.str= strmake_root(YYTHD->mem_root, - (char *)sp->m_tmp_query, + sp->m_tmp_query, i->m_query.length); sp->add_instr(i); } @@ -2587,7 +2720,7 @@ sp_proc_stmt_return: if (sp->m_type != TYPE_ENUM_FUNCTION) { my_message(ER_SP_BADRETURN, ER(ER_SP_BADRETURN), MYF(0)); - YYABORT; + MYSQL_YYABORT; } else { @@ -2627,7 +2760,7 @@ sp_proc_stmt_leave: if (! 
lab) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "LEAVE", $2.str); - YYABORT; + MYSQL_YYABORT; } else { @@ -2659,7 +2792,7 @@ sp_proc_stmt_iterate: if (! lab || lab->type != SP_LAB_ITER) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", $2.str); - YYABORT; + MYSQL_YYABORT; } else { @@ -2690,7 +2823,7 @@ sp_proc_stmt_open: if (! lex->spcont->find_cursor(&$2, &offset)) { my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); - YYABORT; + MYSQL_YYABORT; } i= new sp_instr_copen(sp->instructions(), lex->spcont, offset); sp->add_instr(i); @@ -2708,7 +2841,7 @@ sp_proc_stmt_fetch: if (! lex->spcont->find_cursor(&$3, &offset)) { my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } i= new sp_instr_cfetch(sp->instructions(), lex->spcont, offset); sp->add_instr(i); @@ -2728,7 +2861,7 @@ sp_proc_stmt_close: if (! lex->spcont->find_cursor(&$2, &offset)) { my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); - YYABORT; + MYSQL_YYABORT; } i= new sp_instr_cclose(sp->instructions(), lex->spcont, offset); sp->add_instr(i); @@ -2752,7 +2885,7 @@ sp_fetch_list: if (!spc || !(spv = spc->find_variable(&$1))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } else { @@ -2773,7 +2906,7 @@ sp_fetch_list: if (!spc || !(spv = spc->find_variable(&$3))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } else { @@ -2842,7 +2975,7 @@ simple_case_stmt: { LEX *lex= Lex; if (case_stmt_action_expr(lex, $3)) - YYABORT; + MYSQL_YYABORT; lex->sphead->restore_lex(YYTHD); /* For expr $3 */ } @@ -2945,7 +3078,7 @@ sp_labeled_control: if (lab) { my_error(ER_SP_LABEL_REDEFINE, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } else { @@ -2966,7 +3099,7 @@ sp_labeled_control: my_strcasecmp(system_charset_info, $5.str, lab->name) != 0) { my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str); - YYABORT; + MYSQL_YYABORT; } } lex->sphead->backpatch(lex->spcont->pop_label()); @@ -2987,7 +3120,7 @@ sp_unlabeled_control: sp_label_t *lab= lex->spcont->last_label(); lab->type= SP_LAB_BEGIN; - lex->spcont= lex->spcont->push_context(); + lex->spcont= lex->spcont->push_context(LABEL_DEFAULT_SCOPE); } sp_decls sp_proc_stmts @@ -3081,11 +3214,11 @@ trg_event: ALTER TABLESPACE name CHANGE DATAFILE ... ALTER TABLESPACE name ADD DATAFILE ... ALTER TABLESPACE name access_mode - CREATE LOGFILE GROUP name ... - ALTER LOGFILE GROUP name ADD UNDOFILE .. - ALTER LOGFILE GROUP name ADD REDOFILE .. + CREATE LOGFILE GROUP_SYM name ... + ALTER LOGFILE GROUP_SYM name ADD UNDOFILE .. + ALTER LOGFILE GROUP_SYM name ADD REDOFILE .. 
DROP TABLESPACE name - DROP LOGFILE GROUP name + DROP LOGFILE GROUP_SYM name */ change_tablespace_access: tablespace_name @@ -3107,7 +3240,7 @@ tablespace_info: opt_logfile_group_name: /* empty */ {} - | USE_SYM LOGFILE_SYM GROUP ident + | USE_SYM LOGFILE_SYM GROUP_SYM ident { LEX *lex= Lex; lex->alter_tablespace_info->logfile_group_name= $4.str; @@ -3347,7 +3480,7 @@ opt_ts_nodegroup: if (lex->alter_tablespace_info->nodegroup_id != UNDEF_NODEGROUP) { my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NODEGROUP"); - YYABORT; + MYSQL_YYABORT; } lex->alter_tablespace_info->nodegroup_id= $3; }; @@ -3359,7 +3492,7 @@ opt_ts_comment: if (lex->alter_tablespace_info->ts_comment != NULL) { my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"COMMENT"); - YYABORT; + MYSQL_YYABORT; } lex->alter_tablespace_info->ts_comment= $3.str; }; @@ -3372,7 +3505,7 @@ opt_ts_engine: { my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0), "STORAGE ENGINE"); - YYABORT; + MYSQL_YYABORT; } lex->alter_tablespace_info->storage_engine= $4; }; @@ -3394,7 +3527,7 @@ ts_wait: if (!(lex->alter_tablespace_info->wait_until_completed)) { my_error(ER_FILEGROUP_OPTION_ONLY_ONCE,MYF(0),"NO_WAIT"); - YYABORT; + MYSQL_YYABORT; } lex->alter_tablespace_info->wait_until_completed= FALSE; }; @@ -3428,20 +3561,20 @@ size_number: default: { my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); - YYABORT; + MYSQL_YYABORT; } } if (prefix_number >> 31) { my_error(ER_SIZE_OVERFLOW_ERROR, MYF(0)); - YYABORT; + MYSQL_YYABORT; } number= prefix_number << text_shift_number; } else { my_error(ER_WRONG_SIZE_NUMBER, MYF(0)); - YYABORT; + MYSQL_YYABORT; } $$= number; } @@ -3461,11 +3594,11 @@ create2: THD *thd= YYTHD; LEX *lex= thd->lex; if (!(lex->like_name= $2)) - YYABORT; + MYSQL_YYABORT; if ($2->db.str == NULL && thd->copy_db_to(&($2->db.str), &($2->db.length))) { - YYABORT; + MYSQL_YYABORT; } } | '(' LIKE table_ident ')' @@ -3473,11 +3606,11 @@ create2: THD *thd= YYTHD; LEX *lex= thd->lex; if (!(lex->like_name= $3)) - YYABORT; + MYSQL_YYABORT; if ($3->db.str == NULL && thd->copy_db_to(&($3->db.str), &($3->db.length))) { - YYABORT; + MYSQL_YYABORT; } } ; @@ -3537,7 +3670,7 @@ partitioning: if (!lex->part_info) { mem_alloc_error(sizeof(partition_info)); - YYABORT; + MYSQL_YYABORT; } if (lex->sql_command == SQLCOM_ALTER_TABLE) { @@ -3546,7 +3679,7 @@ partitioning: #else my_error(ER_FEATURE_DISABLED, MYF(0), "partitioning", "--with-partition"); - YYABORT; + MYSQL_YYABORT; #endif } @@ -3559,8 +3692,8 @@ partition_entry: LEX *lex= Lex; if (!lex->part_info) { - yyerror(ER(ER_PARTITION_ENTRY_ERROR)); - YYABORT; + my_parse_error(ER(ER_PARTITION_ENTRY_ERROR)); + MYSQL_YYABORT; } /* We enter here when opening the frm file to translate @@ -3614,7 +3747,7 @@ part_field_item: if (Lex->part_info->part_field_list.push_back($1.str)) { mem_alloc_error(1); - YYABORT; + MYSQL_YYABORT; } } ; @@ -3653,7 +3786,7 @@ opt_no_parts: if (no_parts == 0) { my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions"); - YYABORT; + MYSQL_YYABORT; } lex->part_info->no_parts= no_parts; @@ -3687,7 +3820,7 @@ sub_part_field_item: if (Lex->part_info->subpart_field_list.push_back($1.str)) { mem_alloc_error(1); - YYABORT; + MYSQL_YYABORT; } } ; @@ -3701,8 +3834,8 @@ part_func_expr: lex->safe_to_cache_query= 1; if (not_corr_func) { - yyerror(ER(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR)); - YYABORT; + my_parse_error(ER(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR)); + MYSQL_YYABORT; } $$=$1; } @@ -3717,7 +3850,7 @@ opt_no_subparts: if (no_parts == 0) { my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions"); - YYABORT; + MYSQL_YYABORT; } 
lex->part_info->no_subparts= no_parts; lex->part_info->use_default_no_subpartitions= FALSE; @@ -3737,8 +3870,8 @@ part_defs: if (part_info->no_parts != count_curr_parts) { - yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR)); - YYABORT; + my_parse_error(ER(ER_PARTITION_WRONG_NO_PART_ERROR)); + MYSQL_YYABORT; } } else if (count_curr_parts > 0) @@ -3764,7 +3897,7 @@ part_definition: if (!p_elem || part_info->partitions.push_back(p_elem)) { mem_alloc_error(sizeof(partition_element)); - YYABORT; + MYSQL_YYABORT; } p_elem->part_state= PART_NORMAL; part_info->curr_part_elem= p_elem; @@ -3798,13 +3931,13 @@ opt_part_values: { my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN"); - YYABORT; + MYSQL_YYABORT; } if (lex->part_info->part_type == LIST_PARTITION) { my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "LIST", "IN"); - YYABORT; + MYSQL_YYABORT; } } else @@ -3819,7 +3952,7 @@ opt_part_values: { my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN"); - YYABORT; + MYSQL_YYABORT; } } else @@ -3834,7 +3967,7 @@ opt_part_values: { my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0), "LIST", "IN"); - YYABORT; + MYSQL_YYABORT; } } else @@ -3848,8 +3981,8 @@ part_func_max: LEX *lex= Lex; if (lex->part_info->defined_max_value) { - yyerror(ER(ER_PARTITION_MAXVALUE_ERROR)); - YYABORT; + my_parse_error(ER(ER_PARTITION_MAXVALUE_ERROR)); + MYSQL_YYABORT; } lex->part_info->defined_max_value= TRUE; lex->part_info->curr_part_elem->max_value= TRUE; @@ -3859,13 +3992,13 @@ part_func_max: { if (Lex->part_info->defined_max_value) { - yyerror(ER(ER_PARTITION_MAXVALUE_ERROR)); - YYABORT; + my_parse_error(ER(ER_PARTITION_MAXVALUE_ERROR)); + MYSQL_YYABORT; } if (Lex->part_info->curr_part_elem->has_null_value) { - yyerror(ER(ER_NULL_IN_VALUES_LESS_THAN)); - YYABORT; + my_parse_error(ER(ER_NULL_IN_VALUES_LESS_THAN)); + MYSQL_YYABORT; } } ; @@ -3902,7 +4035,7 @@ part_list_item: list_val_list.push_back(value_ptr)) { mem_alloc_error(sizeof(part_elem_value)); - YYABORT; + MYSQL_YYABORT; } } ; @@ -3925,13 +4058,13 @@ part_bit_expr: if (!value_ptr) { mem_alloc_error(sizeof(part_elem_value)); - YYABORT; + MYSQL_YYABORT; } if (part_expr->walk(&Item::check_partition_func_processor, 0, NULL)) { my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0)); - YYABORT; + MYSQL_YYABORT; } if (part_expr->fix_fields(YYTHD, (Item**)0) || ((context->table_list= save_list), FALSE) || @@ -3939,7 +4072,7 @@ part_bit_expr: (!lex->safe_to_cache_query)) { my_error(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR, MYF(0)); - YYABORT; + MYSQL_YYABORT; } thd->where= save_where; value_ptr->value= part_expr->val_int(); @@ -3952,14 +4085,14 @@ part_bit_expr: if (Lex->part_info->curr_part_elem->has_null_value) { my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0)); - YYABORT; + MYSQL_YYABORT; } Lex->part_info->curr_part_elem->has_null_value= TRUE; } else if (part_expr->result_type() != INT_RESULT) { - yyerror(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR)); - YYABORT; + my_parse_error(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR)); + MYSQL_YYABORT; } $$= value_ptr; } @@ -3971,8 +4104,8 @@ opt_sub_partition: if (Lex->part_info->no_subparts != 0 && !Lex->part_info->use_default_subpartitions) { - yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); - YYABORT; + my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); + MYSQL_YYABORT; } } | '(' sub_part_list ')' @@ -3984,16 +4117,16 @@ opt_sub_partition: if (part_info->no_subparts != part_info->count_curr_subparts) { - yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); - YYABORT; + 
my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); + MYSQL_YYABORT; } } else if (part_info->count_curr_subparts > 0) { if (part_info->partitions.elements > 1) { - yyerror(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); - YYABORT; + my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR)); + MYSQL_YYABORT; } part_info->no_subparts= part_info->count_curr_subparts; } @@ -4017,7 +4150,7 @@ sub_part_definition: curr_part->subpartitions.push_back(sub_p_elem)) { mem_alloc_error(sizeof(partition_element)); - YYABORT; + MYSQL_YYABORT; } part_info->curr_part_elem= sub_p_elem; part_info->use_default_subpartitions= FALSE; @@ -4169,8 +4302,8 @@ create_table_option: Lex->create_info.table_options|= HA_OPTION_PACK_KEYS; break; default: - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS; } @@ -4225,7 +4358,7 @@ default_charset: my_error(ER_CONFLICTING_DECLARATIONS, MYF(0), "CHARACTER SET ", cinfo->default_table_charset->csname, "CHARACTER SET ", $4->csname); - YYABORT; + MYSQL_YYABORT; } Lex->create_info.default_table_charset= $4; Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; @@ -4241,25 +4374,37 @@ default_collation: { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), $4->name, cinfo->default_table_charset->csname); - YYABORT; + MYSQL_YYABORT; } Lex->create_info.default_table_charset= $4; Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; }; +known_storage_engines: + ident_or_text + { + $$ = ha_resolve_by_name(YYTHD, &$1); + if ($$ == NULL) + { + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str); + MYSQL_YYABORT; + } + } + ; + storage_engines: ident_or_text { $$ = ha_resolve_by_name(YYTHD, &$1); if ($$ == NULL) - if (YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION) - { - my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str); - YYABORT; - } - else { - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_ERROR, + if (YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION) + { + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str); + MYSQL_YYABORT; + } + + push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_STORAGE_ENGINE, ER(ER_UNKNOWN_STORAGE_ENGINE), $1.str); } @@ -4316,8 +4461,8 @@ key_def: LEX *lex=Lex; if ($1 != Key::FULLTEXT && lex->key_create_info.parser_name.str) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->key_list.push_back(new Key($1,$2, &lex->key_create_info, 0, lex->col_list)); @@ -4396,7 +4541,7 @@ field_spec: &lex->comment, lex->change,&lex->interval_list,lex->charset, lex->uint_geom_type)) - YYABORT; + MYSQL_YYABORT; }; type: @@ -4465,7 +4610,7 @@ type: #else my_error(ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + MYSQL_YYABORT; #endif } | MEDIUMBLOB { Lex->charset=&my_charset_bin; @@ -4626,7 +4771,7 @@ attribute: { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), $2->name,Lex->charset->csname); - YYABORT; + MYSQL_YYABORT; } else { @@ -4651,7 +4796,7 @@ charset_name: if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0)))) { my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } } | BINARY { $$= &my_charset_bin; } @@ -4661,6 +4806,10 @@ charset_name_or_default: charset_name { $$=$1; } | DEFAULT { $$=NULL; } ; +opt_load_data_charset: + /* Empty */ { $$= NULL; } + | charset charset_name_or_default { $$= $2; } + ; old_or_new_charset_name: ident_or_text @@ -4669,7 +4818,7 @@ old_or_new_charset_name: 
!($$=get_old_charset_by_name($1.str))) { my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } } | BINARY { $$= &my_charset_bin; } @@ -4685,7 +4834,7 @@ collation_name: if (!($$=get_charset_by_name($1.str,MYF(0)))) { my_error(ER_UNKNOWN_COLLATION, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } }; @@ -4712,7 +4861,7 @@ opt_binary: MY_CS_PRIMARY,MYF(0)))) { my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2"); - YYABORT; + MYSQL_YYABORT; } } | charset charset_name opt_bin_mod { Lex->charset=$2; } @@ -4731,7 +4880,7 @@ opt_bin_charset: MY_CS_PRIMARY,MYF(0)))) { my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2"); - YYABORT; + MYSQL_YYABORT; } } | charset charset_name { Lex->charset=$2; } ; @@ -4794,7 +4943,7 @@ key_type: #else my_error(ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + MYSQL_YYABORT; #endif }; @@ -4827,7 +4976,7 @@ opt_unique_or_fulltext: #else my_error(ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + MYSQL_YYABORT; #endif } ; @@ -4875,7 +5024,7 @@ key_opt: else { my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } } ; @@ -4932,7 +5081,7 @@ alter: lex->duplicates= DUP_ERROR; if (!lex->select_lex.add_table_to_list(thd, $4, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; lex->create_list.empty(); lex->key_list.empty(); lex->col_list.empty(); @@ -4964,7 +5113,7 @@ alter: lex->name= $3; if (lex->name.str == NULL && thd->copy_db_to(&lex->name.str, &lex->name.length)) - YYABORT; + MYSQL_YYABORT; } | ALTER PROCEDURE sp_name { @@ -4973,7 +5122,7 @@ alter: if (lex->sphead) { my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"); - YYABORT; + MYSQL_YYABORT; } bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); } @@ -4991,7 +5140,7 @@ alter: if (lex->sphead) { my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"); - YYABORT; + MYSQL_YYABORT; } bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); } @@ -5002,19 +5151,26 @@ alter: lex->sql_command= SQLCOM_ALTER_FUNCTION; lex->spname= $3; } - | ALTER view_algorithm_opt definer view_suid - VIEW_SYM table_ident - { - THD *thd= YYTHD; - LEX *lex= thd->lex; - lex->sql_command= SQLCOM_CREATE_VIEW; - lex->create_view_mode= VIEW_ALTER; - /* first table in list is target VIEW name */ - lex->select_lex.add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING); - } - view_list_opt AS view_select view_check_option - {} - | ALTER EVENT_SYM sp_name + | ALTER view_algorithm definer + { + Lex->create_view_mode= VIEW_ALTER; + } + view_tail + {} + | ALTER definer + /* + We have two separate rules for ALTER VIEW rather that + optional view_algorithm above, to resolve the ambiguity + with the ALTER EVENT below. 
+ */ + { + LEX *lex= Lex; + lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; + lex->create_view_mode= VIEW_ALTER; + } + view_tail + {} + | ALTER definer EVENT_SYM sp_name /* BE CAREFUL when you add a new rule to update the block where YYTHD->client_capabilities is set back to original value @@ -5029,8 +5185,8 @@ alter: */ if (!(Lex->event_parse_data= Event_parse_data::new_instance(YYTHD))) - YYABORT; - Lex->event_parse_data->identifier= $3; + MYSQL_YYABORT; + Lex->event_parse_data->identifier= $4; /* We have to turn off CLIENT_MULTI_QUERIES while parsing a @@ -5050,16 +5206,17 @@ alter: { /* $1 - ALTER - $2 - EVENT_SYM - $3 - sp_name - $4 - the block above + $2 - definer + $3 - EVENT_SYM + $4 - sp_name + $5 - the block above */ - YYTHD->client_capabilities |= $<ulong_num>4; + YYTHD->client_capabilities |= $<ulong_num>5; - if (!($5 || $6 || $7 || $8 || $9)) + if (!($6 || $7 || $8 || $9 || $10)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } /* sql_command is set here because some rules in ev_sql_stmt @@ -5072,7 +5229,7 @@ alter: LEX *lex= Lex; lex->alter_tablespace_info->ts_cmd_type= ALTER_TABLESPACE; } - | ALTER LOGFILE_SYM GROUP alter_logfile_group_info + | ALTER LOGFILE_SYM GROUP_SYM alter_logfile_group_info { LEX *lex= Lex; lex->alter_tablespace_info->ts_cmd_type= ALTER_LOGFILE_GROUP; @@ -5218,7 +5375,7 @@ add_partition_rule: if (!lex->part_info) { mem_alloc_error(sizeof(partition_info)); - YYABORT; + MYSQL_YYABORT; } lex->alter_info.flags|= ALTER_ADD_PARTITION; lex->no_write_to_binlog= $3; @@ -5248,7 +5405,7 @@ reorg_partition_rule: if (!lex->part_info) { mem_alloc_error(sizeof(partition_info)); - YYABORT; + MYSQL_YYABORT; } lex->no_write_to_binlog= $3; } @@ -5283,7 +5440,7 @@ alt_part_name_item: if (Lex->alter_info.partition_names.push_back($1.str)) { mem_alloc_error(1); - YYABORT; + MYSQL_YYABORT; } } ; @@ -5341,7 +5498,7 @@ alter_list_item: &lex->comment, $3.str, &lex->interval_list, lex->charset, lex->uint_geom_type)) - YYABORT; + MYSQL_YYABORT; } opt_place | DROP opt_column field_ident opt_restrict @@ -5403,13 +5560,13 @@ alter_list_item: if (lex->select_lex.db == NULL && thd->copy_db_to(&lex->select_lex.db, &dummy)) { - YYABORT; + MYSQL_YYABORT; } if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name(&$3->db)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); - YYABORT; + MYSQL_YYABORT; } lex->name= $3->table; lex->alter_info.flags|= ALTER_RENAME; @@ -5426,7 +5583,7 @@ alter_list_item: { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), $5->name, $4->csname); - YYABORT; + MYSQL_YYABORT; } LEX *lex= Lex; lex->create_info.table_charset= @@ -5564,7 +5721,7 @@ slave_until: { my_message(ER_BAD_SLAVE_UNTIL_COND, ER(ER_BAD_SLAVE_UNTIL_COND), MYF(0)); - YYABORT; + MYSQL_YYABORT; } } @@ -5668,7 +5825,7 @@ check: if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "CHECK"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_CHECK; lex->check_opt.init(); @@ -5735,12 +5892,12 @@ rename_list: user TO_SYM user { if (Lex->users_list.push_back($1) || Lex->users_list.push_back($3)) - YYABORT; + MYSQL_YYABORT; } | rename_list ',' user TO_SYM user { if (Lex->users_list.push_back($3) || Lex->users_list.push_back($5)) - YYABORT; + MYSQL_YYABORT; } ; @@ -5757,7 +5914,7 @@ table_to_table: TL_IGNORE) || !sl->add_table_to_list(lex->thd, $3,NULL,TL_OPTION_UPDATING, TL_IGNORE)) - YYABORT; + MYSQL_YYABORT; }; db_to_db: @@ -5768,7 +5925,7 @@ db_to_db: sql_memdup(&$1, sizeof(LEX_STRING))) || 
lex->db_list.push_back((LEX_STRING*) sql_memdup(&$3, sizeof(LEX_STRING)))) - YYABORT; + MYSQL_YYABORT; }; keycache: @@ -5787,13 +5944,9 @@ keycache_list: assign_to_keycache: table_ident cache_keys_spec { - LEX *lex=Lex; - SELECT_LEX *sel= &lex->select_lex; - if (!sel->add_table_to_list(lex->thd, $1, NULL, 0, - TL_READ, - sel->get_use_index(), - (List<String> *)0)) - YYABORT; + if (!Select->add_table_to_list(YYTHD, $1, NULL, 0, TL_READ, + Select->pop_index_hints())) + MYSQL_YYABORT; } ; @@ -5819,33 +5972,26 @@ preload_list: preload_keys: table_ident cache_keys_spec opt_ignore_leaves { - LEX *lex=Lex; - SELECT_LEX *sel= &lex->select_lex; - if (!sel->add_table_to_list(lex->thd, $1, NULL, $3, - TL_READ, - sel->get_use_index(), - (List<String> *)0)) - YYABORT; + if (!Select->add_table_to_list(YYTHD, $1, NULL, $3, TL_READ, + Select->pop_index_hints())) + MYSQL_YYABORT; } ; cache_keys_spec: - { Select->interval_list.empty(); } - cache_key_list_or_empty - { - LEX *lex=Lex; - SELECT_LEX *sel= &lex->select_lex; - sel->use_index= sel->interval_list; + { + Lex->select_lex.alloc_index_hints(YYTHD); + Select->set_index_hint_type(INDEX_HINT_USE, + global_system_variables.old_mode ? + INDEX_HINT_MASK_JOIN : + INDEX_HINT_MASK_ALL); } + cache_key_list_or_empty ; cache_key_list_or_empty: - /* empty */ { Lex->select_lex.use_index_ptr= 0; } - | opt_key_or_index '(' key_usage_list2 ')' - { - SELECT_LEX *sel= &Lex->select_lex; - sel->use_index_ptr= &sel->use_index; - } + /* empty */ { } + | key_or_index '(' opt_key_usage_list ')' ; opt_ignore_leaves: @@ -5880,16 +6026,16 @@ select_paren: SELECT_LEX * sel= lex->current_select; if (sel->set_braces(1)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (sel->linkage == UNION_TYPE && !sel->master_unit()->first_select()->braces && sel->master_unit()->first_select()->linkage == UNION_TYPE) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } /* select in braces, can't contain global parameters */ if (sel->master_unit()->fake_select_lex) @@ -5905,14 +6051,14 @@ select_init2: SELECT_LEX * sel= lex->current_select; if (lex->current_select->set_braces(0)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (sel->linkage == UNION_TYPE && sel->master_unit()->first_select()->braces) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } } union_clause @@ -5940,8 +6086,13 @@ select_into: | select_from into; select_from: - FROM join_table_list where_clause group_clause having_clause + FROM join_table_list where_clause group_clause having_clause opt_order_clause opt_limit_clause procedure_clause + { + Select->context.table_list= + Select->context.first_name_resolution_table= + (TABLE_LIST *) Select->table_list.first; + } | FROM DUAL_SYM where_clause opt_limit_clause /* oracle compatibility: oracle always requires FROM clause, and DUAL is system table without fields. 
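[Editorial aside on the index-hint rework seen in the assign_to_keycache / preload_keys / cache_keys_spec hunks above, and completed later in this file and in table.cc: instead of filling the old use_index/ignore_index lists directly, the parser now collects tagged USE/FORCE/IGNORE hints per table and st_table_list::process_index_hints() later folds them into key bitmaps. The snippet below is a minimal standalone sketch of that folding for a single clause, using a plain std::bitset and a stripped-down hint struct as assumed stand-ins for the server's key_map and index_hint types; error handling (for example rejecting USE mixed with FORCE) and the separate FOR JOIN / ORDER BY / GROUP BY masks of the real code are deliberately omitted. It is an illustration of the merge order described in the patch, not the server's implementation.

// Illustrative sketch only -- simplified stand-ins, not MySQL internals.
#include <bitset>
#include <vector>

enum hint_type { HINT_IGNORE, HINT_USE, HINT_FORCE };

struct hint {
  hint_type type;
  int key;            // key number; -1 models an empty USE INDEX ()
};

using key_set = std::bitset<64>;

// Fold a tagged hint list into the set of keys usable for one clause,
// following the order described by the patch: collect USE/FORCE, treat
// FORCE as USE plus a force flag, apply USE as an intersection, then
// subtract IGNORE so IGNORE always wins regardless of hint order.
key_set usable_keys(const std::vector<hint>& hints,
                    key_set keys_in_use, bool& force_index)
{
  key_set use, ignore;
  bool have_use = false;
  force_index = false;

  for (const hint& h : hints) {
    if (h.type == HINT_IGNORE) {
      if (h.key >= 0) ignore.set(h.key);
      continue;
    }
    have_use = true;                    // USE or FORCE
    if (h.type == HINT_FORCE) force_index = true;
    if (h.key >= 0) use.set(h.key);     // empty USE INDEX () leaves 'use' empty
  }

  if (have_use)
    keys_in_use &= use;                 // USE/FORCE restrict the candidates
  return keys_in_use & ~ignore;         // IGNORE is applied last
}

Under these assumptions, "USE INDEX (i1), IGNORE INDEX (i1)" yields an empty candidate set whatever the hint order, and an empty "USE INDEX ()" disables all indexes for the clause, matching the semantics documented in the process_index_hints() comment added further down in table.cc. End of aside.]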
@@ -5956,7 +6107,7 @@ select_options: if (Select->options & SELECT_DISTINCT && Select->options & SELECT_ALL) { my_error(ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT"); - YYABORT; + MYSQL_YYABORT; } } ; @@ -5970,7 +6121,7 @@ select_option: | HIGH_PRIORITY { if (check_simple_select()) - YYABORT; + MYSQL_YYABORT; Lex->lock_option= TL_READ_HIGH_PRIORITY; } | DISTINCT { Select->options|= SELECT_DISTINCT; } @@ -5979,13 +6130,13 @@ select_option: | SQL_BUFFER_RESULT { if (check_simple_select()) - YYABORT; + MYSQL_YYABORT; Select->options|= OPTION_BUFFER_RESULT; } | SQL_CALC_FOUND_ROWS { if (check_simple_select()) - YYABORT; + MYSQL_YYABORT; Select->options|= OPTION_FOUND_ROWS; } | SQL_NO_CACHE_SYM @@ -6034,7 +6185,7 @@ select_item_list: new Item_field(&thd->lex->current_select-> context, NULL, NULL, "*"))) - YYABORT; + MYSQL_YYABORT; (thd->lex->current_select->with_wild)++; }; @@ -6043,7 +6194,7 @@ select_item: remember_name select_item2 remember_end select_alias { if (add_item_to_list(YYTHD, $2)) - YYABORT; + MYSQL_YYABORT; if ($4.str) { $2->is_autogenerated_name= FALSE; @@ -6134,13 +6285,18 @@ bool_factor: | bool_test ; bool_test: - bool_pri IS TRUE_SYM { $$= is_truth_value(YYTHD, $1,1,0); } - | bool_pri IS not TRUE_SYM { $$= is_truth_value(YYTHD, $1,0,0); } - | bool_pri IS FALSE_SYM { $$= is_truth_value(YYTHD, $1,0,1); } - | bool_pri IS not FALSE_SYM { $$= is_truth_value(YYTHD, $1,1,1); } - | bool_pri IS UNKNOWN_SYM { $$= new Item_func_isnull($1); } - | bool_pri IS not UNKNOWN_SYM { $$= new Item_func_isnotnull($1); } - | bool_pri ; + bool_pri IS TRUE_SYM + { $$= new (YYTHD->mem_root) Item_func_istrue($1); } + | bool_pri IS not TRUE_SYM + { $$= new (YYTHD->mem_root) Item_func_isnottrue($1); } + | bool_pri IS FALSE_SYM + { $$= new (YYTHD->mem_root) Item_func_isfalse($1); } + | bool_pri IS not FALSE_SYM + { $$= new (YYTHD->mem_root) Item_func_isnotfalse($1); } + | bool_pri IS UNKNOWN_SYM { $$= new Item_func_isnull($1); } + | bool_pri IS not UNKNOWN_SYM { $$= new Item_func_isnotnull($1); } + | bool_pri + ; bool_pri: bool_pri IS NULL_SYM { $$= new Item_func_isnull($1); } @@ -6153,31 +6309,37 @@ bool_pri: | predicate ; predicate: - bit_expr IN_SYM '(' subselect ')' - { $$= new Item_in_subselect($1, $4); } - | bit_expr not IN_SYM '(' subselect ')' - { $$= negate_expression(YYTHD, new Item_in_subselect($1, $5)); } + bit_expr IN_SYM '(' subselect ')' + { + $$= new (YYTHD->mem_root) Item_in_subselect($1, $4); + } + | bit_expr not IN_SYM '(' subselect ')' + { + THD *thd= YYTHD; + Item *item= new (thd->mem_root) Item_in_subselect($1, $5); + $$= negate_expression(thd, item); + } | bit_expr IN_SYM '(' expr ')' { - $$= new Item_func_eq($1, $4); + $$= handle_sql2003_note184_exception(YYTHD, $1, true, $4); } - | bit_expr IN_SYM '(' expr ',' expr_list ')' - { - $6->push_front($4); - $6->push_front($1); - $$= new Item_func_in(*$6); + | bit_expr IN_SYM '(' expr ',' expr_list ')' + { + $6->push_front($4); + $6->push_front($1); + $$= new (YYTHD->mem_root) Item_func_in(*$6); } | bit_expr not IN_SYM '(' expr ')' { - $$= new Item_func_ne($1, $5); + $$= handle_sql2003_note184_exception(YYTHD, $1, false, $5); } - | bit_expr not IN_SYM '(' expr ',' expr_list ')' + | bit_expr not IN_SYM '(' expr ',' expr_list ')' { - $7->push_front($5); - $7->push_front($1); - Item_func_in *item = new Item_func_in(*$7); - item->negate(); - $$= item; + $7->push_front($5); + $7->push_front($1); + Item_func_in *item = new (YYTHD->mem_root) Item_func_in(*$7); + item->negate(); + $$= item; } | bit_expr BETWEEN_SYM bit_expr AND_SYM predicate { $$= 
new Item_func_between($1,$3,$5); } @@ -6323,7 +6485,7 @@ simple_expr: lex->dec ? atoi(lex->dec) : 0, lex->charset); if (!$$) - YYABORT; + MYSQL_YYABORT; } | CASE_SYM opt_expr when_list opt_else END { $$= new (YYTHD->mem_root) Item_func_case(* $3, $2, $4 ); } @@ -6334,7 +6496,7 @@ simple_expr: Lex->dec ? atoi(Lex->dec) : 0, Lex->charset); if (!$$) - YYABORT; + MYSQL_YYABORT; } | CONVERT_SYM '(' expr USING charset_name ')' { $$= new (YYTHD->mem_root) Item_func_conv_charset($3,$5); } @@ -6345,7 +6507,7 @@ simple_expr: Item_splocal *il= static_cast<Item_splocal *>($3); my_error(ER_WRONG_COLUMN_NAME, MYF(0), il->my_name()->str); - YYABORT; + MYSQL_YYABORT; } $$= new (YYTHD->mem_root) Item_default_value(Lex->current_context(), $3); @@ -6360,8 +6522,8 @@ simple_expr: { if ($1->type() != Item::ROW_ITEM) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } $$= new (YYTHD->mem_root) Item_func_interval((Item_row *)$1); } @@ -6602,7 +6764,7 @@ function_call_conflict: #else my_error(ER_FEATURE_DISABLED, MYF(0), sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + MYSQL_YYABORT; #endif } ; @@ -6681,8 +6843,8 @@ function_call_generic: { if (lex->current_select->inc_in_sum_expr()) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } } /* Temporary placing the result of find_udf in $3 */ @@ -6735,7 +6897,7 @@ function_call_generic: if (! ($$= item)) { - YYABORT; + MYSQL_YYABORT; } } | ident '.' ident '(' opt_expr_list ')' @@ -6764,7 +6926,7 @@ function_call_generic: if (! ($$= item)) { - YYABORT; + MYSQL_YYABORT; } } ; @@ -6893,7 +7055,7 @@ variable: if (! Lex->parsing_options.allows_variable) { my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); - YYABORT; + MYSQL_YYABORT; } } variable_aux @@ -6919,11 +7081,11 @@ variable_aux: { if ($3.str && $4.str && check_reserved_words(&$3)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (!($$= get_system_var(YYTHD, $2, $3, $4))) - YYABORT; + MYSQL_YYABORT; } ; @@ -6957,8 +7119,8 @@ in_sum_expr: LEX *lex= Lex; if (lex->current_select->inc_in_sum_expr()) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } } expr @@ -7038,12 +7200,12 @@ table_ref: { LEX *lex= Lex; if (!($$= lex->current_select->nest_last_join(lex->thd))) - YYABORT; + MYSQL_YYABORT; } ; join_table_list: - derived_table_list { YYERROR_UNLESS($$=$1); } + derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); } ; /* Warning - may return NULL in case of incomplete SELECT */ @@ -7051,7 +7213,7 @@ derived_table_list: table_ref { $$=$1; } | derived_table_list ',' table_ref { - YYERROR_UNLESS($1 && ($$=$3)); + MYSQL_YYABORT_UNLESS($1 && ($$=$3)); } ; @@ -7070,16 +7232,16 @@ join_table: left-associative joins. */ table_ref %prec TABLE_REF_PRIORITY normal_join table_ref - { YYERROR_UNLESS($1 && ($$=$3)); } + { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); } | table_ref STRAIGHT_JOIN table_factor - { YYERROR_UNLESS($1 && ($$=$3)); $3->straight=1; } + { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); $3->straight=1; } | table_ref normal_join table_ref ON { - YYERROR_UNLESS($1 && $3); + MYSQL_YYABORT_UNLESS($1 && $3); /* Change the current name resolution context to a local context. 
*/ if (push_new_name_resolution_context(YYTHD, $1, $3)) - YYABORT; + MYSQL_YYABORT; Select->parsing_place= IN_ON; } expr @@ -7091,10 +7253,10 @@ join_table: | table_ref STRAIGHT_JOIN table_factor ON { - YYERROR_UNLESS($1 && $3); + MYSQL_YYABORT_UNLESS($1 && $3); /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $3)) - YYABORT; + MYSQL_YYABORT; Select->parsing_place= IN_ON; } expr @@ -7107,13 +7269,13 @@ join_table: | table_ref normal_join table_ref USING { - YYERROR_UNLESS($1 && $3); + MYSQL_YYABORT_UNLESS($1 && $3); } '(' using_list ')' { add_join_natural($1,$3,$7,Select); $$=$3; } | table_ref NATURAL JOIN_SYM table_factor { - YYERROR_UNLESS($1 && ($$=$4)); + MYSQL_YYABORT_UNLESS($1 && ($$=$4)); add_join_natural($1,$4,NULL,Select); } @@ -7121,10 +7283,10 @@ join_table: | table_ref LEFT opt_outer JOIN_SYM table_ref ON { - YYERROR_UNLESS($1 && $5); + MYSQL_YYABORT_UNLESS($1 && $5); /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $5)) - YYABORT; + MYSQL_YYABORT; Select->parsing_place= IN_ON; } expr @@ -7137,7 +7299,7 @@ join_table: } | table_ref LEFT opt_outer JOIN_SYM table_factor { - YYERROR_UNLESS($1 && $5); + MYSQL_YYABORT_UNLESS($1 && $5); } USING '(' using_list ')' { @@ -7147,7 +7309,7 @@ join_table: } | table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor { - YYERROR_UNLESS($1 && $6); + MYSQL_YYABORT_UNLESS($1 && $6); add_join_natural($1,$6,NULL,Select); $6->outer_join|=JOIN_TYPE_LEFT; $$=$6; @@ -7157,39 +7319,39 @@ join_table: | table_ref RIGHT opt_outer JOIN_SYM table_ref ON { - YYERROR_UNLESS($1 && $5); + MYSQL_YYABORT_UNLESS($1 && $5); /* Change the current name resolution context to a local context. */ if (push_new_name_resolution_context(YYTHD, $1, $5)) - YYABORT; + MYSQL_YYABORT; Select->parsing_place= IN_ON; } expr { LEX *lex= Lex; if (!($$= lex->current_select->convert_right_join())) - YYABORT; + MYSQL_YYABORT; add_join_on($$, $8); Lex->pop_context(); Select->parsing_place= NO_MATTER; } | table_ref RIGHT opt_outer JOIN_SYM table_factor { - YYERROR_UNLESS($1 && $5); + MYSQL_YYABORT_UNLESS($1 && $5); } USING '(' using_list ')' { LEX *lex= Lex; if (!($$= lex->current_select->convert_right_join())) - YYABORT; + MYSQL_YYABORT; add_join_natural($$,$5,$9,Select); } | table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor { - YYERROR_UNLESS($1 && $6); + MYSQL_YYABORT_UNLESS($1 && $6); add_join_natural($6,$1,NULL,Select); LEX *lex= Lex; if (!($$= lex->current_select->convert_right_join())) - YYABORT; + MYSQL_YYABORT; }; normal_join: @@ -7202,39 +7364,35 @@ normal_join: table_factor: { SELECT_LEX *sel= Select; - sel->use_index_ptr=sel->ignore_index_ptr=0; sel->table_join_options= 0; } table_ident opt_table_alias opt_key_definition { - LEX *lex= Lex; - SELECT_LEX *sel= lex->current_select; - if (!($$= sel->add_table_to_list(lex->thd, $2, $3, - sel->get_table_join_options(), - lex->lock_option, - sel->get_use_index(), - sel->get_ignore_index()))) - YYABORT; - sel->add_joined_table($$); + if (!($$= Select->add_table_to_list(YYTHD, $2, $3, + Select->get_table_join_options(), + Lex->lock_option, + Select->pop_index_hints()))) + MYSQL_YYABORT; + Select->add_joined_table($$); } | '{' ident table_ref LEFT OUTER JOIN_SYM table_ref ON { /* Change the current name resolution context to a local context. 
*/ if (push_new_name_resolution_context(YYTHD, $3, $7)) - YYABORT; + MYSQL_YYABORT; } expr '}' { LEX *lex= Lex; - YYERROR_UNLESS($3 && $7); + MYSQL_YYABORT_UNLESS($3 && $7); add_join_on($7,$10); Lex->pop_context(); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; if (!($$= lex->current_select->nest_last_join(lex->thd))) - YYABORT; + MYSQL_YYABORT; } | select_derived_init get_select_lex select_derived2 { @@ -7244,8 +7402,8 @@ table_factor: { if (sel->set_braces(1)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } /* select in braces, can't contain global parameters */ if (sel->master_unit()->fake_select_lex) @@ -7253,7 +7411,7 @@ table_factor: sel->master_unit()->fake_select_lex; } if ($2->init_nested_join(lex->thd)) - YYABORT; + MYSQL_YYABORT; $$= 0; /* incomplete derived tables return NULL, we must be nested in select_derived rule to be here. */ @@ -7284,10 +7442,9 @@ table_factor: lex->current_select= sel= unit->outer_select(); if (!($$= sel-> add_table_to_list(lex->thd, new Table_ident(unit), $6, 0, - TL_READ,(List<String> *)0, - (List<String> *)0))) + TL_READ))) - YYABORT; + MYSQL_YYABORT; sel->add_joined_table($$); lex->pop_context(); } @@ -7295,8 +7452,8 @@ table_factor: if ($4 || $6) { /* simple nested joins cannot have aliases or unions */ - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } else $$= $3; @@ -7309,7 +7466,7 @@ select_derived: { LEX *lex= Lex; if ($1->init_nested_join(lex->thd)) - YYABORT; + MYSQL_YYABORT; } derived_table_list { @@ -7318,11 +7475,11 @@ select_derived: for derived tables, both must equal NULL */ if (!($$= $1->end_nested_join(lex->thd)) && $3) - YYABORT; + MYSQL_YYABORT; if (!$3 && $$) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } } ; @@ -7333,12 +7490,12 @@ select_derived2: lex->derived_tables|= DERIVED_SUBQUERY; if (!lex->expr_allows_subselect) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE || mysql_new_select(lex, 1)) - YYABORT; + MYSQL_YYABORT; mysql_init_select(lex); lex->current_select->linkage= DERIVED_TABLE_TYPE; lex->current_select->parsing_place= SELECT_LIST; @@ -7362,7 +7519,7 @@ select_derived_init: if (! lex->parsing_options.allows_derived) { my_error(ER_VIEW_SELECT_DERIVED, MYF(0)); - YYABORT; + MYSQL_YYABORT; } SELECT_LEX *sel= lex->current_select; @@ -7370,8 +7527,8 @@ select_derived_init: if (!sel->embedding || sel->end_nested_join(lex->thd)) { /* we are not in parentheses */ - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } embedding= Select->embedding; $$= embedding && @@ -7384,58 +7541,73 @@ opt_outer: /* empty */ {} | OUTER {}; +index_hint_clause: + /* empty */ + { + $$= global_system_variables.old_mode ? 
+ INDEX_HINT_MASK_JOIN : INDEX_HINT_MASK_ALL; + } + | FOR_SYM JOIN_SYM { $$= INDEX_HINT_MASK_JOIN; } + | FOR_SYM ORDER_SYM BY { $$= INDEX_HINT_MASK_ORDER; } + | FOR_SYM GROUP_SYM BY { $$= INDEX_HINT_MASK_GROUP; } + ; + +index_hint_type: + FORCE_SYM { $$= INDEX_HINT_FORCE; } + | IGNORE_SYM { $$= INDEX_HINT_IGNORE; } + ; + +index_hint_definition: + index_hint_type key_or_index index_hint_clause + { + Select->set_index_hint_type($1, $3); + } + '(' key_usage_list ')' + | USE_SYM key_or_index index_hint_clause + { + Select->set_index_hint_type(INDEX_HINT_USE, $3); + } + '(' opt_key_usage_list ')' + ; + +index_hints_list: + index_hint_definition + | index_hints_list index_hint_definition + ; + +opt_index_hints_list: + /* empty */ + | { Select->alloc_index_hints(YYTHD); } index_hints_list + ; + opt_key_definition: - /* empty */ {} - | USE_SYM key_usage_list - { - SELECT_LEX *sel= Select; - sel->use_index= *$2; - sel->use_index_ptr= &sel->use_index; - } - | FORCE_SYM key_usage_list - { - SELECT_LEX *sel= Select; - sel->use_index= *$2; - sel->use_index_ptr= &sel->use_index; - sel->table_join_options|= TL_OPTION_FORCE_INDEX; - } - | IGNORE_SYM key_usage_list - { - SELECT_LEX *sel= Select; - sel->ignore_index= *$2; - sel->ignore_index_ptr= &sel->ignore_index; - }; + { Select->clear_index_hints(); } + opt_index_hints_list + ; -key_usage_list: - key_or_index { Select->interval_list.empty(); } - '(' key_list_or_empty ')' - { $$= &Select->interval_list; } +opt_key_usage_list: + /* empty */ { Select->add_index_hint(YYTHD, NULL, 0); } + | key_usage_list {} ; -key_list_or_empty: - /* empty */ {} - | key_usage_list2 {} - ; +key_usage_element: + ident { Select->add_index_hint(YYTHD, $1.str, $1.length); } + | PRIMARY_SYM + { + Select->add_index_hint(YYTHD, (char *)"PRIMARY", 7); + } + ; -key_usage_list2: - key_usage_list2 ',' ident - { Select-> - interval_list.push_back(new (YYTHD->mem_root) String((const char*) $3.str, $3.length, - system_charset_info)); } - | ident - { Select-> - interval_list.push_back(new (YYTHD->mem_root) String((const char*) $1.str, $1.length, - system_charset_info)); } - | PRIMARY_SYM - { Select-> - interval_list.push_back(new (YYTHD->mem_root) String("PRIMARY", 7, - system_charset_info)); }; +key_usage_list: + key_usage_element + | key_usage_list ',' key_usage_element + ; using_list: ident { if (!($$= new List<String>)) - YYABORT; + MYSQL_YYABORT; $$->push_back(new (YYTHD->mem_root) String((const char *) $1.str, $1.length, system_charset_info)); @@ -7551,13 +7723,13 @@ opt_escape: group_clause: /* empty */ - | GROUP BY group_list olap_opt; + | GROUP_SYM BY group_list olap_opt; group_list: group_list ',' order_ident order_dir - { if (add_group_to_list(YYTHD, $3,(bool) $4)) YYABORT; } + { if (add_group_to_list(YYTHD, $3,(bool) $4)) MYSQL_YYABORT; } | order_ident order_dir - { if (add_group_to_list(YYTHD, $1,(bool) $2)) YYABORT; }; + { if (add_group_to_list(YYTHD, $1,(bool) $2)) MYSQL_YYABORT; }; olap_opt: /* empty */ {} @@ -7568,11 +7740,11 @@ olap_opt: { my_error(ER_WRONG_USAGE, MYF(0), "WITH CUBE", "global union parameters"); - YYABORT; + MYSQL_YYABORT; } lex->current_select->olap= CUBE_TYPE; my_error(ER_NOT_SUPPORTED_YET, MYF(0), "CUBE"); - YYABORT; /* To be deleted in 5.1 */ + MYSQL_YYABORT; /* To be deleted in 5.1 */ } | WITH ROLLUP_SYM { @@ -7581,7 +7753,7 @@ olap_opt: { my_error(ER_WRONG_USAGE, MYF(0), "WITH ROLLUP", "global union parameters"); - YYABORT; + MYSQL_YYABORT; } lex->current_select->olap= ROLLUP_TYPE; } @@ -7606,7 +7778,7 @@ alter_order_item: THD *thd= YYTHD; bool 
ascending= ($2 == 1) ? true : false; if (add_order_to_list(thd, $1, ascending)) - YYABORT; + MYSQL_YYABORT; } ; @@ -7629,7 +7801,7 @@ order_clause: { my_error(ER_WRONG_USAGE, MYF(0), "CUBE/ROLLUP", "ORDER BY"); - YYABORT; + MYSQL_YYABORT; } if (lex->sql_command != SQLCOM_ALTER_TABLE && !unit->fake_select_lex) { @@ -7646,15 +7818,15 @@ order_clause: (first_sl->order_list.elements || first_sl->select_limit) && unit->add_fake_select_lex(lex->thd)) - YYABORT; + MYSQL_YYABORT; } } order_list; order_list: order_list ',' order_ident order_dir - { if (add_order_to_list(YYTHD, $3,(bool) $4)) YYABORT; } + { if (add_order_to_list(YYTHD, $3,(bool) $4)) MYSQL_YYABORT; } | order_ident order_dir - { if (add_order_to_list(YYTHD, $1,(bool) $2)) YYABORT; }; + { if (add_order_to_list(YYTHD, $1,(bool) $2)) MYSQL_YYABORT; }; order_dir: /* empty */ { $$ = 1; } @@ -7739,7 +7911,7 @@ real_ulong_num: | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); } | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | dec_num_error { YYABORT; } + | dec_num_error { MYSQL_YYABORT; } ; ulonglong_num: @@ -7754,12 +7926,12 @@ real_ulonglong_num: NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | dec_num_error { YYABORT; } + | dec_num_error { MYSQL_YYABORT; } ; dec_num_error: dec_num - { yyerror(ER(ER_ONLY_INTEGERS_ALLOWED)); } + { my_parse_error(ER(ER_ONLY_INTEGERS_ALLOWED)); } ; dec_num: @@ -7776,13 +7948,13 @@ procedure_clause: if (! lex->parsing_options.allows_select_procedure) { my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "PROCEDURE"); - YYABORT; + MYSQL_YYABORT; } if (&lex->select_lex != lex->current_select) { my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery"); - YYABORT; + MYSQL_YYABORT; } lex->proc_list.elements=0; lex->proc_list.first=0; @@ -7791,7 +7963,7 @@ procedure_clause: current_select-> context, NULL,NULL,$2.str))) - YYABORT; + MYSQL_YYABORT; Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } '(' procedure_list ')'; @@ -7810,7 +7982,7 @@ procedure_item: { LEX *lex= Lex; if (add_proc_to_list(lex->thd, $2)) - YYABORT; + MYSQL_YYABORT; if (!$2->name) $2->set_name($1,(uint) ((char*) lex->tok_end - $1), YYTHD->charset()); @@ -7822,7 +7994,7 @@ select_var_list_init: { LEX *lex=Lex; if (!lex->describe && (!(lex->result= new select_dumpvar()))) - YYABORT; + MYSQL_YYABORT; } select_var_list {} @@ -7854,7 +8026,7 @@ select_var_ident: if (!lex->spcont || !(t=lex->spcont->find_variable(&$1))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } if (lex->result) { @@ -7883,7 +8055,7 @@ into: if (! 
Lex->parsing_options.allows_select_into) { my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "INTO"); - YYABORT; + MYSQL_YYABORT; } } into_destination @@ -7896,7 +8068,7 @@ into_destination: lex->uncacheable(UNCACHEABLE_SIDEEFFECT); if (!(lex->exchange= new sql_exchange($2.str, 0)) || !(lex->result= new select_export(lex->exchange))) - YYABORT; + MYSQL_YYABORT; } opt_field_term opt_line_term | DUMPFILE TEXT_STRING_filesystem @@ -7906,9 +8078,9 @@ into_destination: { lex->uncacheable(UNCACHEABLE_SIDEEFFECT); if (!(lex->exchange= new sql_exchange($2.str,1))) - YYABORT; + MYSQL_YYABORT; if (!(lex->result= new select_dump(lex->exchange))) - YYABORT; + MYSQL_YYABORT; } } | select_var_list_init @@ -7954,7 +8126,7 @@ drop: $3.str)); if (!lex->current_select->add_table_to_list(lex->thd, $5, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; } | DROP DATABASE if_exists ident { @@ -7969,7 +8141,7 @@ drop: if (lex->sphead) { my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_DROP_FUNCTION; lex->drop_if_exists= $3; @@ -7981,7 +8153,7 @@ drop: if (lex->sphead) { my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_DROP_PROCEDURE; lex->drop_if_exists= $3; @@ -8015,7 +8187,7 @@ drop: LEX *lex= Lex; lex->alter_tablespace_info->ts_cmd_type= DROP_TABLESPACE; } - | DROP LOGFILE_SYM GROUP logfile_group_name opt_ts_engine opt_ts_wait + | DROP LOGFILE_SYM GROUP_SYM logfile_group_name opt_ts_engine opt_ts_wait { LEX *lex= Lex; lex->alter_tablespace_info->ts_cmd_type= DROP_LOGFILE_GROUP; @@ -8037,7 +8209,7 @@ table_name: table_ident { if (!Select->add_table_to_list(YYTHD, $1, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; } ; @@ -8135,7 +8307,7 @@ insert_field_spec: LEX *lex=Lex; if (!(lex->insert_list = new List_item) || lex->many_values.push_back(lex->insert_list)) - YYABORT; + MYSQL_YYABORT; } ident_eq_list; @@ -8165,7 +8337,7 @@ ident_eq_value: LEX *lex=Lex; if (lex->field_list.push_back($1) || lex->insert_list->push_back($3)) - YYABORT; + MYSQL_YYABORT; }; equal: EQ {} @@ -8181,13 +8353,13 @@ no_braces: '(' { if (!(Lex->insert_list = new List_item)) - YYABORT; + MYSQL_YYABORT; } opt_values ')' { LEX *lex=Lex; if (lex->many_values.push_back(lex->insert_list)) - YYABORT; + MYSQL_YYABORT; }; opt_values: @@ -8198,12 +8370,12 @@ values: values ',' expr_or_default { if (Lex->insert_list->push_back($3)) - YYABORT; + MYSQL_YYABORT; } | expr_or_default { if (Lex->insert_list->push_back($1)) - YYABORT; + MYSQL_YYABORT; } ; @@ -8240,7 +8412,7 @@ update: /* it is single table update and it is update of derived table */ my_error(ER_NON_UPDATABLE_TABLE, MYF(0), lex->select_lex.get_table_list()->alias, "UPDATE"); - YYABORT; + MYSQL_YYABORT; } /* In case of multi-update setting write lock for all tables may @@ -8260,7 +8432,7 @@ update_elem: simple_ident_nospvar equal expr_or_default { if (add_item_to_list(YYTHD, $1) || add_value_to_list(YYTHD, $3)) - YYABORT; + MYSQL_YYABORT; }; insert_update_list: @@ -8273,7 +8445,7 @@ insert_update_elem: LEX *lex= Lex; if (lex->update_list.push_back($1) || lex->value_list.push_back($3)) - YYABORT; + MYSQL_YYABORT; }; opt_low_priority: @@ -8300,7 +8472,7 @@ single_multi: { if (!Select->add_table_to_list(YYTHD, $2, NULL, TL_OPTION_UPDATING, Lex->lock_option)) - YYABORT; + MYSQL_YYABORT; } where_clause opt_order_clause delete_limit_clause {} @@ -8309,14 +8481,14 @@ single_multi: FROM join_table_list where_clause { if (multi_delete_set_locks_and_link_aux_tables(Lex)) - YYABORT; + 
MYSQL_YYABORT; } | FROM table_wild_list { mysql_init_multi_delete(Lex); } USING join_table_list where_clause { if (multi_delete_set_locks_and_link_aux_tables(Lex)) - YYABORT; + MYSQL_YYABORT; } ; @@ -8330,7 +8502,7 @@ table_wild_one: if (!Select->add_table_to_list(YYTHD, new Table_ident($1), $3, TL_OPTION_UPDATING | TL_OPTION_ALIAS, Lex->lock_option)) - YYABORT; + MYSQL_YYABORT; } | ident '.' ident opt_wild opt_table_alias { @@ -8340,7 +8512,7 @@ table_wild_one: TL_OPTION_UPDATING | TL_OPTION_ALIAS, Lex->lock_option)) - YYABORT; + MYSQL_YYABORT; } ; @@ -8394,7 +8566,7 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_DATABASES; if (prepare_schema_table(YYTHD, lex, 0, SCH_SCHEMATA)) - YYABORT; + MYSQL_YYABORT; } | opt_full TABLES opt_db wild_and_where { @@ -8402,7 +8574,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_TABLES; lex->select_lex.db= $3; if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES)) - YYABORT; + MYSQL_YYABORT; } | opt_full TRIGGERS_SYM opt_db wild_and_where { @@ -8410,7 +8582,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_TRIGGERS; lex->select_lex.db= $3; if (prepare_schema_table(YYTHD, lex, 0, SCH_TRIGGERS)) - YYABORT; + MYSQL_YYABORT; } | EVENTS_SYM opt_db wild_and_where { @@ -8418,7 +8590,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_EVENTS; lex->select_lex.db= $2; if (prepare_schema_table(YYTHD, lex, 0, SCH_EVENTS)) - YYABORT; + MYSQL_YYABORT; } | TABLE_SYM STATUS_SYM opt_db wild_and_where { @@ -8426,7 +8598,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_TABLE_STATUS; lex->select_lex.db= $3; if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLES)) - YYABORT; + MYSQL_YYABORT; } | OPEN_SYM TABLES opt_db wild_and_where { @@ -8434,7 +8606,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_OPEN_TABLES; lex->select_lex.db= $3; if (prepare_schema_table(YYTHD, lex, 0, SCH_OPEN_TABLES)) - YYABORT; + MYSQL_YYABORT; } | opt_full PLUGIN_SYM { @@ -8442,21 +8614,19 @@ show_param: WARN_DEPRECATED(yythd, "5.2", "SHOW PLUGIN", "'SHOW PLUGINS'"); lex->sql_command= SQLCOM_SHOW_PLUGINS; if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS)) - YYABORT; + MYSQL_YYABORT; } | PLUGINS_SYM { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_PLUGINS; if (prepare_schema_table(YYTHD, lex, 0, SCH_PLUGINS)) - YYABORT; + MYSQL_YYABORT; } - | ENGINE_SYM storage_engines + | ENGINE_SYM known_storage_engines show_engine_param { Lex->create_info.db_type= $2; } - show_engine_param - | ENGINE_SYM ALL + | ENGINE_SYM ALL show_engine_param { Lex->create_info.db_type= NULL; } - show_engine_param | opt_full COLUMNS from_or_in table_ident opt_db wild_and_where { LEX *lex= Lex; @@ -8464,7 +8634,7 @@ show_param: if ($5) $4->change_db($5); if (prepare_schema_table(YYTHD, lex, $4, SCH_COLUMNS)) - YYABORT; + MYSQL_YYABORT; } | NEW_SYM MASTER_SYM FOR_SYM SLAVE WITH MASTER_LOG_FILE_SYM EQ TEXT_STRING_sys AND_SYM MASTER_LOG_POS_SYM EQ ulonglong_num @@ -8496,7 +8666,7 @@ show_param: if ($4) $3->change_db($4); if (prepare_schema_table(YYTHD, lex, $3, SCH_STATISTICS)) - YYABORT; + MYSQL_YYABORT; } | COLUMN_SYM TYPES_SYM { @@ -8514,7 +8684,7 @@ show_param: LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_STORAGE_ENGINES; if (prepare_schema_table(YYTHD, lex, 0, SCH_ENGINES)) - YYABORT; + MYSQL_YYABORT; } | AUTHORS_SYM { @@ -8545,7 +8715,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_STATUS; lex->option_type= $1; if (prepare_schema_table(YYTHD, lex, 0, SCH_STATUS)) - YYABORT; + MYSQL_YYABORT; } | INNOBASE_SYM STATUS_SYM { @@ -8555,7 +8725,7 @@ show_param: ha_resolve_by_legacy_type(YYTHD, DB_TYPE_INNODB))) { 
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "InnoDB"); - YYABORT; + MYSQL_YYABORT; } WARN_DEPRECATED(yythd, "5.2", "SHOW INNODB STATUS", "'SHOW ENGINE INNODB STATUS'"); } @@ -8567,7 +8737,7 @@ show_param: ha_resolve_by_legacy_type(YYTHD, DB_TYPE_INNODB))) { my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "InnoDB"); - YYABORT; + MYSQL_YYABORT; } WARN_DEPRECATED(yythd, "5.2", "SHOW MUTEX STATUS", "'SHOW ENGINE INNODB MUTEX'"); } @@ -8579,21 +8749,21 @@ show_param: lex->sql_command= SQLCOM_SHOW_VARIABLES; lex->option_type= $1; if (prepare_schema_table(YYTHD, lex, 0, SCH_VARIABLES)) - YYABORT; + MYSQL_YYABORT; } | charset wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_CHARSETS; if (prepare_schema_table(YYTHD, lex, 0, SCH_CHARSETS)) - YYABORT; + MYSQL_YYABORT; } | COLLATION_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_COLLATIONS; if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS)) - YYABORT; + MYSQL_YYABORT; } | GRANTS { @@ -8601,7 +8771,7 @@ show_param: lex->sql_command= SQLCOM_SHOW_GRANTS; LEX_USER *curr_user; if (!(curr_user= (LEX_USER*) lex->thd->alloc(sizeof(st_lex_user)))) - YYABORT; + MYSQL_YYABORT; bzero(curr_user, sizeof(st_lex_user)); lex->grant_user= curr_user; } @@ -8623,7 +8793,7 @@ show_param: LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL,0)) - YYABORT; + MYSQL_YYABORT; lex->only_view= 0; lex->create_info.storage_media= HA_SM_DEFAULT; } @@ -8632,7 +8802,7 @@ show_param: LEX *lex= Lex; lex->sql_command = SQLCOM_SHOW_CREATE; if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL, 0)) - YYABORT; + MYSQL_YYABORT; lex->only_view= 1; } | MASTER_SYM STATUS_SYM @@ -8662,24 +8832,24 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_PROC; if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ)) - YYABORT; + MYSQL_YYABORT; if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) - YYABORT; + MYSQL_YYABORT; } | FUNCTION_SYM STATUS_SYM wild_and_where { LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_STATUS_FUNC; if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ)) - YYABORT; + MYSQL_YYABORT; if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) - YYABORT; + MYSQL_YYABORT; } | PROCEDURE CODE_SYM sp_name { #ifdef DBUG_OFF - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; #else Lex->sql_command= SQLCOM_SHOW_PROC_CODE; Lex->spname= $3; @@ -8688,8 +8858,8 @@ show_param: | FUNCTION_SYM CODE_SYM sp_name { #ifdef DBUG_OFF - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; #else Lex->sql_command= SQLCOM_SHOW_FUNC_CODE; Lex->spname= $3; @@ -8764,7 +8934,7 @@ describe: lex->select_lex.db= 0; lex->verbose= 0; if (prepare_schema_table(YYTHD, lex, $2, SCH_COLUMNS)) - YYABORT; + MYSQL_YYABORT; } opt_describe_column {} | describe_command opt_extended_describe @@ -8909,7 +9079,7 @@ load: LOAD DATA_SYM if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD DATA"); - YYABORT; + MYSQL_YYABORT; } lex->fname_start= lex->ptr; } @@ -8924,11 +9094,11 @@ load: LOAD DATA_SYM if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_LOAD_MASTER_TABLE; if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; }; load_data: @@ -8941,7 +9111,7 @@ load_data: lex->duplicates= DUP_ERROR; lex->ignore= 0; if (!(lex->exchange= new sql_exchange($4.str, 0))) - YYABORT; + MYSQL_YYABORT; 
} opt_duplicate INTO { @@ -8953,11 +9123,13 @@ load_data: LEX *lex=Lex; if (!Select->add_table_to_list(YYTHD, $10, NULL, TL_OPTION_UPDATING, lex->lock_option)) - YYABORT; + MYSQL_YYABORT; lex->field_list.empty(); lex->update_list.empty(); lex->value_list.empty(); } + opt_load_data_charset + { Lex->exchange->cs= $12; } opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec opt_load_data_set_spec {} @@ -9130,13 +9302,13 @@ param_marker: if (! lex->parsing_options.allows_variable) { my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); - YYABORT; + MYSQL_YYABORT; } - item= new Item_param((uint) (lex->tok_start - (uchar *) thd->query)); + item= new Item_param((uint) (lex->tok_start - thd->query)); if (!($$= item) || lex->param_list.push_back(item)) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); - YYABORT; + MYSQL_YYABORT; } } ; @@ -9202,7 +9374,7 @@ NUM_literal: $$= new Item_decimal($1.str, $1.length, YYTHD->charset()); if (YYTHD->net.report_error) { - YYABORT; + MYSQL_YYABORT; } } | FLOAT_NUM @@ -9210,7 +9382,7 @@ NUM_literal: $$ = new Item_float($1.str, $1.length); if (YYTHD->net.report_error) { - YYABORT; + MYSQL_YYABORT; } } ; @@ -9255,7 +9427,7 @@ simple_ident: if (! lex->parsing_options.allows_variable) { my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); - YYABORT; + MYSQL_YYABORT; } Item_splocal *splocal; @@ -9315,14 +9487,14 @@ simple_ident_q: !new_row) { my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "OLD", "on INSERT"); - YYABORT; + MYSQL_YYABORT; } if (lex->trg_chistics.event == TRG_EVENT_DELETE && new_row) { my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE"); - YYABORT; + MYSQL_YYABORT; } DBUG_ASSERT(!new_row || @@ -9337,7 +9509,7 @@ simple_ident_q: $3.str, SELECT_ACL, read_only))) - YYABORT; + MYSQL_YYABORT; /* Let us add this item to list of all Item_trigger_field objects @@ -9408,13 +9580,13 @@ field_ident: if (my_strcasecmp(table_alias_charset, $1.str, table->db)) { my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } if (my_strcasecmp(table_alias_charset, $3.str, table->table_name)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $3.str); - YYABORT; + MYSQL_YYABORT; } $$=$5; } @@ -9424,7 +9596,7 @@ field_ident: if (my_strcasecmp(table_alias_charset, $1.str, table->alias)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $1.str); - YYABORT; + MYSQL_YYABORT; } $$=$3; } @@ -9456,7 +9628,7 @@ IDENT_sys: { my_error(ER_INVALID_CHARACTER_STRING, MYF(0), cs->csname, $1.str + wlen); - YYABORT; + MYSQL_YYABORT; } $$= $1; } @@ -9539,32 +9711,32 @@ user: { THD *thd= YYTHD; if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) - YYABORT; + MYSQL_YYABORT; $$->user = $1; $$->host.str= (char *) "%"; $$->host.length= 1; if (check_string_length(&$$->user, ER(ER_USERNAME), USERNAME_LENGTH)) - YYABORT; + MYSQL_YYABORT; } | ident_or_text '@' ident_or_text { THD *thd= YYTHD; if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) - YYABORT; + MYSQL_YYABORT; $$->user = $1; $$->host=$3; if (check_string_length(&$$->user, ER(ER_USERNAME), USERNAME_LENGTH) || check_string_length(&$$->host, ER(ER_HOSTNAME), HOSTNAME_LENGTH)) - YYABORT; + MYSQL_YYABORT; } | CURRENT_USER optional_braces { if (!($$=(LEX_USER*) YYTHD->alloc(sizeof(st_lex_user)))) - YYABORT; + MYSQL_YYABORT; /* empty LEX_USER means current_user and will be handled in the get_current_user() function @@ -9666,6 +9838,7 @@ keyword_sp: | COMPLETION_SYM {} | COMPRESSED_SYM {} | CONCURRENT {} + | CONNECTION_SYM {} | CONSISTENT_SYM {} | CONTRIBUTORS_SYM {} | CUBE_SYM {} @@ -9830,6 +10003,7 @@ keyword_sp: | SIMPLE_SYM 
{} | SHARE_SYM {} | SHUTDOWN {} + | SLAVESIDE_DISABLE_SYM {} | SNAPSHOT_SYM {} | SOUNDS_SYM {} | SQL_CACHE_SYM {} @@ -9952,7 +10126,7 @@ option_type_value: if (!(i= new sp_instr_stmt(sp->instructions(), lex->spcont, lex))) - YYABORT; + MYSQL_YYABORT; /* Extract the query statement from the tokenizer. The @@ -9965,9 +10139,9 @@ option_type_value: qbuff.length= lex->tok_end - sp->m_tmp_query; if (!(qbuff.str= alloc_root(YYTHD->mem_root, qbuff.length + 5))) - YYABORT; + MYSQL_YYABORT; - strmake(strmake(qbuff.str, "SET ", 4), (char *)sp->m_tmp_query, + strmake(strmake(qbuff.str, "SET ", 4), sp->m_tmp_query, qbuff.length); qbuff.length+= 4; i->m_query= qbuff; @@ -10021,8 +10195,8 @@ sys_option_value: LINT_INIT(sp_fld); if ($1) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if ($4) it= $4; @@ -10044,7 +10218,7 @@ sys_option_value: lex->spcont, trg_fld, it, lex))) - YYABORT; + MYSQL_YYABORT; /* Let us add this item to list of all Item_trigger_field @@ -10071,8 +10245,8 @@ sys_option_value: Item *it; if ($1) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } spv= ctx->find_variable(&$2.base_name); @@ -10127,9 +10301,9 @@ option_value: if (spc && spc->find_variable(&names)) my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str); else - yyerror(ER(ER_SYNTAX_ERROR)); + my_parse_error(ER(ER_SYNTAX_ERROR)); - YYABORT; + MYSQL_YYABORT; } | NAMES_SYM charset_name_or_default opt_collate { @@ -10140,7 +10314,7 @@ option_value: { my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), $3->name, $2->csname); - YYABORT; + MYSQL_YYABORT; } lex->var_list.push_back(new set_var_collation_client($3,$3,$3)); } @@ -10157,10 +10331,10 @@ option_value: if (spc && spc->find_variable(&pw)) { my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str); - YYABORT; + MYSQL_YYABORT; } if (!(user=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) - YYABORT; + MYSQL_YYABORT; user->host=null_lex_str; user->user.str=thd->security_ctx->priv_user; thd->lex->var_list.push_back(new set_var_password(user, $3)); @@ -10184,17 +10358,10 @@ internal_variable_name: /* Not an SP local variable */ sys_var *tmp=find_sys_var($1.str, $1.length); if (!tmp) - YYABORT; + MYSQL_YYABORT; $$.var= tmp; $$.base_name= null_lex_str; - /* - If this is time_zone variable we should open time zone - describing tables - */ - if (tmp == &sys_time_zone && - lex->add_time_zone_tables_to_query_tables(YYTHD)) - YYABORT; - else if (spc && tmp == &sys_autocommit) + if (spc && tmp == &sys_autocommit) { /* We don't allow setting AUTOCOMMIT from a stored function @@ -10215,8 +10382,8 @@ internal_variable_name: LEX *lex= Lex; if (check_reserved_words(&$1)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER && (!my_strcasecmp(system_charset_info, $1.str, "NEW") || @@ -10225,18 +10392,18 @@ internal_variable_name: if ($1.str[0]=='O' || $1.str[0]=='o') { my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "OLD", ""); - YYABORT; + MYSQL_YYABORT; } if (lex->trg_chistics.event == TRG_EVENT_DELETE) { my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE"); - YYABORT; + MYSQL_YYABORT; } if (lex->trg_chistics.action_time == TRG_ACTION_AFTER) { my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "NEW", "after "); - YYABORT; + MYSQL_YYABORT; } /* This special combination will denote field of NEW row */ $$.var= trg_new_row_fake_var; @@ -10246,7 +10413,7 @@ internal_variable_name: { sys_var 
*tmp=find_sys_var($3.str, $3.length); if (!tmp) - YYABORT; + MYSQL_YYABORT; if (!tmp->is_struct()) my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str); $$.var= tmp; @@ -10257,7 +10424,7 @@ internal_variable_name: { sys_var *tmp=find_sys_var($3.str, $3.length); if (!tmp) - YYABORT; + MYSQL_YYABORT; if (!tmp->is_struct()) my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str); $$.var= tmp; @@ -10309,7 +10476,7 @@ lock: if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "LOCK"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command= SQLCOM_LOCK_TABLES; } @@ -10329,7 +10496,7 @@ table_lock: table_ident opt_table_alias lock_option { if (!Select->add_table_to_list(YYTHD, $1, $2, 0, (thr_lock_type) $3)) - YYABORT; + MYSQL_YYABORT; } ; @@ -10348,7 +10515,7 @@ unlock: if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "UNLOCK"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command= SQLCOM_UNLOCK_TABLES; } @@ -10368,11 +10535,11 @@ handler: if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_HA_OPEN; if (!lex->current_select->add_table_to_list(lex->thd, $2, $4, 0)) - YYABORT; + MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb CLOSE_SYM { @@ -10380,11 +10547,11 @@ handler: if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER"); - YYABORT; + MYSQL_YYABORT; } lex->sql_command = SQLCOM_HA_CLOSE; if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0)) - YYABORT; + MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb READ_SYM { @@ -10392,7 +10559,7 @@ handler: if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER"); - YYABORT; + MYSQL_YYABORT; } lex->expr_allows_subselect= FALSE; lex->sql_command = SQLCOM_HA_READ; @@ -10400,7 +10567,7 @@ handler: lex->current_select->select_limit= new Item_int((int32) 1); lex->current_select->offset_limit= 0; if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0)) - YYABORT; + MYSQL_YYABORT; } handler_read_or_scan where_clause opt_limit_clause { @@ -10429,7 +10596,7 @@ handler_rkey_function: lex->ha_read_mode = RKEY; lex->ha_rkey_mode=$1; if (!(lex->insert_list = new List_item)) - YYABORT; + MYSQL_YYABORT; } '(' values ')' { } ; @@ -10461,8 +10628,8 @@ revoke_command: LEX *lex= Lex; if (lex->columns.elements) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_REVOKE; lex->type= TYPE_ENUM_FUNCTION; @@ -10474,8 +10641,8 @@ revoke_command: LEX *lex= Lex; if (lex->columns.elements) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_REVOKE; lex->type= TYPE_ENUM_PROCEDURE; @@ -10507,8 +10674,8 @@ grant_command: LEX *lex= Lex; if (lex->columns.elements) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_GRANT; lex->type= TYPE_ENUM_FUNCTION; @@ -10520,8 +10687,8 @@ grant_command: LEX *lex= Lex; if (lex->columns.elements) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_GRANT; lex->type= TYPE_ENUM_PROCEDURE; @@ -10600,7 +10767,7 @@ require_list_element: if (lex->x509_subject) { my_error(ER_DUP_ARGUMENT, MYF(0), "SUBJECT"); - YYABORT; + MYSQL_YYABORT; } lex->x509_subject=$2.str; } @@ -10610,7 +10777,7 @@ require_list_element: if (lex->x509_issuer) { my_error(ER_DUP_ARGUMENT, MYF(0), "ISSUER"); - YYABORT; + MYSQL_YYABORT; } lex->x509_issuer=$2.str; } @@ -10620,7 +10787,7 @@ 
require_list_element: if (lex->ssl_cipher) { my_error(ER_DUP_ARGUMENT, MYF(0), "CIPHER"); - YYABORT; + MYSQL_YYABORT; } lex->ssl_cipher=$2.str; } @@ -10633,14 +10800,14 @@ grant_ident: LEX *lex= thd->lex; uint dummy; if (thd->copy_db_to(&lex->current_select->db, &dummy)) - YYABORT; + MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); - YYABORT; + MYSQL_YYABORT; } } | ident '.' '*' @@ -10653,7 +10820,7 @@ grant_ident: { my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); - YYABORT; + MYSQL_YYABORT; } } | '*' '.' '*' @@ -10666,14 +10833,14 @@ grant_ident: { my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); - YYABORT; + MYSQL_YYABORT; } } | table_ident { LEX *lex=Lex; if (!lex->current_select->add_table_to_list(lex->thd, $1,NULL,0)) - YYABORT; + MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = TABLE_ACLS & ~GRANT_ACL; } @@ -10681,21 +10848,21 @@ grant_ident: user_list: - user { if (Lex->users_list.push_back($1)) YYABORT;} + user { if (Lex->users_list.push_back($1)) MYSQL_YYABORT;} | user_list ',' user { if (Lex->users_list.push_back($3)) - YYABORT; + MYSQL_YYABORT; } ; grant_list: - grant_user { if (Lex->users_list.push_back($1)) YYABORT;} + grant_user { if (Lex->users_list.push_back($1)) MYSQL_YYABORT;} | grant_list ',' grant_user { if (Lex->users_list.push_back($3)) - YYABORT; + MYSQL_YYABORT; } ; @@ -10914,21 +11081,21 @@ union_list: UNION_SYM union_option { LEX *lex=Lex; - if (lex->exchange) + if (lex->result) { /* Only the last SELECT can have INTO...... */ my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO"); - YYABORT; + MYSQL_YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } /* This counter shouldn't be incremented for UNION parts */ Lex->nest_level--; if (mysql_new_select(lex, 0)) - YYABORT; + MYSQL_YYABORT; mysql_init_select(lex); lex->current_select->linkage=UNION_TYPE; if ($2) /* UNION DISTINCT - remember position */ @@ -11021,8 +11188,8 @@ subselect_start: LEX *lex=Lex; if (!lex->expr_allows_subselect) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } /* we are making a "derived table" for the parenthesis @@ -11032,7 +11199,7 @@ subselect_start: SELECT * FROM ((SELECT ...) UNION ...) */ if (mysql_new_select(Lex, 1)) - YYABORT; + MYSQL_YYABORT; }; subselect_end: @@ -11043,6 +11210,12 @@ subselect_end: lex->current_select = lex->current_select->return_after_parsing(); lex->nest_level--; lex->current_select->n_child_sum_items += child->n_sum_items; + /* + A subselect can add fields to an outer select. Reserve space for + them. 
+ */ + lex->current_select->select_n_where_fields+= + child->select_n_where_fields; }; /************************************************************************** @@ -11122,13 +11295,6 @@ view_algorithm: { Lex->create_view_algorithm= VIEW_ALGORITHM_TMPTABLE; } ; -view_algorithm_opt: - /* empty */ - { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; } - | view_algorithm - {} - ; - view_suid: /* empty */ { Lex->create_view_suid= VIEW_SUID_DEFAULT; } @@ -11146,7 +11312,7 @@ view_tail: lex->sql_command= SQLCOM_CREATE_VIEW; /* first table in list is target VIEW name */ if (!lex->select_lex.add_table_to_list(thd, $3, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; } view_list_opt AS view_select view_check_option {} @@ -11194,18 +11360,16 @@ view_select_aux: { THD *thd= YYTHD; LEX *lex= thd->lex; - char *stmt_beg= (lex->sphead ? - (char *)lex->sphead->m_tmp_query : - thd->query); + const char *stmt_beg= (lex->sphead ? + lex->sphead->m_tmp_query : thd->query); lex->create_view_select_start= $2 - stmt_beg; } | '(' remember_name select_paren ')' union_opt { THD *thd= YYTHD; LEX *lex= thd->lex; - char *stmt_beg= (lex->sphead ? - (char *)lex->sphead->m_tmp_query : - thd->query); + const char *stmt_beg= (lex->sphead ? + lex->sphead->m_tmp_query : thd->query); lex->create_view_select_start= $2 - stmt_beg; } ; @@ -11237,11 +11401,11 @@ trigger_tail: if (lex->sphead) { my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER"); - YYABORT; + MYSQL_YYABORT; } if (!(sp= new sp_head())) - YYABORT; + MYSQL_YYABORT; sp->reset_thd_mem_root(YYTHD); sp->init(lex); sp->init_sp_name(YYTHD, $3); @@ -11279,7 +11443,7 @@ trigger_tail: sp->restore_thd_mem_root(YYTHD); if (sp->is_not_allowed_in_function("trigger")) - YYABORT; + MYSQL_YYABORT; /* We have to do it after parsing trigger body, because some of @@ -11290,7 +11454,7 @@ trigger_tail: (LEX_STRING*) 0, TL_OPTION_UPDATING, TL_IGNORE)) - YYABORT; + MYSQL_YYABORT; } ; @@ -11318,7 +11482,7 @@ sp_tail: if (lex->sphead) { my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "PROCEDURE"); - YYABORT; + MYSQL_YYABORT; } lex->stmt_definition_begin= $2; @@ -11407,23 +11571,23 @@ xa: XA_SYM begin_or_start xid opt_join_or_resume xid: text_string { - YYERROR_UNLESS($1->length() <= MAXGTRIDSIZE); + MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE); if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID)))) - YYABORT; + MYSQL_YYABORT; Lex->xid->set(1L, $1->ptr(), $1->length(), 0, 0); } | text_string ',' text_string { - YYERROR_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); + MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID)))) - YYABORT; + MYSQL_YYABORT; Lex->xid->set(1L, $1->ptr(), $1->length(), $3->ptr(), $3->length()); } | text_string ',' text_string ',' ulong_num { - YYERROR_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); + MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID)))) - YYABORT; + MYSQL_YYABORT; Lex->xid->set($5, $1->ptr(), $1->length(), $3->ptr(), $3->length()); } ; diff --git a/sql/stacktrace.c b/sql/stacktrace.c index d8e9b7fd883..078f62c6b2b 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -53,21 +53,6 @@ void safe_print_str(const char* name, const char* val, int max_len) #define SIGRETURN_FRAME_OFFSET 23 #endif -static my_bool is_nptl; - -/* Check if we are using NPTL or LinuxThreads on Linux */ -void check_thread_lib(void) -{ - char buf[5]; - -#ifdef 
_CS_GNU_LIBPTHREAD_VERSION - confstr(_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof(buf)); - is_nptl = !strncasecmp(buf, "NPTL", sizeof(buf)); -#else - is_nptl = 0; -#endif -} - #if defined(__alpha__) && defined(__GNUC__) /* The only way to backtrace without a symbol table on alpha @@ -173,8 +158,8 @@ terribly wrong...\n"); #endif /* __alpha__ */ /* We are 1 frame above signal frame with NPTL and 2 frames above with LT */ - sigreturn_frame_count = is_nptl ? 1 : 2; - + sigreturn_frame_count = thd_lib_detected == THD_LIB_LT ? 2 : 1; + while (fp < (uchar**) stack_bottom) { #if defined(__i386__) || defined(__x86_64__) diff --git a/sql/stacktrace.h b/sql/stacktrace.h index f5c92e54e1c..56b9877180a 100644 --- a/sql/stacktrace.h +++ b/sql/stacktrace.h @@ -27,11 +27,9 @@ extern char* heap_start; #define init_stacktrace() do { \ heap_start = (char*) &__bss_start; \ - check_thread_lib(); \ } while(0); void print_stacktrace(gptr stack_bottom, ulong thread_stack); void safe_print_str(const char* name, const char* val, int max_len); -void check_thread_lib(void); #endif /* (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) */ #endif /* TARGET_OS_LINUX */ diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 71b52a5145d..9ffc5fd127f 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -104,7 +104,8 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs, > 0 position in TYPELIB->type_names +1 */ -uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match) +uint find_type(const TYPELIB *lib, const char *find, uint length, + bool part_match) { uint found_count=0, found_pos=0; const char *end= find+length; @@ -144,7 +145,8 @@ uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match) >0 Offset+1 in typelib for matched string */ -uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) +uint find_type2(const TYPELIB *typelib, const char *x, uint length, + CHARSET_INFO *cs) { int pos; const char *j; diff --git a/sql/table.cc b/sql/table.cc index fca9711115c..71e510bf0ac 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -120,7 +120,6 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key, share->normalized_path.length= path_length; share->version= refresh_version; - share->flush_version= flush_version; /* This constant is used to mark that no table map version has been @@ -246,6 +245,50 @@ void free_table_share(TABLE_SHARE *share) } +/** + Return TRUE if a table name matches one of the system table names. + Currently these are: + + help_category, help_keyword, help_relation, help_topic, + proc, + time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, + time_zone_transition_type + + This function trades accuracy for speed, so may return false + positives. Presumably mysql.* database is for internal purposes only + and should not contain user tables. 
+*/ + +inline bool is_system_table_name(const char *name, uint length) +{ + CHARSET_INFO *ci= system_charset_info; + + return ( + /* mysql.proc table */ + length == 4 && + my_tolower(ci, name[0]) == 'p' && + my_tolower(ci, name[1]) == 'r' && + my_tolower(ci, name[2]) == 'o' && + my_tolower(ci, name[3]) == 'c' || + + length > 4 && + ( + /* one of mysql.help* tables */ + my_tolower(ci, name[0]) == 'h' && + my_tolower(ci, name[1]) == 'e' && + my_tolower(ci, name[2]) == 'l' && + my_tolower(ci, name[3]) == 'p' || + + /* one of mysql.time_zone* tables */ + my_tolower(ci, name[0]) == 't' && + my_tolower(ci, name[1]) == 'i' && + my_tolower(ci, name[2]) == 'm' && + my_tolower(ci, name[3]) == 'e' + ) + ); +} + + /* Read table definition from a binary / text based .frm file @@ -366,11 +409,9 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags) allow to lock such tables for writing with any other tables (even with other system tables) and some privilege tables need this. */ - if (!(lower_case_table_names ? - my_strcasecmp(system_charset_info, share->table_name.str, "proc") : - strcmp(share->table_name.str, "proc"))) - share->system_table= 1; - else + share->system_table= is_system_table_name(share->table_name.str, + share->table_name.length); + if (!share->system_table) { share->log_table= check_if_log_table(share->db.length, share->db.str, share->table_name.length, @@ -641,8 +682,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, if ((share->partition_info_len= partition_info_len)) { if (!(share->partition_info= - (uchar*) memdup_root(&share->mem_root, next_chunk + 4, - partition_info_len + 1))) + memdup_root(&share->mem_root, next_chunk + 4, + partition_info_len + 1))) { my_free(buff, MYF(0)); goto err; @@ -1223,12 +1264,12 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, if ((int) (share->next_number_index= (uint) find_ref_key(share->key_info, share->keys, share->default_values, reg_field, - &share->next_number_key_offset)) < 0) + &share->next_number_key_offset, + &share->next_number_keypart)) < 0) { /* Wrong field definition */ - DBUG_ASSERT(0); - reg_field->unireg_check= Field::NONE; /* purecov: inspected */ - share->found_next_number_field= 0; + error= 4; + goto err; } else reg_field->flags |= AUTO_INCREMENT_FLAG; @@ -1339,7 +1380,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, if (!(outparam->alias= my_strdup(alias, MYF(MY_WME)))) goto err; outparam->quick_keys.init(); - outparam->used_keys.init(); + outparam->covering_keys.init(); outparam->keys_in_use_for_query.init(); /* Allocate handler */ @@ -1487,7 +1528,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, tmp= mysql_unpack_partition(thd, share->partition_info, share->partition_info_len, - (uchar*)share->part_state, + share->part_state, share->part_state_len, outparam, is_create_table, share->default_part_db_type); @@ -2245,6 +2286,30 @@ char *get_field(MEM_ROOT *mem, Field *field) return to; } +/* + DESCRIPTION + given a buffer with a key value, and a map of keyparts + that are present in this value, returns the length of the value +*/ +uint calculate_key_len(TABLE *table, uint key, const byte *buf, + key_part_map keypart_map) +{ + /* works only with key prefixes */ + DBUG_ASSERT(((keypart_map + 1) & keypart_map) == 0); + + KEY *key_info= table->s->key_info+key; + KEY_PART_INFO *key_part= key_info->key_part; + KEY_PART_INFO *end_key_part= key_part + key_info->key_parts; + uint length= 0; + + while (key_part < 
end_key_part && keypart_map) + { + length+= key_part->store_length; + keypart_map >>= 1; + key_part++; + } + return length; +} /* Check if database name is valid @@ -2891,7 +2956,9 @@ void st_table_list::hide_view_error(THD *thd) thd->net.last_errno == ER_SP_DOES_NOT_EXIST || thd->net.last_errno == ER_PROCACCESS_DENIED_ERROR || thd->net.last_errno == ER_COLUMNACCESS_DENIED_ERROR || - thd->net.last_errno == ER_TABLEACCESS_DENIED_ERROR) + thd->net.last_errno == ER_TABLEACCESS_DENIED_ERROR || + thd->net.last_errno == ER_TABLE_NOT_LOCKED || + thd->net.last_errno == ER_NO_SUCH_TABLE) { TABLE_LIST *top= top_table(); thd->clear_error(); @@ -3937,7 +4004,7 @@ void st_table::mark_auto_increment_column() */ bitmap_set_bit(read_set, found_next_number_field->field_index); bitmap_set_bit(write_set, found_next_number_field->field_index); - if (s->next_number_key_offset) + if (s->next_number_keypart) mark_columns_used_by_index_no_reset(s->next_number_index, read_set); file->column_bitmaps_signal(); } @@ -4086,7 +4153,7 @@ void st_table_list::reinit_before_use(THD *thd) */ table= 0; /* Reset is_schema_table_processed value(needed for I_S tables */ - is_schema_table_processed= FALSE; + schema_table_state= NOT_PROCESSED; TABLE_LIST *embedded; /* The table at the current level of nesting. */ TABLE_LIST *parent_embedding= this; /* The parent nested table reference. */ @@ -4119,6 +4186,175 @@ Item_subselect *st_table_list::containing_subselect() return (select_lex ? select_lex->master_unit()->item : 0); } +/* + Compiles the tagged hints list and fills up the bitmasks. + + SYNOPSIS + process_index_hints() + table the TABLE to operate on. + + DESCRIPTION + The parser collects the index hints for each table in a "tagged list" + (st_table_list::index_hints). Using the information in this tagged list + this function sets the members st_table::keys_in_use_for_query, + st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by, + st_table::force_index and st_table::covering_keys. + + Current implementation of the runtime does not allow mixing FORCE INDEX + and USE INDEX, so this is checked here. Then the FORCE INDEX list + (if non-empty) is appended to the USE INDEX list and a flag is set. + + Multiple hints of the same kind are processed so that each clause + is applied to what is computed in the previous clause. + For example: + USE INDEX (i1) USE INDEX (i2) + is equivalent to + USE INDEX (i1,i2) + and means "consider only i1 and i2". + + Similarly + USE INDEX () USE INDEX (i1) + is equivalent to + USE INDEX (i1) + and means "consider only the index i1" + + It is OK to have the same index several times, e.g. "USE INDEX (i1,i1)" is + not an error. + + Different kind of hints (USE/FORCE/IGNORE) are processed in the following + order: + 1. All indexes in USE (or FORCE) INDEX are added to the mask. + 2. All IGNORE INDEX + + e.g. "USE INDEX i1, IGNORE INDEX i1, USE INDEX i1" will not use i1 at all + as if we had "USE INDEX i1, USE INDEX i1, IGNORE INDEX i1". + + As an optimization if there is a covering index, and we have + IGNORE INDEX FOR GROUP/ORDER, and this index is used for the JOIN part, + then we have to ignore the IGNORE INDEX FROM GROUP/ORDER. + + RETURN VALUE + FALSE no errors found + TRUE found and reported an error. 
+*/ +bool st_table_list::process_index_hints(TABLE *table) +{ + /* initialize the result variables */ + table->keys_in_use_for_query= table->keys_in_use_for_group_by= + table->keys_in_use_for_order_by= table->s->keys_in_use; + + /* index hint list processing */ + if (index_hints) + { + key_map index_join[INDEX_HINT_FORCE + 1]; + key_map index_order[INDEX_HINT_FORCE + 1]; + key_map index_group[INDEX_HINT_FORCE + 1]; + index_hint *hint; + int type; + bool have_empty_use_join= FALSE, have_empty_use_order= FALSE, + have_empty_use_group= FALSE; + List_iterator <index_hint> iter(*index_hints); + + /* initialize temporary variables used to collect hints of each kind */ + for (type= INDEX_HINT_IGNORE; type <= INDEX_HINT_FORCE; type++) + { + index_join[type].clear_all(); + index_order[type].clear_all(); + index_group[type].clear_all(); + } + + /* iterate over the hints list */ + while ((hint= iter++)) + { + uint pos; + + /* process empty USE INDEX () */ + if (hint->type == INDEX_HINT_USE && !hint->key_name.str) + { + if (hint->clause & INDEX_HINT_MASK_JOIN) + { + index_join[hint->type].clear_all(); + have_empty_use_join= TRUE; + } + if (hint->clause & INDEX_HINT_MASK_ORDER) + { + index_order[hint->type].clear_all(); + have_empty_use_order= TRUE; + } + if (hint->clause & INDEX_HINT_MASK_GROUP) + { + index_group[hint->type].clear_all(); + have_empty_use_group= TRUE; + } + continue; + } + + /* + Check if an index with the given name exists and get his offset in + the keys bitmask for the table + */ + if (table->s->keynames.type_names == 0 || + (pos= find_type(&table->s->keynames, hint->key_name.str, + hint->key_name.length, 1)) <= 0) + { + my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), hint->key_name.str, alias); + return 1; + } + + pos--; + + /* add to the appropriate clause mask */ + if (hint->clause & INDEX_HINT_MASK_JOIN) + index_join[hint->type].set_bit (pos); + if (hint->clause & INDEX_HINT_MASK_ORDER) + index_order[hint->type].set_bit (pos); + if (hint->clause & INDEX_HINT_MASK_GROUP) + index_group[hint->type].set_bit (pos); + } + + /* cannot mix USE INDEX and FORCE INDEX */ + if ((!index_join[INDEX_HINT_FORCE].is_clear_all() || + !index_order[INDEX_HINT_FORCE].is_clear_all() || + !index_group[INDEX_HINT_FORCE].is_clear_all()) && + (!index_join[INDEX_HINT_USE].is_clear_all() || have_empty_use_join || + !index_order[INDEX_HINT_USE].is_clear_all() || have_empty_use_order || + !index_group[INDEX_HINT_USE].is_clear_all() || have_empty_use_group)) + { + my_error(ER_WRONG_USAGE, MYF(0), index_hint_type_name[INDEX_HINT_USE], + index_hint_type_name[INDEX_HINT_FORCE]); + return 1; + } + + /* process FORCE INDEX as USE INDEX with a flag */ + if (!index_join[INDEX_HINT_FORCE].is_clear_all() || + !index_order[INDEX_HINT_FORCE].is_clear_all() || + !index_group[INDEX_HINT_FORCE].is_clear_all()) + { + table->force_index= TRUE; + index_join[INDEX_HINT_USE].merge(index_join[INDEX_HINT_FORCE]); + index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]); + index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]); + } + + /* apply USE INDEX */ + if (!index_join[INDEX_HINT_USE].is_clear_all() || have_empty_use_join) + table->keys_in_use_for_query.intersect(index_join[INDEX_HINT_USE]); + if (!index_order[INDEX_HINT_USE].is_clear_all() || have_empty_use_order) + table->keys_in_use_for_order_by.intersect (index_order[INDEX_HINT_USE]); + if (!index_group[INDEX_HINT_USE].is_clear_all() || have_empty_use_group) + table->keys_in_use_for_group_by.intersect (index_group[INDEX_HINT_USE]); + + /* apply IGNORE INDEX 
*/ + table->keys_in_use_for_query.subtract (index_join[INDEX_HINT_IGNORE]); + table->keys_in_use_for_order_by.subtract (index_order[INDEX_HINT_IGNORE]); + table->keys_in_use_for_group_by.subtract (index_group[INDEX_HINT_IGNORE]); + } + + /* make sure covering_keys don't include indexes disabled with a hint */ + table->covering_keys.intersect(table->keys_in_use_for_query); + return 0; +} + /***************************************************************************** ** Instansiate templates *****************************************************************************/ diff --git a/sql/table.cc.rej b/sql/table.cc.rej deleted file mode 100644 index fd728ba9965..00000000000 --- a/sql/table.cc.rej +++ /dev/null @@ -1,17 +0,0 @@ -*************** -*** 2246,2252 **** - - bool check_db_name(char *name) - { -! char *start=name; - /* Used to catch empty names and names with end space */ - bool last_char_is_space= TRUE; - ---- 2257,2263 ---- - - bool check_db_name(char *name) - { -! char *start= name; - /* Used to catch empty names and names with end space */ - bool last_char_is_space= TRUE; - diff --git a/sql/table.h b/sql/table.h index 2923eb1db7b..e05b444251c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -58,7 +58,7 @@ typedef struct st_grant_info enum tmp_table_type { - NO_TMP_TABLE, TMP_TABLE, TRANSACTIONAL_TMP_TABLE, + NO_TMP_TABLE, NON_TRANSACTIONAL_TMP_TABLE, TRANSACTIONAL_TMP_TABLE, INTERNAL_TMP_TABLE, SYSTEM_TMP_TABLE }; @@ -168,7 +168,7 @@ typedef struct st_table_share ha_rows min_rows, max_rows; /* create information */ ulong avg_row_length; /* create information */ ulong raid_chunksize; - ulong version, flush_version, mysql_version; + ulong version, mysql_version; ulong timestamp_offset; /* Set to offset+1 of record */ ulong reclength; /* Recordlength */ @@ -197,8 +197,9 @@ typedef struct st_table_share uint rowid_field_offset; /* Field_nr +1 to rowid field */ /* Index of auto-updated TIMESTAMP field in field array */ uint primary_key; - uint next_number_index; - uint next_number_key_offset; + uint next_number_index; /* autoincrement key number */ + uint next_number_key_offset; /* autoinc keypart offset in a key */ + uint next_number_keypart; /* autoinc keypart number in a key */ uint error, open_errno, errarg; /* error from open_table_def() */ uint column_bitmap_size; uchar frm_version; @@ -235,9 +236,9 @@ typedef struct st_table_share bool log_table; #ifdef WITH_PARTITION_STORAGE_ENGINE bool auto_partitioned; - const uchar *partition_info; + const char *partition_info; uint partition_info_len; - const uchar *part_state; + const char *part_state; uint part_state_len; handlerton *default_part_db_type; #endif @@ -299,6 +300,12 @@ typedef struct st_table_share /* Information for one open table */ +enum index_hint_type +{ + INDEX_HINT_IGNORE, + INDEX_HINT_USE, + INDEX_HINT_FORCE +}; struct st_table { st_table() {} /* Remove gcc warning */ @@ -318,8 +325,12 @@ struct st_table { byte *write_row_record; /* Used as optimisation in THD::write_row */ byte *insert_values; /* used by INSERT ... UPDATE */ - key_map quick_keys, used_keys; - + /* + Map of keys that can be used to retrieve all data from this table + needed by the query without reading the row. + */ + key_map covering_keys; + key_map quick_keys, merge_keys; /* A set of keys that can be used in the query that references this table. @@ -332,7 +343,10 @@ struct st_table { The set is implemented as a bitmap. 
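covering_keys, introduced above in place of used_keys, holds the indexes that can answer the query without touching the row, and process_index_hints() finishes by intersecting it with keys_in_use_for_query so a hinted-out index is never treated as covering. A small standalone sketch of both ideas using column and key bitmasks (the column layout is invented; this is not the server's key_map/bitmap machinery):

#include <cstdint>
#include <cstdio>

// Columns and keys as bitmasks; bit i set = column i belongs to the index / is read.
struct Index { uint64_t columns; };

// An index covers the query when every column the query reads is in the index.
static bool is_covering(const Index &idx, uint64_t query_read_cols)
{
  return (query_read_cols & ~idx.columns) == 0;
}

int main()
{
  const Index k0 = { 0x3 };                 // index on (c0, c1)
  const Index k1 = { 0x1 };                 // index on (c0)
  const uint64_t read_cols = 0x3;           // the query reads c0 and c1

  uint64_t covering_keys = 0;
  if (is_covering(k0, read_cols)) covering_keys |= 1u << 0;
  if (is_covering(k1, read_cols)) covering_keys |= 1u << 1;

  // Suppose an IGNORE INDEX hint removed key 0 from keys_in_use_for_query:
  uint64_t keys_in_use_for_query = ~(uint64_t)(1u << 0);
  covering_keys &= keys_in_use_for_query;   // the final intersect in the patch

  printf("covering keys after hints: 0x%llx\n",
         (unsigned long long) covering_keys);   // 0x0 -> no covering key left
  return 0;
}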
*/ key_map keys_in_use_for_query; - key_map merge_keys; + /* Map of keys that can be used to calculate GROUP BY without sorting */ + key_map keys_in_use_for_group_by; + /* Map of keys that can be used to calculate ORDER BY without sorting */ + key_map keys_in_use_for_order_by; KEY *key_info; /* data of keys in database */ Field *next_number_field; /* Set if next_number is activated */ @@ -408,6 +422,10 @@ struct st_table { /* If true, the current table row is considered to have all columns set to NULL, including columns declared as "not null" (see maybe_null). + + TODO: Each of these flags take up 8 bits. They can just as easily + be put into one single unsigned long and instead of taking up 18 + bytes, it would take up 4. */ my_bool null_row; my_bool force_index; @@ -415,11 +433,17 @@ struct st_table { my_bool key_read, no_keyread; my_bool locked_by_flush; my_bool locked_by_logger; + my_bool no_replicate; my_bool locked_by_name; my_bool fulltext_searched; my_bool no_cache; /* To signal that we should reset query_id for tables and cols */ my_bool clear_query_id; + /* + To indicate that a non-null value of the auto_increment field + was provided by the user or retrieved from the current record. + Used only in the MODE_NO_AUTO_VALUE_ON_ZERO mode. + */ my_bool auto_increment_field_not_null; my_bool insert_or_update; /* Can be used by the handler */ my_bool alias_name_used; /* true if table_name is alias */ @@ -471,6 +495,12 @@ struct st_table { }; +enum enum_schema_table_state +{ + NOT_PROCESSED= 0, + PROCESSED_BY_CREATE_SORT_INDEX, + PROCESSED_BY_JOIN_EXEC +}; typedef struct st_foreign_key_info { @@ -655,6 +685,7 @@ public: (TABLE_LIST::join_using_fields != NULL) */ +class index_hint; typedef struct st_table_list { st_table_list() {} /* Remove gcc warning */ @@ -711,7 +742,7 @@ typedef struct st_table_list */ struct st_table_list *next_name_resolution_table; /* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */ - List<String> *use_index, *ignore_index; + List<index_hint> *index_hints; TABLE *table; /* opened table */ uint table_id; /* table id (from binlog) for opened table */ /* @@ -730,7 +761,6 @@ typedef struct st_table_list st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ ST_SCHEMA_TABLE *schema_table; /* Information_schema table */ st_select_lex *schema_select_lex; - bool is_schema_table_processed; /* True when the view field translation table is used to convert schema table fields for backwards compatibility with SHOW command. @@ -840,12 +870,13 @@ typedef struct st_table_list */ bool prelocking_placeholder; + enum enum_schema_table_state schema_table_state; void calc_md5(char *buffer); void set_underlying_merge(); int view_check_option(THD *thd, bool ignore_failure); bool setup_underlying(THD *thd); void cleanup_items(); - bool placeholder() {return derived || view; } + bool placeholder() {return derived || view || schema_table || !table; } void print(THD *thd, String *str); bool check_single_table(st_table_list **table, table_map map, st_table_list *view); @@ -885,6 +916,13 @@ typedef struct st_table_list void reinit_before_use(THD *thd); Item_subselect *containing_subselect(); + /* + Compiles the tagged hints list and fills up st_table::keys_in_use_for_query, + st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by, + st_table::force_index and st_table::covering_keys. 
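The TODO earlier in this table.h hunk observes that each my_bool flag occupies a full byte and that the whole group could be packed into a single word. A hedged sketch of what such packing could look like, using only flag names visible in this diff (hypothetical structs, not a proposed patch; my_bool is approximated by bool):

#include <cstdio>

// Unpacked: one byte per flag, the current st_table layout.
struct Flags_unpacked
{
  bool null_row, force_index, key_read, no_keyread, locked_by_flush,
       locked_by_logger, no_replicate, locked_by_name, fulltext_searched,
       no_cache;
};

// Packed: one bit per flag in a single word, as the TODO suggests.
struct Flags_packed
{
  unsigned null_row          : 1;
  unsigned force_index       : 1;
  unsigned key_read          : 1;
  unsigned no_keyread        : 1;
  unsigned locked_by_flush   : 1;
  unsigned locked_by_logger  : 1;
  unsigned no_replicate      : 1;
  unsigned locked_by_name    : 1;
  unsigned fulltext_searched : 1;
  unsigned no_cache          : 1;
};

int main()
{
  printf("unpacked: %zu bytes, packed: %zu bytes\n",
         sizeof(Flags_unpacked), sizeof(Flags_packed));   // e.g. 10 vs 4
  return 0;
}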
+ */ + bool process_index_hints(TABLE *table); + private: bool prep_check_option(THD *thd, uint8 check_opt_type); bool prep_where(THD *thd, Item **conds, bool no_where_clause); diff --git a/sql/tztime.cc b/sql/tztime.cc index 2cdc863565a..65a1a59a5d0 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -546,8 +546,8 @@ sec_to_TIME(TIME * tmp, my_time_t t, long offset) int yleap; const uint *ip; - days= t / SECS_PER_DAY; - rem= t % SECS_PER_DAY; + days= (long) (t / SECS_PER_DAY); + rem= (long) (t % SECS_PER_DAY); /* We do this as separate step after dividing t, because this @@ -780,6 +780,8 @@ gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp) static my_time_t sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) { + /* Guard against my_time_t overflow(on system with 32 bit my_time_t) */ + DBUG_ASSERT(!(year == TIMESTAMP_MAX_YEAR && mon == 1 && mday > 17)); #ifndef WE_WANT_TO_HANDLE_UNORMALIZED_DATES /* It turns out that only whenever month is normalized or unnormalized @@ -960,12 +962,12 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, */ if (shift) { - if (local_t > (my_time_t) (TIMESTAMP_MAX_VALUE - shift*86400L + + if (local_t > (my_time_t) (TIMESTAMP_MAX_VALUE - shift * SECS_PER_DAY + sp->revtis[i].rt_offset - saved_seconds)) { DBUG_RETURN(0); /* my_time_t overflow */ } - local_t+= shift*86400L; + local_t+= shift * SECS_PER_DAY; } if (sp->revtis[i].rt_type) @@ -1353,6 +1355,7 @@ my_time_t Time_zone_offset::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const { my_time_t local_t; + int shift= 0; /* Check timestamp range.we have to do this as calling function relies on @@ -1361,10 +1364,24 @@ Time_zone_offset::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const if (!validate_timestamp_range(t)) return 0; - local_t= sec_since_epoch(t->year, t->month, t->day, + /* + Do a temporary shift of the boundary dates to avoid + overflow of my_time_t if the time value is near it's + maximum range + */ + if ((t->year == TIMESTAMP_MAX_YEAR) && (t->month == 1) && t->day > 4) + shift= 2; + + local_t= sec_since_epoch(t->year, t->month, (t->day - shift), t->hour, t->minute, t->second) - offset; + if (shift) + { + /* Add back the shifted time */ + local_t+= shift * SECS_PER_DAY; + } + if (local_t >= TIMESTAMP_MIN_VALUE && local_t <= TIMESTAMP_MAX_VALUE) return local_t; @@ -1488,26 +1505,20 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length, /* - Prepare table list with time zone related tables from preallocated array - and add to global table list. + Prepare table list with time zone related tables from preallocated array. SYNOPSIS tz_init_table_list() tz_tabs - pointer to preallocated array of MY_TZ_TABLES_COUNT TABLE_LIST objects - global_next_ptr - pointer to variable which points to global_next member - of last element of global table list (or list root - then list is empty) (in/out). DESCRIPTION This function prepares list of TABLE_LIST objects which can be used - for opening of time zone tables from preallocated array. It also links - this list to the end of global table list (it will read and update - accordingly variable pointed by global_next_ptr for this). + for opening of time zone tables from preallocated array. 
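The Time_zone_offset::TIME_to_gmt_sec() hunk above avoids 32-bit my_time_t overflow by temporarily shifting dates near the upper boundary back by two days, converting, and then adding the shifted seconds back before the range check. A standalone arithmetic sketch of the same idea (the helper and values are illustrative stand-ins, not the server's sec_since_epoch()):

#include <cstdint>
#include <cstdio>

static const int64_t SECS_PER_DAY  = 86400;
static const int64_t MY_TIME_T_MAX = 2147483647LL;   // 32-bit my_time_t ceiling

// Illustrative stand-in for sec_since_epoch(): seconds for a broken-down
// local time given as (days since epoch, hour).
static int64_t fields_to_seconds(int64_t day_number, int64_t hour)
{
  return day_number * SECS_PER_DAY + hour * 3600;
}

int main()
{
  // Local time 2038-01-19 10:00:00 in a UTC+13:00 zone: the UTC result is
  // still inside the 32-bit range, but the naive intermediate is not.
  const int64_t local_day = 24855;      // 2038-01-19
  const int64_t offset    = 13 * 3600;  // Time_zone_offset east of UTC

  int64_t naive = fields_to_seconds(local_day, 10);             // 2147508000
  printf("naive intermediate %lld overflows 32-bit: %s\n",
         (long long) naive, naive > MY_TIME_T_MAX ? "yes" : "no");

  // Shift the date back two days, convert, subtract the offset, then add
  // the shifted seconds back -- every intermediate stays below the ceiling.
  const int64_t shift = 2;
  int64_t local_t = fields_to_seconds(local_day - shift, 10) - offset;
  local_t += shift * SECS_PER_DAY;
  printf("shifted result %lld, in range: %s\n",
         (long long) local_t, local_t <= MY_TIME_T_MAX ? "yes" : "no");
  return 0;
}

The DBUG_ASSERT added to sec_since_epoch() in the same hunk documents the boundary this shift is protecting: January dates of TIMESTAMP_MAX_YEAR past the 17th.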
*/ static void -tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) +tz_init_table_list(TABLE_LIST *tz_tabs) { bzero(tz_tabs, sizeof(TABLE_LIST) * MY_TZ_TABLES_COUNT); @@ -1524,64 +1535,6 @@ tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) if (i != 0) tz_tabs[i].prev_global= &tz_tabs[i-1].next_global; } - - /* Link into global list */ - tz_tabs[0].prev_global= *global_next_ptr; - **global_next_ptr= tz_tabs; - /* Update last-global-pointer to point to pointer in last table */ - *global_next_ptr= &tz_tabs[MY_TZ_TABLES_COUNT-1].next_global; -} - - -/* - Fake table list object, pointer to which is returned by - my_tz_get_tables_list() as indication of error. -*/ -TABLE_LIST fake_time_zone_tables_list; - -/* - Create table list with time zone related tables and add it to the end - of global table list. - - SYNOPSIS - my_tz_get_table_list() - thd - current thread object - global_next_ptr - pointer to variable which points to global_next member - of last element of global table list (or list root - then list is empty) (in/out). - - DESCRIPTION - This function creates list of TABLE_LIST objects allocated in thd's - memroot, which can be used for opening of time zone tables. It will also - link this list to the end of global table list (it will read and update - accordingly variable pointed by global_next_ptr for this). - - NOTE - my_tz_check_n_skip_implicit_tables() function depends on fact that - elements of list created are allocated as TABLE_LIST[MY_TZ_TABLES_COUNT] - array. - - RETURN VALUES - Returns pointer to first TABLE_LIST object, (could be 0 if time zone - tables don't exist) and &fake_time_zone_tables_list in case of error. -*/ - -TABLE_LIST * -my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr) -{ - TABLE_LIST *tz_tabs; - DBUG_ENTER("my_tz_get_table_list"); - - if (!time_zone_tables_exist) - DBUG_RETURN(0); - - if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * - MY_TZ_TABLES_COUNT))) - DBUG_RETURN(&fake_time_zone_tables_list); - - tz_init_table_list(tz_tabs, global_next_ptr); - - DBUG_RETURN(tz_tabs); } @@ -1614,8 +1567,8 @@ my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) { THD *thd; - TABLE_LIST *tables= 0; - TABLE_LIST tables_buff[1+MY_TZ_TABLES_COUNT], **last_global_next_ptr; + TABLE_LIST tz_tables[1+MY_TZ_TABLES_COUNT]; + Open_tables_state open_tables_state_backup; TABLE *table; Tz_names_entry *tmp_tzname; my_bool return_val= 1; @@ -1677,19 +1630,23 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) */ thd->set_db(db, sizeof(db)-1); - bzero((char*) &tables_buff, sizeof(TABLE_LIST)); - tables_buff[0].alias= tables_buff[0].table_name= + bzero((char*) &tz_tables[0], sizeof(TABLE_LIST)); + tz_tables[0].alias= tz_tables[0].table_name= (char*)"time_zone_leap_second"; - tables_buff[0].lock_type= TL_READ; - tables_buff[0].db= db; + tz_tables[0].table_name_length= 21; + tz_tables[0].db= db; + tz_tables[0].db_length= sizeof(db)-1; + tz_tables[0].lock_type= TL_READ; + + tz_init_table_list(tz_tables+1); + tz_tables[0].next_global= tz_tables[0].next_local= &tz_tables[1]; + tz_tables[1].prev_global= &tz_tables[0].next_global; + /* - Fill TABLE_LIST for the rest of the time zone describing tables - and link it to first one. + We need to open only mysql.time_zone_leap_second, but we try to + open all time zone tables to see if they exist. 
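my_tz_init() now reaches the time zone tables through open_system_tables_for_read() with an Open_tables_state backup instead of splicing them into the global table list. The sketch below shows only the shape of that pattern -- save the statement's open-table state, open system tables for reading, and restore the saved state afterwards -- with hypothetical Session/guard types, not the server API:

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for THD's open-table bookkeeping; the server uses
// Open_tables_state with open_system_tables_for_read()/close_system_tables().
struct Session
{
  std::vector<std::string> open_tables;   // tables the statement already has open
};

// Scope guard: back up the open-table state, let the caller open system
// tables for reading, restore the previous state on exit so the surrounding
// statement is left undisturbed.
class System_tables_guard
{
public:
  explicit System_tables_guard(Session *s) : session(s), backup(s->open_tables) {}
  ~System_tables_guard() { session->open_tables = backup; }   // "close_system_tables"
  void open(const char *name) { session->open_tables.push_back(name); }
private:
  Session *session;
  std::vector<std::string> backup;
};

int main()
{
  Session thd;
  thd.open_tables.push_back("t1");             // the statement's own table
  {
    System_tables_guard guard(&thd);
    guard.open("mysql.time_zone_leap_second"); // read-only system table access
    printf("while reading: %zu tables open\n", thd.open_tables.size());
  }
  printf("after restore: %zu tables open\n", thd.open_tables.size());
  return 0;
}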
*/ - last_global_next_ptr= &(tables_buff[0].next_global); - tz_init_table_list(tables_buff + 1, &last_global_next_ptr); - - if (simple_open_n_lock_tables(thd, tables_buff)) + if (open_system_tables_for_read(thd, tz_tables, &open_tables_state_backup)) { sql_print_warning("Can't open and lock time zone table: %s " "trying to live without them", thd->net.last_error); @@ -1697,7 +1654,6 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) return_val= time_zone_tables_exist= 0; goto end_with_setting_default_tz; } - tables= tables_buff + 1; /* Now we are going to load leap seconds descriptions that are shared @@ -1713,7 +1669,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) goto end_with_close; } - table= tables_buff[0].table; + table= tz_tables[0].table; /* It is OK to ignore ha_index_init()/ha_index_end() return values since mysql.time_zone* tables are MyISAM and these operations always succeed @@ -1770,7 +1726,12 @@ end_with_setting_default_tz: if (default_tzname) { String tmp_tzname2(default_tzname, &my_charset_latin1); - if (!(global_system_variables.time_zone= my_tz_find(&tmp_tzname2, tables))) + /* + Time zone tables may be open here, and my_tz_find() may open + most of them once more, but this is OK for system tables open + for READ. + */ + if (!(global_system_variables.time_zone= my_tz_find(thd, &tmp_tzname2))) { sql_print_error("Fatal error: Illegal or unknown default time zone '%s'", default_tzname); @@ -1779,8 +1740,11 @@ end_with_setting_default_tz: } end_with_close: - thd->version--; /* Force close to free memory */ - close_thread_tables(thd); + if (time_zone_tables_exist) + { + thd->version--; /* Force close to free memory */ + close_system_tables(thd, &open_tables_state_backup); + } end_with_cleanup: @@ -1889,7 +1853,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) */ table= tz_tables->table; tz_tables= tz_tables->next_local; - table->use_all_columns(); table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); /* @@ -1900,9 +1863,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) (void)table->file->ha_index_init(0, 1); if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 0, HA_READ_KEY_EXACT)) + HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { -#ifdef EXTRA_DEBUG +#ifdef EXTRA_DEBUG /* Most probably user has mistyped time zone name, so no need to bark here unless we need it for debugging. @@ -1922,13 +1885,12 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) using the only index in this table). */ table= tz_tables->table; - table->use_all_columns(); tz_tables= tz_tables->next_local; table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0, 1); if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 0, HA_READ_KEY_EXACT)) + HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { sql_print_error("Can't find description of time zone '%u'", tzid); goto end; @@ -1950,14 +1912,12 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) Right - using special index. */ table= tz_tables->table; - table->use_all_columns(); tz_tables= tz_tables->next_local; table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0, 1); - // FIXME Is there any better approach than explicitly specifying 4 ??? 
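The removed FIXME above about the hard-coded key length of 4 is answered in the next hunk by passing a key_part_map instead: bit N set means keypart N is present in the search key, so (key_part_map)1 means "first keypart only". A standalone sketch of how such a prefix map translates into a key length, in the spirit of the calculate_key_len() added to table.cc earlier in this diff (KeyPart and the lengths are made up for the example):

#include <cassert>
#include <cstdio>

typedef unsigned long key_part_map_t;       // stand-in for key_part_map

struct KeyPart { unsigned store_length; };  // stand-in for KEY_PART_INFO

// Sum the stored lengths of the keyparts covered by a prefix map,
// mirroring the loop in calculate_key_len().
static unsigned prefix_key_len(const KeyPart *parts, unsigned n_parts,
                               key_part_map_t keypart_map)
{
  // Only key prefixes are supported: the map must look like 0b0..01..1.
  assert(((keypart_map + 1) & keypart_map) == 0);

  unsigned length = 0;
  for (unsigned i = 0; i < n_parts && keypart_map; i++, keypart_map >>= 1)
    length += parts[i].store_length;
  return length;
}

int main()
{
  const KeyPart index[] = { {4}, {8}, {30} };   // e.g. INT, BIGINT, VARCHAR parts

  // (key_part_map)1: only the first keypart -- what the lookups below pass
  // instead of the magic byte count 4.
  printf("first part only: %u bytes\n", prefix_key_len(index, 3, 1));
  // A HA_WHOLE_KEY-style map covering all three parts:
  printf("whole key:       %u bytes\n", prefix_key_len(index, 3, (1UL << 3) - 1));
  return 0;
}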
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 4, HA_READ_KEY_EXACT); + (key_part_map)1, HA_READ_KEY_EXACT); while (!res) { ttid= (uint)table->field[1]->val_int(); @@ -2024,13 +1984,11 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) in ascending order by index scan also satisfies us. */ table= tz_tables->table; - table->use_all_columns(); table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0, 1); - // FIXME Is there any better approach than explicitly specifying 4 ??? res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 4, HA_READ_KEY_EXACT); + (key_part_map)1, HA_READ_KEY_EXACT); while (!res) { ttime= (my_time_t)table->field[1]->val_int(); @@ -2234,8 +2192,8 @@ str_to_offset(const char *str, uint length, long *offset) SYNOPSIS my_tz_find() + thd - pointer to thread THD structure name - time zone specification - tz_tables - list of opened'n'locked time zone describing tables DESCRIPTION This function checks if name is one of time zones described in db, @@ -2257,11 +2215,10 @@ str_to_offset(const char *str, uint length, long *offset) values as parameter without additional external check and this property is used by @@time_zone variable handling code). - It will perform lookup in system tables (mysql.time_zone*) if needed - using tz_tables as list of already opened tables (for info about this - list look at tz_load_from_open_tables() description). It won't perform - such lookup if no time zone describing tables were found during server - start up. + It will perform lookup in system tables (mysql.time_zone*), + opening and locking them, and closing afterwards. It won't perform + such lookup if no time zone describing tables were found during + server start up. RETURN VALUE Pointer to corresponding Time_zone object. 0 - in case of bad time zone @@ -2269,7 +2226,7 @@ str_to_offset(const char *str, uint length, long *offset) */ Time_zone * -my_tz_find(const String * name, TABLE_LIST *tz_tables) +my_tz_find(THD *thd, const String *name) { Tz_names_entry *tmp_tzname; Time_zone *result_tz= 0; @@ -2277,8 +2234,6 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) DBUG_ENTER("my_tz_find"); DBUG_PRINT("enter", ("time zone name='%s'", name ? ((String *)name)->c_ptr_safe() : "NULL")); - DBUG_ASSERT(!time_zone_tables_exist || tz_tables || - current_thd->slave_thread); if (!name) DBUG_RETURN(0); @@ -2310,8 +2265,19 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) (const byte *)name->ptr(), name->length()))) result_tz= tmp_tzname->tz; - else if (time_zone_tables_exist && tz_tables) - result_tz= tz_load_from_open_tables(name, tz_tables); + else if (time_zone_tables_exist) + { + TABLE_LIST tz_tables[MY_TZ_TABLES_COUNT]; + Open_tables_state open_tables_state_backup; + + tz_init_table_list(tz_tables); + if (!open_system_tables_for_read(thd, tz_tables, + &open_tables_state_backup)) + { + result_tz= tz_load_from_open_tables(name, tz_tables); + close_system_tables(thd, &open_tables_state_backup); + } + } } VOID(pthread_mutex_unlock(&tz_LOCK)); @@ -2320,58 +2286,6 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) } -/* - A more standalone version of my_tz_find(): will open tz tables if needed. - This is so far only used by replication, where time zone setting does not - happen in the usual query context. 
- - SYNOPSIS - my_tz_find_with_opening_tz_tables() - thd - pointer to thread's THD structure - name - time zone specification - - DESCRIPTION - This function tries to find a time zone which matches the named passed in - argument. If it fails, it will open time zone tables and re-try the - search. - This function is needed for the slave SQL thread, which does not do the - addition of time zone tables which is usually done during query parsing - (as time zone setting by slave does not happen in mysql_parse() but - before). So it needs to open tz tables by itself if needed. - See notes of my_tz_find() as they also apply here. - - RETURN VALUE - Pointer to corresponding Time_zone object. 0 - in case of bad time zone - specification or other error. -*/ - -Time_zone *my_tz_find_with_opening_tz_tables(THD *thd, const String *name) -{ - Time_zone *tz; - DBUG_ENTER("my_tz_find_with_opening_tables"); - DBUG_ASSERT(thd); - DBUG_ASSERT(thd->slave_thread); // intended for use with slave thread only - - if (!(tz= my_tz_find(name, 0)) && time_zone_tables_exist) - { - /* - Probably we have not loaded this time zone yet so let us look it up in - our time zone tables. Note that if we don't have tz tables on this - slave, we don't even try. - */ - TABLE_LIST tables[MY_TZ_TABLES_COUNT]; - TABLE_LIST *dummy; - TABLE_LIST **dummyp= &dummy; - tz_init_table_list(tables, &dummyp); - if (simple_open_n_lock_tables(thd, tables)) - DBUG_RETURN(0); - tz= my_tz_find(name, tables); - /* We need to close tables _now_ to not pollute coming query */ - close_thread_tables(thd); - } - DBUG_RETURN(tz); -} - #endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ diff --git a/sql/tztime.h b/sql/tztime.h index 248a638074b..b6af4b37468 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -59,15 +59,11 @@ public: extern Time_zone * my_tz_UTC; extern Time_zone * my_tz_SYSTEM; -extern TABLE_LIST * my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr); -extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables); -extern Time_zone * my_tz_find_with_opening_tz_tables(THD *thd, const String *name); +extern Time_zone * my_tz_find(THD *thd, const String *name); extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap); extern void my_tz_free(); extern my_time_t sec_since_epoch_TIME(TIME *t); -extern TABLE_LIST fake_time_zone_tables_list; - /* Number of elements in table list produced by my_tz_get_table_list() (this table list contains tables which are needed for dynamical loading @@ -77,34 +73,5 @@ extern TABLE_LIST fake_time_zone_tables_list; static const int MY_TZ_TABLES_COUNT= 4; -/* - Check if we have pointer to the begining of list of implicitly used time - zone tables, set SELECT_ACL for them and fast-forward to its end. - - SYNOPSIS - my_tz_check_n_skip_implicit_tables() - table - (in/out) pointer to element of table list to check - tz_tables - list of implicitly used time zone tables received - from my_tz_get_table_list() function. - - NOTE - This function relies on my_tz_get_table_list() implementation. - - RETURN VALUE - TRUE - if table points to the beggining of tz_tables list - FALSE - otherwise. 
-*/ -inline bool my_tz_check_n_skip_implicit_tables(TABLE_LIST **table, - TABLE_LIST *tz_tables) -{ - if (*table == tz_tables) - { - for (int i= 0; i < MY_TZ_TABLES_COUNT; i++) - (*table)[i].grant.privilege= SELECT_ACL; - (*table)+= MY_TZ_TABLES_COUNT - 1; - return TRUE; - } - return FALSE; -} #endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ diff --git a/sql/udf_example.c b/sql/udf_example.c index 2bb4fe92d2f..d37c6505ced 100644 --- a/sql/udf_example.c +++ b/sql/udf_example.c @@ -684,7 +684,7 @@ longlong sequence(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, ****************************************************************************/ #ifdef __WIN__ -#include <winsock.h> +#include <winsock2.h> #else #include <sys/socket.h> #include <netinet/in.h> diff --git a/sql/udf_example.def b/sql/udf_example.def index ee107d58e51..7a87147d7b6 100644 --- a/sql/udf_example.def +++ b/sql/udf_example.def @@ -1,5 +1,4 @@ LIBRARY udf_example -DESCRIPTION 'MySQL Sample for UDF' VERSION 1.0 EXPORTS lookup diff --git a/sql/unireg.cc b/sql/unireg.cc index 5faacb02d5f..d90420313a6 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -913,7 +913,10 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type, field->interval, field->field_name); if (!regfield) + { + error= 1; goto err; // End of memory + } /* save_in_field() will access regfield->table->in_use */ regfield->init(&table); |
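The closing unireg.cc hunk makes make_empty_rec() report out-of-memory instead of silently falling through: error is set before jumping to the cleanup label. A generic sketch of that goto-cleanup convention (hypothetical names, not the server's code):

#include <cstdio>
#include <cstdlib>

// Stand-in for make_field(): returns NULL to simulate an allocation failure.
static void *make_field_stub(bool fail) { return fail ? NULL : malloc(16); }

// Every failure records an error code before jumping to the shared cleanup
// label, so the caller never sees success after a failed allocation.
static int build_record(bool simulate_oom)
{
  int error = 0;
  void *regfield = make_field_stub(simulate_oom);
  if (!regfield)
  {
    error = 1;          // the fix: record the failure before goto err
    goto err;
  }
  /* ... work with regfield ... */
  free(regfield);

err:
  return error;
}

int main()
{
  printf("ok path: %d, oom path: %d\n", build_record(false), build_record(true));
  return 0;
}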