| author | unknown <msvensson@pilot.blaudden> | 2007-04-24 11:11:45 +0200 |
|---|---|---|
| committer | unknown <msvensson@pilot.blaudden> | 2007-04-24 11:11:45 +0200 |
| commit | 67a9f239f064b3fa1be5be3e514016da66c06bf5 (patch) | |
| tree | af2058646f9c18fec1543128f882e7026e7b2fc6 /sql | |
| parent | ea397372a36f21d1617844e0b276ec527aa31fdf (diff) | |
| parent | 0d5a18b53bd30af029b27e39579b52f05b8f7115 (diff) | |
| download | mariadb-git-67a9f239f064b3fa1be5be3e514016da66c06bf5.tar.gz | |
Merge pilot.blaudden:/home/msvensson/mysql/my51-m-mysql_upgrade
into pilot.blaudden:/home/msvensson/mysql/mysql-5.1-maint
scripts/mysql_system_tables_fix.sql:
Auto merged
sql/sql_plugin.cc:
Auto merged
sql/sql_udf.cc:
Auto merged
Diffstat (limited to 'sql')
140 files changed, 12977 insertions, 8306 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 002aabb91b0..fe919d6442d 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -47,7 +47,9 @@ ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc item_create.cc item_func.cc item_geofunc.cc item_row.cc item_strfunc.cc item_subselect.cc item_sum.cc item_timefunc.cc - key.cc log.cc lock.cc log_event.cc message.rc + key.cc log.cc lock.cc message.rc + log_event.cc rpl_record.cc + log_event_old.cc rpl_record_old.cc message.h mf_iocache.cc my_decimal.cc ../sql-common/my_time.c mysqld.cc net_serv.cc nt_servc.cc nt_servc.h opt_range.cc opt_range.h opt_sum.cc @@ -79,7 +81,7 @@ ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc ${PROJECT_SOURCE_DIR}/sql/sql_builtin.cc ${PROJECT_SOURCE_DIR}/sql/lex_hash.h) TARGET_LINK_LIBRARIES(mysqld heap myisam myisammrg mysys yassl zlib dbug yassl - taocrypt strings vio regex wsock32) + taocrypt strings vio regex wsock32 ws2_32) IF(WITH_ARCHIVE_STORAGE_ENGINE) TARGET_LINK_LIBRARIES(mysqld archive) ENDIF(WITH_ARCHIVE_STORAGE_ENGINE) diff --git a/sql/Makefile.am b/sql/Makefile.am index ac14ed3e883..a04386611f8 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -21,15 +21,13 @@ MYSQLBASEdir= $(prefix) MYSQLLIBdir= $(pkglibdir) INCLUDES = @ZLIB_INCLUDES@ \ -I$(top_builddir)/include -I$(top_srcdir)/include \ - -I$(top_srcdir)/regex -I$(srcdir) \ - $(openssl_includes) + -I$(top_srcdir)/regex -I$(srcdir) $(openssl_includes) WRAPLIBS= @WRAPLIBS@ SUBDIRS = share libexec_PROGRAMS = mysqld EXTRA_PROGRAMS = gen_lex_hash bin_PROGRAMS = mysql_tzinfo_to_sql -gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ -SUPPORTING_LIBS = $(top_builddir)/vio/libvio.a \ +SUPPORTING_LIBS = $(top_builddir)/vio/libvio.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/regex/libregex.a \ @@ -52,13 +50,14 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ sql_error.h field.h handler.h mysqld_suffix.h \ ha_partition.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \ - ha_ndbcluster_tables.h \ + ha_ndbcluster_tables.h rpl_constants.h \ opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \ log.h sql_show.h rpl_rli.h rpl_mi.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h \ lex.h lex_symbol.h sql_acl.h sql_crypt.h \ - log_event.h sql_repl.h slave.h rpl_filter.h \ - rpl_injector.h \ + sql_repl.h slave.h rpl_filter.h rpl_injector.h \ + log_event.h rpl_record.h \ + log_event_old.h rpl_record_old.h \ stacktrace.h sql_sort.h sql_cache.h set_var.h \ spatial.h gstream.h client_settings.h tzfile.h \ tztime.h my_decimal.h\ @@ -70,6 +69,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ event_data_objects.h event_scheduler.h \ sql_partition.h partition_info.h partition_element.h \ contributors.h sql_servers.h + mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \ @@ -86,8 +86,10 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ sql_prepare.cc sql_error.cc sql_locale.cc \ sql_update.cc sql_delete.cc uniques.cc sql_do.cc \ procedure.cc sql_test.cc \ - log.cc log_event.cc init.cc derror.cc sql_acl.cc \ + log.cc init.cc derror.cc sql_acl.cc \ unireg.cc des_key_file.cc \ + log_event.cc rpl_record.cc \ + log_event_old.cc rpl_record_old.cc \ discover.cc time.cc opt_range.cc opt_sum.cc \ records.cc filesort.cc handler.cc \ ha_partition.cc \ @@ -111,11 +113,11 @@ mysqld_SOURCES 
= sql_lex.cc sql_handler.cc sql_partition.cc \ sql_builtin.cc sql_tablespace.cc partition_info.cc \ sql_servers.cc - gen_lex_hash_SOURCES = gen_lex_hash.cc -gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) -mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc -mysql_tzinfo_to_sql_LDADD = @MYSQLD_EXTRA_LDFLAGS@ $(LDADD) $(CXXLDFLAGS) +gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ + +mysql_tzinfo_to_sql_SOURCES = tztime.cc +mysql_tzinfo_to_sql_CXXFLAGS= -DTZINFO2SQL DEFS = -DMYSQL_SERVER \ -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \ @@ -125,20 +127,16 @@ DEFS = -DMYSQL_SERVER \ @DEFS@ BUILT_MAINT_SRC = sql_yacc.cc sql_yacc.h -BUILT_SOURCES = $(BUILT_MAINT_SRC) lex_hash.h +BUILT_SOURCES = $(BUILT_MAINT_SRC) lex_hash.h link_sources EXTRA_DIST = udf_example.c udf_example.def $(BUILT_MAINT_SRC) \ nt_servc.cc nt_servc.h message.mc CMakeLists.txt \ udf_example.c udf_example.def -CLEANFILES = lex_hash.h sql_yacc.output +CLEANFILES = lex_hash.h sql_yacc.output link_sources DISTCLEANFILES = $(EXTRA_PROGRAMS) MAINTAINERCLEANFILES = $(BUILT_MAINT_SRC) AM_YFLAGS = -d --verbose -mysql_tzinfo_to_sql.cc: - rm -f mysql_tzinfo_to_sql.cc - @LN_CP_F@ $(srcdir)/tztime.cc mysql_tzinfo_to_sql.cc - -link_sources: mysql_tzinfo_to_sql.cc +link_sources: rm -f mini_client_errors.c @LN_CP_F@ $(top_srcdir)/libmysql/errmsg.c mini_client_errors.c rm -f pack.c @@ -150,9 +148,6 @@ link_sources: mysql_tzinfo_to_sql.cc rm -f my_user.c @LN_CP_F@ $(top_srcdir)/sql-common/my_user.c my_user.c -mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES) - $(CXXCOMPILE) -c $(INCLUDES) -DTZINFO2SQL $< - # This generates lex_hash.h # NOTE Built sources should depend on their sources not the tool # this avoid the rebuild of the built files in a source dist diff --git a/sql/authors.h b/sql/authors.h index 48b807c7884..dfe3b143e2f 100644 --- a/sql/authors.h +++ b/sql/authors.h @@ -66,6 +66,7 @@ struct show_table_authors_st show_table_authors[]= { "Parser, port to OS/2, storage engines and some random stuff" }, { "Yuri Dario", "", "OS/2 port" }, { "Andrei Elkin", "Espoo, Finland", "Replication" }, + { "Patrick Galbraith", "Sharon, NH", "Federated Engine, mysqlslap" }, { "Sergei Golubchik", "Kerpen, Germany", "Full-text search, precision math" }, { "Lenz Grimmer", "Hamburg, Germany", diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 9999424ad60..f5847e603e8 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -20,19 +20,9 @@ #include "event_db_repository.h" #include "sp_head.h" -/* That's a provisional solution */ -extern Event_db_repository events_event_db_repository; #define EVEX_MAX_INTERVAL_VALUE 1000000000L -static bool -event_change_security_context(THD *thd, LEX_STRING user, LEX_STRING host, - LEX_STRING db, Security_context *backup); - -static void -event_restore_security_context(THD *thd, Security_context *backup); - - /* Initiliazes dbname and name of an Event_queue_element_for_exec object @@ -81,7 +71,7 @@ Event_queue_element_for_exec::~Event_queue_element_for_exec() RETURN VALUE Address or NULL in case of error - + NOTE Created on THD's mem_root */ @@ -101,7 +91,9 @@ Event_parse_data::new_instance(THD *thd) */ Event_parse_data::Event_parse_data() - :on_completion(ON_COMPLETION_DROP), status(ENABLED), + :on_completion(Event_basic::ON_COMPLETION_DROP), + status(Event_basic::ENABLED), + do_not_create(FALSE), item_starts(NULL), item_ends(NULL), item_execute_at(NULL), starts_null(TRUE), ends_null(TRUE), execute_at_null(TRUE), item_expression(NULL), expression(0) @@ -109,9 +101,7 @@ 
Event_parse_data::Event_parse_data() DBUG_ENTER("Event_parse_data::Event_parse_data"); /* Actually in the parser STARTS is always set */ - set_zero_time(&starts, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&ends, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + starts= ends= execute_at= 0; body.str= comment.str= NULL; body.length= comment.length= 0; @@ -170,13 +160,13 @@ Event_parse_data::init_body(THD *thd) (long) body_begin, (long) thd->lex->ptr)); body.length= thd->lex->ptr - body_begin; - const uchar *body_end= body_begin + body.length - 1; + const char *body_end= body_begin + body.length - 1; /* Trim nuls or close-comments ('*'+'/') or spaces at the end */ while (body_begin < body_end) { - if ((*body_end == '\0') || + if ((*body_end == '\0') || (my_isspace(thd->variables.character_set_client, *body_end))) { /* consume NULs and meaningless whitespace */ --body.length; @@ -188,7 +178,7 @@ Event_parse_data::init_body(THD *thd) consume closing comments This is arguably wrong, but it's the best we have until the parser is - changed to be smarter. FIXME PARSER + changed to be smarter. FIXME PARSER See also the sp_head code, where something like this is done also. @@ -217,13 +207,62 @@ Event_parse_data::init_body(THD *thd) ++body_begin; --body.length; } - body.str= thd->strmake((char *)body_begin, body.length); + body.str= thd->strmake(body_begin, body.length); DBUG_VOID_RETURN; } /* + This function is called on CREATE EVENT or ALTER EVENT. When either + ENDS or AT is in the past, we are trying to create an event that + will never be executed. If it has ON COMPLETION NOT PRESERVE + (default), then it would normally be dropped already, so on CREATE + EVENT we give a warning, and do not create anyting. On ALTER EVENT + we give a error, and do not change the event. + + If the event has ON COMPLETION PRESERVE, then we see if the event is + created or altered to the ENABLED (default) state. If so, then we + give a warning, and change the state to DISABLED. + + Otherwise it is a valid event in ON COMPLETION PRESERVE DISABLE + state. +*/ + +void +Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc) +{ + if (ltime_utc >= (my_time_t) thd->query_start()) + return; + + if (on_completion == Event_basic::ON_COMPLETION_DROP) + { + switch (thd->lex->sql_command) { + case SQLCOM_CREATE_EVENT: + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_EVENT_CANNOT_CREATE_IN_THE_PAST, + ER(ER_EVENT_CANNOT_CREATE_IN_THE_PAST)); + break; + case SQLCOM_ALTER_EVENT: + my_error(ER_EVENT_CANNOT_ALTER_IN_THE_PAST, MYF(0)); + break; + default: + DBUG_ASSERT(0); + } + + do_not_create= TRUE; + } + else if (status == Event_basic::ENABLED) + { + status= Event_basic::DISABLED; + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_EVENT_EXEC_TIME_IN_THE_PAST, + ER(ER_EVENT_EXEC_TIME_IN_THE_PAST)); + } +} + + +/* Sets time for execution for one-time event. 
SYNOPSIS @@ -239,9 +278,8 @@ int Event_parse_data::init_execute_at(THD *thd) { my_bool not_used; - TIME ltime; - my_time_t t; - TIME time_tmp; + MYSQL_TIME ltime; + my_time_t ltime_utc; DBUG_ENTER("Event_parse_data::init_execute_at"); @@ -250,41 +288,26 @@ Event_parse_data::init_execute_at(THD *thd) if (item_execute_at->fix_fields(thd, &item_execute_at)) goto wrong_value; - + /* no starts and/or ends in case of execute_at */ DBUG_PRINT("info", ("starts_null && ends_null should be 1 is %d", (starts_null && ends_null))); DBUG_ASSERT(starts_null && ends_null); - /* let's check whether time is in the past */ - thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, - (my_time_t) thd->query_start()); - if ((not_used= item_execute_at->get_date(<ime, TIME_NO_ZERO_DATE))) goto wrong_value; - if (TIME_to_ulonglong_datetime(<ime) < - TIME_to_ulonglong_datetime(&time_tmp)) - { - my_error(ER_EVENT_EXEC_TIME_IN_THE_PAST, MYF(0)); - DBUG_RETURN(ER_WRONG_VALUE); - } - - /* - This may result in a 1970-01-01 date if ltime is > 2037-xx-xx. - CONVERT_TZ has similar problem. - mysql_priv.h currently lists - #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp()) - */ - my_tz_UTC->gmt_sec_to_TIME(<ime,t=TIME_to_timestamp(thd,<ime,¬_used)); - if (!t) + ltime_utc= TIME_to_timestamp(thd,<ime,¬_used); + if (!ltime_utc) { DBUG_PRINT("error", ("Execute AT after year 2037")); goto wrong_value; } + check_if_in_the_past(thd, ltime_utc); + execute_at_null= FALSE; - execute_at= ltime; + execute_at= ltime_utc; DBUG_RETURN(0); wrong_value: @@ -424,8 +447,8 @@ int Event_parse_data::init_starts(THD *thd) { my_bool not_used; - TIME ltime, time_tmp; - my_time_t t; + MYSQL_TIME ltime; + my_time_t ltime_utc; DBUG_ENTER("Event_parse_data::init_starts"); if (!item_starts) @@ -437,36 +460,15 @@ Event_parse_data::init_starts(THD *thd) if ((not_used= item_starts->get_date(<ime, TIME_NO_ZERO_DATE))) goto wrong_value; - /* - Let's check whether time is in the past. - Note: This call is not post year 2038 safe. That's because - thd->query_start() is of time_t, while gmt_sec_to_TIME() - wants my_time_t. In the case time_t is larger than my_time_t - an overflow might happen and events subsystem will not work as - expected. - */ - thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp, - (my_time_t) thd->query_start()); - - DBUG_PRINT("info",("now: %ld starts: %ld", - (long) TIME_to_ulonglong_datetime(&time_tmp), - (long) TIME_to_ulonglong_datetime(<ime))); - if (TIME_to_ulonglong_datetime(<ime) < - TIME_to_ulonglong_datetime(&time_tmp)) + ltime_utc= TIME_to_timestamp(thd, <ime, ¬_used); + if (!ltime_utc) goto wrong_value; - /* - Again, after 2038 this code won't work. As - mysql_priv.h currently lists - #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp()) - */ - my_tz_UTC->gmt_sec_to_TIME(<ime,t=TIME_to_timestamp(thd, <ime, - ¬_used)); - if (!t) - goto wrong_value; + DBUG_PRINT("info",("now: %ld starts: %ld", + (long) thd->query_start(), (long) ltime_utc)); - starts= ltime; starts_null= FALSE; + starts= ltime_utc; DBUG_RETURN(0); wrong_value: @@ -498,9 +500,9 @@ wrong_value: int Event_parse_data::init_ends(THD *thd) { - TIME ltime, ltime_now; my_bool not_used; - my_time_t t; + MYSQL_TIME ltime; + my_time_t ltime_utc; DBUG_ENTER("Event_parse_data::init_ends"); if (!item_ends) @@ -513,34 +515,19 @@ Event_parse_data::init_ends(THD *thd) if ((not_used= item_ends->get_date(<ime, TIME_NO_ZERO_DATE))) goto error_bad_params; - /* - Again, after 2038 this code won't work. 
As - mysql_priv.h currently lists - #define TIMESTAMP_MAX_YEAR 2038 (see TIME_to_timestamp()) - */ - DBUG_PRINT("info", ("get the UTC time")); - my_tz_UTC->gmt_sec_to_TIME(<ime,t=TIME_to_timestamp(thd, <ime, - ¬_used)); - if (!t) + ltime_utc= TIME_to_timestamp(thd, <ime, ¬_used); + if (!ltime_utc) goto error_bad_params; /* Check whether ends is after starts */ DBUG_PRINT("info", ("ENDS after STARTS?")); - if (!starts_null && my_time_compare(&starts, <ime) != -1) + if (!starts_null && starts >= ltime_utc) goto error_bad_params; - /* - The parser forces starts to be provided but one day STARTS could be - set before NOW() and in this case the following check should be done. - Check whether ENDS is not in the past. - */ - DBUG_PRINT("info", ("ENDS after NOW?")); - my_tz_UTC->gmt_sec_to_TIME(<ime_now, (my_time_t)thd->query_start()); - if (my_time_compare(<ime_now, <ime) == 1) - goto error_bad_params; + check_if_in_the_past(thd, ltime_utc); - ends= ltime; ends_null= FALSE; + ends= ltime_utc; DBUG_RETURN(0); error_bad_params: @@ -596,6 +583,7 @@ Event_parse_data::check_parse_data(THD *thd) ret= init_execute_at(thd) || init_interval(thd) || init_starts(thd) || init_ends(thd); + check_originator_id(thd); DBUG_RETURN(ret); } @@ -642,6 +630,31 @@ Event_parse_data::init_definer(THD *thd) } +/** + Set the originator id of the event to the server_id if executing on + the master or set to the server_id of the master if executing on + the slave. If executing on slave, also set status to SLAVESIDE_DISABLED. + + SYNOPSIS + Event_parse_data::check_originator_id() +*/ +void Event_parse_data::check_originator_id(THD *thd) +{ + /* Disable replicated events on slave. */ + if ((thd->system_thread == SYSTEM_THREAD_SLAVE_SQL) || + (thd->system_thread == SYSTEM_THREAD_SLAVE_IO)) + { + DBUG_PRINT("info", ("Invoked object status set to SLAVESIDE_DISABLED.")); + if ((status == Event_basic::ENABLED) || + (status == Event_basic::DISABLED)) + status = Event_basic::SLAVESIDE_DISABLED; + originator = thd->server_id; + } + else + originator = server_id; +} + + /* Constructor @@ -656,6 +669,7 @@ Event_basic::Event_basic() init_alloc_root(&mem_root, 256, 512); dbname.str= name.str= NULL; dbname.length= name.length= 0; + time_zone= NULL; DBUG_VOID_RETURN; } @@ -698,7 +712,7 @@ Event_basic::load_string_fields(Field **fields, ...) va_start(args, fields); field_name= (enum enum_events_table_field) va_arg(args, int); - while (field_name != ET_FIELD_COUNT) + while (field_name < ET_FIELD_COUNT) { field_value= va_arg(args, LEX_STRING *); if ((field_value->str= get_field(&mem_root, fields[field_name])) == NullS) @@ -706,7 +720,7 @@ Event_basic::load_string_fields(Field **fields, ...) ret= TRUE; break; } - field_value->length= strlen(field_value->str); + field_value->length= strlen(field_value->str); field_name= (enum enum_events_table_field) va_arg(args, int); } @@ -716,6 +730,16 @@ Event_basic::load_string_fields(Field **fields, ...) 
} +bool +Event_basic::load_time_zone(THD *thd, const LEX_STRING tz_name) +{ + String str(tz_name.str, &my_charset_latin1); + time_zone= my_tz_find(thd, &str); + + return (time_zone == NULL); +} + + /* Constructor @@ -730,10 +754,7 @@ Event_queue_element::Event_queue_element(): { DBUG_ENTER("Event_queue_element::Event_queue_element"); - set_zero_time(&starts, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&ends, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); - set_zero_time(&last_executed, MYSQL_TIMESTAMP_DATETIME); + starts= ends= execute_at= last_executed= 0; starts_null= ends_null= execute_at_null= TRUE; DBUG_VOID_RETURN; @@ -775,7 +796,7 @@ Event_timed::Event_timed(): */ Event_timed::~Event_timed() -{ +{ } @@ -787,27 +808,10 @@ Event_timed::~Event_timed() */ Event_job_data::Event_job_data() - :sphead(NULL), sql_mode(0) + :sql_mode(0) { } - -/* - Destructor - - SYNOPSIS - Event_timed::~Event_timed() -*/ - -Event_job_data::~Event_job_data() -{ - DBUG_ENTER("Event_job_data::~Event_job_data"); - delete sphead; - sphead= NULL; - DBUG_VOID_RETURN; -} - - /* Init all member variables @@ -833,7 +837,7 @@ Event_timed::init() Loads an event's body from a row from mysql.event SYNOPSIS - Event_job_data::load_from_row(MEM_ROOT *mem_root, TABLE *table) + Event_job_data::load_from_row(THD *thd, TABLE *table) RETURN VALUE 0 OK @@ -846,7 +850,7 @@ Event_timed::init() */ int -Event_job_data::load_from_row(TABLE *table) +Event_job_data::load_from_row(THD *thd, TABLE *table) { char *ptr; uint len; @@ -855,12 +859,21 @@ Event_job_data::load_from_row(TABLE *table) if (!table) goto error; - if (table->s->fields != ET_FIELD_COUNT) + if (table->s->fields < ET_FIELD_COUNT) goto error; - load_string_fields(table->field, ET_FIELD_DB, &dbname, ET_FIELD_NAME, &name, - ET_FIELD_BODY, &body, ET_FIELD_DEFINER, &definer, - ET_FIELD_COUNT); + LEX_STRING tz_name; + if (load_string_fields(table->field, + ET_FIELD_DB, &dbname, + ET_FIELD_NAME, &name, + ET_FIELD_BODY, &body, + ET_FIELD_DEFINER, &definer, + ET_FIELD_TIME_ZONE, &tz_name, + ET_FIELD_COUNT)) + goto error; + + if (load_time_zone(thd, tz_name)) + goto error; ptr= strchr(definer.str, '@'); @@ -887,7 +900,7 @@ error: Loads an event from a row from mysql.event SYNOPSIS - Event_queue_element::load_from_row(MEM_ROOT *mem_root, TABLE *table) + Event_queue_element::load_from_row(THD *thd, TABLE *table) RETURN VALUE 0 OK @@ -900,42 +913,62 @@ error: */ int -Event_queue_element::load_from_row(TABLE *table) +Event_queue_element::load_from_row(THD *thd, TABLE *table) { char *ptr; - bool res1, res2; + MYSQL_TIME time; + LEX_STRING tz_name; DBUG_ENTER("Event_queue_element::load_from_row"); if (!table) goto error; - if (table->s->fields != ET_FIELD_COUNT) + if (table->s->fields < ET_FIELD_COUNT) + goto error; + + if (load_string_fields(table->field, + ET_FIELD_DB, &dbname, + ET_FIELD_NAME, &name, + ET_FIELD_DEFINER, &definer, + ET_FIELD_TIME_ZONE, &tz_name, + ET_FIELD_COUNT)) goto error; - load_string_fields(table->field, ET_FIELD_DB, &dbname, ET_FIELD_NAME, &name, - ET_FIELD_DEFINER, &definer, ET_FIELD_COUNT); + if (load_time_zone(thd, tz_name)) + goto error; starts_null= table->field[ET_FIELD_STARTS]->is_null(); - res1= table->field[ET_FIELD_STARTS]->get_date(&starts, TIME_NO_ZERO_DATE); + if (!starts_null) + { + table->field[ET_FIELD_STARTS]->get_date(&time, TIME_NO_ZERO_DATE); + starts= sec_since_epoch_TIME(&time); + } ends_null= table->field[ET_FIELD_ENDS]->is_null(); - res2= table->field[ET_FIELD_ENDS]->get_date(&ends, TIME_NO_ZERO_DATE); + 
if (!ends_null) + { + table->field[ET_FIELD_ENDS]->get_date(&time, TIME_NO_ZERO_DATE); + ends= sec_since_epoch_TIME(&time); + } if (!table->field[ET_FIELD_INTERVAL_EXPR]->is_null()) expression= table->field[ET_FIELD_INTERVAL_EXPR]->val_int(); else expression= 0; /* - If res1 and res2 are TRUE then both fields are empty. + If neigher STARTS and ENDS is set, then both fields are empty. Hence, if ET_FIELD_EXECUTE_AT is empty there is an error. */ execute_at_null= table->field[ET_FIELD_EXECUTE_AT]->is_null(); DBUG_ASSERT(!(starts_null && ends_null && !expression && execute_at_null)); - if (!expression && - table->field[ET_FIELD_EXECUTE_AT]->get_date(&execute_at, - TIME_NO_ZERO_DATE)) - goto error; + if (!expression && !execute_at_null) + { + if (table->field[ET_FIELD_EXECUTE_AT]->get_date(&time, + TIME_NO_ZERO_DATE)) + goto error; + execute_at= sec_since_epoch_TIME(&time); + } /* We load the interval type from disk as string and then map it to @@ -962,17 +995,36 @@ Event_queue_element::load_from_row(TABLE *table) interval= (interval_type) i; } - table->field[ET_FIELD_LAST_EXECUTED]->get_date(&last_executed, - TIME_NO_ZERO_DATE); + if (!table->field[ET_FIELD_LAST_EXECUTED]->is_null()) + { + table->field[ET_FIELD_LAST_EXECUTED]->get_date(&time, + TIME_NO_ZERO_DATE); + last_executed= sec_since_epoch_TIME(&time); + } last_executed_changed= FALSE; - if ((ptr= get_field(&mem_root, table->field[ET_FIELD_STATUS])) == NullS) goto error; DBUG_PRINT("load_from_row", ("Event [%s] is [%s]", name.str, ptr)); - status= (ptr[0]=='E'? Event_queue_element::ENABLED: - Event_queue_element::DISABLED); + + /* Set event status (ENABLED | SLAVESIDE_DISABLED | DISABLED) */ + switch (ptr[0]) + { + case 'E' : + status = Event_queue_element::ENABLED; + break; + case 'S' : + status = Event_queue_element::SLAVESIDE_DISABLED; + break; + case 'D' : + default: + status = Event_queue_element::DISABLED; + break; + } + if ((ptr= get_field(&mem_root, table->field[ET_FIELD_ORIGINATOR])) == NullS) + goto error; + originator = table->field[ET_FIELD_ORIGINATOR]->val_int(); /* ToDo : Andrey . Find a way not to allocate ptr on event_mem_root */ if ((ptr= get_field(&mem_root, @@ -992,7 +1044,7 @@ error: Loads an event from a row from mysql.event SYNOPSIS - Event_timed::load_from_row(MEM_ROOT *mem_root, TABLE *table) + Event_timed::load_from_row(THD *thd, TABLE *table) RETURN VALUE 0 OK @@ -1005,17 +1057,21 @@ error: */ int -Event_timed::load_from_row(TABLE *table) +Event_timed::load_from_row(THD *thd, TABLE *table) { char *ptr; uint len; DBUG_ENTER("Event_timed::load_from_row"); - if (Event_queue_element::load_from_row(table)) + if (Event_queue_element::load_from_row(thd, table)) + goto error; + + if (load_string_fields(table->field, + ET_FIELD_BODY, &body, + ET_FIELD_COUNT)) goto error; - load_string_fields(table->field, ET_FIELD_BODY, &body, ET_FIELD_COUNT); ptr= strchr(definer.str, '@'); @@ -1048,11 +1104,30 @@ error: /* - Computes the sum of a timestamp plus interval. Presumed is that at least one - previous execution has occured. + add_interval() adds a specified interval to time 'ltime' in time + zone 'time_zone', and returns the result converted to the number of + seconds since epoch (aka Unix time; in UTC time zone). Zero result + means an error. 
+*/ +static +my_time_t +add_interval(MYSQL_TIME *ltime, const Time_zone *time_zone, + interval_type scale, INTERVAL interval) +{ + if (date_add_interval(ltime, scale, interval)) + return 0; + + my_bool not_used; + return time_zone->TIME_to_gmt_sec(ltime, ¬_used); +} + + +/* + Computes the sum of a timestamp plus interval. SYNOPSIS - get_next_time(TIME *start, int interval_value, interval_type interval) + get_next_time() + time_zone event time zone next the sum start add interval_value to this time time_now current time @@ -1069,26 +1144,19 @@ error: seconds as resolution for computation. 2) In all other cases - MONTH, QUARTER, YEAR we use MONTH as resolution and PERIOD_DIFF()'s implementation - 3) We get the difference between time_now and `start`, then divide it - by the months, respectively seconds and round up. Then we multiply - monts/seconds by the rounded value and add it to `start` -> we get - the next execution time. */ static -bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, +bool get_next_time(const Time_zone *time_zone, my_time_t *next, + my_time_t start, my_time_t time_now, int i_value, interval_type i_type) { - bool ret; - INTERVAL interval; - TIME tmp; - longlong months=0, seconds=0; DBUG_ENTER("get_next_time"); - DBUG_PRINT("enter", ("start: %lu now: %lu", - (long) TIME_to_ulonglong_datetime(start), - (long) TIME_to_ulonglong_datetime(time_now))); + DBUG_PRINT("enter", ("start: %lu now: %lu", (long) start, (long) time_now)); - bzero(&interval, sizeof(interval)); + DBUG_ASSERT(start <= time_now); + + longlong months=0, seconds=0; switch (i_type) { case INTERVAL_YEAR: @@ -1135,84 +1203,151 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec, DBUG_ASSERT(0); } DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months)); + + MYSQL_TIME local_start; + MYSQL_TIME local_now; + + /* Convert times from UTC to local. */ + { + time_zone->gmt_sec_to_TIME(&local_start, start); + time_zone->gmt_sec_to_TIME(&local_now, time_now); + } + + INTERVAL interval; + bzero(&interval, sizeof(interval)); + my_time_t next_time= 0; + if (seconds) { longlong seconds_diff; long microsec_diff; + bool negative= calc_time_diff(&local_now, &local_start, 1, + &seconds_diff, µsec_diff); + if (!negative) + { + /* + The formula below returns the interval that, when added to + local_start, will always give the time in the future. + */ + interval.second= seconds_diff - seconds_diff % seconds + seconds; + next_time= add_interval(&local_start, time_zone, + INTERVAL_SECOND, interval); + if (next_time == 0) + goto done; + } - if (calc_time_diff(time_now, start, 1, &seconds_diff, µsec_diff)) + if (next_time <= time_now) { - DBUG_PRINT("error", ("negative difference")); - DBUG_ASSERT(0); + /* + If 'negative' is true above, then 'next_time == 0', and + 'next_time <= time_now' is also true. If negative is false, + then next_time was set, but perhaps to the value that is less + then time_now. See below for elaboration. + */ + DBUG_ASSERT(negative || next_time > 0); + + /* + If local_now < local_start, i.e. STARTS time is in the future + according to the local time (it always in the past according + to UTC---this is a prerequisite of this function), then + STARTS is almost always in the past according to the local + time too. However, in the time zone that has backward + Daylight Saving Time shift, the following may happen: suppose + we have a backward DST shift at certain date after 2:59:59, + i.e. local time goes 1:59:59, 2:00:00, ... 
, 2:59:59, (shift + here) 2:00:00 (again), ... , 2:59:59 (again), 3:00:00, ... . + Now suppose the time has passed the first 2:59:59, has been + shifted backward, and now is (the second) 2:20:00. The user + does CREATE EVENT with STARTS 'current-date 2:40:00'. Local + time 2:40:00 from create statement is treated by time + functions as the first such time, so according to UTC it comes + before the second 2:20:00. But according to local time it is + obviously in the future, so we end up in this branch. + + Since we are in the second pass through 2:00:00--2:59:59, and + any local time form this interval is treated by system + functions as the time from the first pass, we have to find the + time for the next execution that is past the DST-affected + interval (past the second 2:59:59 for our example, + i.e. starting from 3:00:00). We do this in the loop until the + local time is mapped onto future UTC time. 'start' time is in + the past, so we may use 'do { } while' here, and add the first + interval right away. + + Alternatively, it could be that local_now >= local_start. Now + for the example above imagine we do CREATE EVENT with STARTS + 'current-date 2:10:00'. Local start 2:10 is in the past (now + is local 2:20), so we add an interval, and get next execution + time, say, 2:40. It is in the future according to local time, + but, again, since we are in the second pass through + 2:00:00--2:59:59, 2:40 will be converted into UTC time in the + past. So we will end up in this branch again, and may add + intervals in a 'do { } while' loop. + + Note that for any given event we may end up here only if event + next execution time will map to the time interval that is + passed twice, and only if the server was started during the + second pass, or the event is being created during the second + pass. After that, we never will get here (unless we again + start the server during the second pass). In other words, + such a condition is extremely rare. + */ + interval.second= seconds; + do + { + next_time= add_interval(&local_start, time_zone, + INTERVAL_SECOND, interval); + if (next_time == 0) + goto done; + } + while (next_time <= time_now); } - uint multiplier= (uint) (seconds_diff / seconds); - /* - Increase the multiplier is the modulus is not zero to make round up. - Or if time_now==start then we should not execute the same - event two times for the same time - get the next exec if the modulus is not - */ - DBUG_PRINT("info", ("multiplier: %d", multiplier)); - if (seconds_diff % seconds || (!seconds_diff && last_exec->year) || - TIME_to_ulonglong_datetime(time_now) == - TIME_to_ulonglong_datetime(last_exec)) - ++multiplier; - interval.second= seconds * multiplier; - DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier, - (ulong) interval.second)); - tmp= *start; - if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval))) - *next= tmp; } else { - /* PRESUMED is that at least one execution took already place */ - int diff_months= (time_now->year - start->year)*12 + - (time_now->month - start->month); + long diff_months= (long) (local_now.year - local_start.year)*12 + + (local_now.month - local_start.month); /* - Note: If diff_months is 0 that means we are in the same month as the - last execution which is also the first execution. + Unlike for seconds above, the formula below returns the interval + that, when added to the local_start, will give the time in the + past, or somewhere in the current month. 
We are interested in + the latter case, to see if this time has already passed, or is + yet to come this month. + + Note that the time is guaranteed to be in the past unless + (diff_months % months == 0), but no good optimization is + possible here, because (diff_months % months == 0) is what will + happen most of the time, as get_next_time() will be called right + after the execution of the event. We could pass last_executed + time to this function, and see if the execution has already + happened this month, but for that we will have to convert + last_executed from seconds since epoch to local broken-down + time, and this will greatly reduce the effect of the + optimization. So instead we keep the code simple and clean. */ - /* - First we try with the smaller if not then + 1, because if we try with - directly with +1 we will be after the current date but it could be that - we will be 1 month ahead, so 2 steps are necessary. - */ - interval.month= (ulong) ((diff_months / months)*months); - /* - Check if the same month as last_exec (always set - prerequisite) - An event happens at most once per month so there is no way to - schedule it two times for the current month. This saves us from two - calls to date_add_interval() if the event was just executed. But if - the scheduler is started and there was at least 1 scheduled date - skipped this one does not help and two calls to date_add_interval() - will be done, which is a bit more expensive but compared to the - rareness of the case is neglectable. - */ - if (time_now->year == last_exec->year && - time_now->month == last_exec->month) - interval.month+= (ulong) months; - - tmp= *start; - if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval))) + interval.month= (ulong) (diff_months - diff_months % months); + next_time= add_interval(&local_start, time_zone, + INTERVAL_MONTH, interval); + if (next_time == 0) goto done; - /* If `tmp` is still before time_now just add one more time the interval */ - if (my_time_compare(&tmp, time_now) == -1) - { - interval.month+= (ulong) months; - tmp= *start; - if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval))) + if (next_time <= time_now) + { + interval.month= (ulong) months; + next_time= add_interval(&local_start, time_zone, + INTERVAL_MONTH, interval); + if (next_time == 0) goto done; } - *next= tmp; - /* assert on that the next is after now */ - DBUG_ASSERT(1==my_time_compare(next, time_now)); } + DBUG_ASSERT(time_now < next_time); + + *next= next_time; + done: - DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next))); - DBUG_RETURN(ret); + DBUG_PRINT("info", ("next_time: %ld", (long) next_time)); + DBUG_RETURN(next_time == 0); } @@ -1227,23 +1362,20 @@ done: TRUE Error NOTES - The time is set in execute_at, if no more executions the latter is set to - 0000-00-00. + The time is set in execute_at, if no more executions the latter is + set to 0. 
*/ bool Event_queue_element::compute_next_execution_time() { - TIME time_now; - int tmp; + my_time_t time_now; DBUG_ENTER("Event_queue_element::compute_next_execution_time"); DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx", - (long) TIME_to_ulonglong_datetime(&starts), - (long) TIME_to_ulonglong_datetime(&ends), - (long) TIME_to_ulonglong_datetime(&last_executed), + (long) starts, (long) ends, (long) last_executed, (long) this)); - if (status == Event_queue_element::DISABLED) + if (status != Event_queue_element::ENABLED) { DBUG_PRINT("compute_next_execution_time", ("Event %s is DISABLED", name.str)); @@ -1253,7 +1385,7 @@ Event_queue_element::compute_next_execution_time() if (!expression) { /* Let's check whether it was executed */ - if (last_executed.year) + if (last_executed) { DBUG_PRINT("info",("One-time event %s.%s of was already executed", dbname.str, name.str)); @@ -1266,24 +1398,22 @@ Event_queue_element::compute_next_execution_time() goto ret; } - my_tz_UTC->gmt_sec_to_TIME(&time_now, (my_time_t)current_thd->query_start()); + time_now= (my_time_t) current_thd->query_start(); - DBUG_PRINT("info",("NOW: [%lu]", - (ulong) TIME_to_ulonglong_datetime(&time_now))); + DBUG_PRINT("info",("NOW: [%lu]", (ulong) time_now)); /* if time_now is after ends don't execute anymore */ - if (!ends_null && (tmp= my_time_compare(&ends, &time_now)) == -1) + if (!ends_null && ends < time_now) { DBUG_PRINT("info", ("NOW after ENDS, don't execute anymore")); /* time_now is after ends. don't execute anymore */ - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + execute_at= 0; execute_at_null= TRUE; if (on_completion == Event_queue_element::ON_COMPLETION_DROP) dropped= TRUE; DBUG_PRINT("info", ("Dropped: %d", dropped)); status= Event_queue_element::DISABLED; status_changed= TRUE; - dropped= TRUE; goto ret; } @@ -1293,12 +1423,11 @@ Event_queue_element::compute_next_execution_time() Let's check whether time_now is before starts. If so schedule for starts. */ - if (!starts_null && (tmp= my_time_compare(&time_now, &starts)) < 1) + if (!starts_null && time_now <= starts) { - if (tmp == 0 && my_time_compare(&starts, &last_executed) == 0) + if (time_now == starts && starts == last_executed) { /* - time_now = starts = last_executed do nothing or we will schedule for second time execution at starts. */ } @@ -1319,31 +1448,30 @@ Event_queue_element::compute_next_execution_time() { /* Both starts and m_ends are set and time_now is between them (incl.) - If last_executed is set then increase with m_expression. The new TIME is + If last_executed is set then increase with m_expression. The new MYSQL_TIME is after m_ends set execute_at to 0. And check for on_completion If not set then schedule for now. */ DBUG_PRINT("info", ("Both STARTS & ENDS are set")); - if (!last_executed.year) + if (!last_executed) { DBUG_PRINT("info", ("Not executed so far.")); } { - TIME next_exec; + my_time_t next_exec; - if (get_next_time(&next_exec, &starts, &time_now, - last_executed.year? &last_executed:&starts, + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; /* There was previous execution */ - if (my_time_compare(&ends, &next_exec) == -1) + if (ends < next_exec) { DBUG_PRINT("info", ("Next execution of %s after ENDS. Stop executing.", name.str)); /* Next execution after ends. 
No more executions */ - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + execute_at= 0; execute_at_null= TRUE; if (on_completion == Event_queue_element::ON_COMPLETION_DROP) dropped= TRUE; @@ -1352,8 +1480,7 @@ Event_queue_element::compute_next_execution_time() } else { - DBUG_PRINT("info",("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec)); execute_at= next_exec; execute_at_null= FALSE; } @@ -1368,15 +1495,14 @@ Event_queue_element::compute_next_execution_time() Both starts and m_ends are not set, so we schedule for the next based on last_executed. */ - if (last_executed.year) + if (last_executed) { - TIME next_exec; - if (get_next_time(&next_exec, &starts, &time_now, &last_executed, + my_time_t next_exec; + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; execute_at= next_exec; - DBUG_PRINT("info",("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec)); } else { @@ -1398,20 +1524,18 @@ Event_queue_element::compute_next_execution_time() Hence schedule for starts + m_expression in case last_executed is not set, otherwise to last_executed + m_expression */ - if (!last_executed.year) + if (!last_executed) { DBUG_PRINT("info", ("Not executed so far.")); } { - TIME next_exec; - if (get_next_time(&next_exec, &starts, &time_now, - last_executed.year? &last_executed:&starts, + my_time_t next_exec; + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; execute_at= next_exec; - DBUG_PRINT("info",("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info",("Next[%lu]", (ulong) next_exec)); } execute_at_null= FALSE; } @@ -1426,20 +1550,20 @@ Event_queue_element::compute_next_execution_time() If last_executed is not set then schedule for now */ - if (!last_executed.year) + if (!last_executed) execute_at= time_now; else { - TIME next_exec; + my_time_t next_exec; - if (get_next_time(&next_exec, &starts, &time_now, &last_executed, + if (get_next_time(time_zone, &next_exec, starts, time_now, (int) expression, interval)) goto err; - if (my_time_compare(&ends, &next_exec) == -1) + if (ends < next_exec) { DBUG_PRINT("info", ("Next execution after ENDS. Stop executing.")); - set_zero_time(&execute_at, MYSQL_TIMESTAMP_DATETIME); + execute_at= 0; execute_at_null= TRUE; status= Event_queue_element::DISABLED; status_changed= TRUE; @@ -1448,8 +1572,7 @@ Event_queue_element::compute_next_execution_time() } else { - DBUG_PRINT("info", ("Next[%lu]", - (ulong) TIME_to_ulonglong_datetime(&next_exec))); + DBUG_PRINT("info", ("Next[%lu]", (ulong) next_exec)); execute_at= next_exec; execute_at_null= FALSE; } @@ -1458,8 +1581,7 @@ Event_queue_element::compute_next_execution_time() goto ret; } ret: - DBUG_PRINT("info", ("ret: 0 execute_at: %lu", - (long) TIME_to_ulonglong_datetime(&execute_at))); + DBUG_PRINT("info", ("ret: 0 execute_at: %lu", (long) execute_at)); DBUG_RETURN(FALSE); err: DBUG_PRINT("info", ("ret=1")); @@ -1468,7 +1590,7 @@ err: /* - Set the internal last_executed TIME struct to now. NOW is the + Set the internal last_executed MYSQL_TIME struct to now. NOW is the time according to thd->query_start(), so the THD's clock. 
SYNOPSIS @@ -1479,14 +1601,11 @@ err: void Event_queue_element::mark_last_executed(THD *thd) { - TIME time_now; - thd->end_time(); - my_tz_UTC->gmt_sec_to_TIME(&time_now, (my_time_t) thd->query_start()); - last_executed= time_now; /* was execute_at */ + last_executed= (my_time_t) thd->query_start(); last_executed_changed= TRUE; - + execution_count++; } @@ -1507,10 +1626,8 @@ Event_queue_element::mark_last_executed(THD *thd) bool Event_queue_element::update_timing_fields(THD *thd) { - TABLE *table; - Field **fields; - Open_tables_state backup; - int ret= FALSE; + Event_db_repository *db_repository= Events::get_db_repository(); + int ret; DBUG_ENTER("Event_queue_element::update_timing_fields"); @@ -1520,44 +1637,34 @@ Event_queue_element::update_timing_fields(THD *thd) if (!(status_changed || last_executed_changed)) DBUG_RETURN(0); - thd->reset_n_backup_open_tables_state(&backup); - - if (events_event_db_repository.open_event_table(thd, TL_WRITE, &table)) - { - ret= TRUE; - goto done; - } - fields= table->field; - if ((ret= events_event_db_repository. - find_named_event(thd, dbname, name, table))) - goto done; - - store_record(table,record[1]); - /* Don't update create on row update. */ - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - - if (last_executed_changed) - { - fields[ET_FIELD_LAST_EXECUTED]->set_notnull(); - fields[ET_FIELD_LAST_EXECUTED]->store_time(&last_executed, - MYSQL_TIMESTAMP_DATETIME); - last_executed_changed= FALSE; - } - if (status_changed) - { - fields[ET_FIELD_STATUS]->set_notnull(); - fields[ET_FIELD_STATUS]->store((longlong)status, TRUE); - status_changed= FALSE; - } - - if ((table->file->ha_update_row(table->record[1], table->record[0]))) - ret= TRUE; + ret= db_repository->update_timing_fields_for_event(thd, + dbname, name, + last_executed_changed, + last_executed, + status_changed, + (ulonglong) status); + last_executed_changed= status_changed= FALSE; + DBUG_RETURN(ret); +} -done: - close_thread_tables(thd); - thd->restore_backup_open_tables_state(&backup); - DBUG_RETURN(ret); +static +void +append_datetime(String *buf, Time_zone *time_zone, my_time_t secs, + const char *name, uint len) +{ + char dtime_buff[20*2+32];/* +32 to make my_snprintf_{8bit|ucs2} happy */ + buf->append(STRING_WITH_LEN(" ")); + buf->append(name, len); + buf->append(STRING_WITH_LEN(" '")); + /* + Pass the buffer and the second param tells fills the buffer and + returns the number of chars to copy. + */ + MYSQL_TIME time; + time_zone->gmt_sec_to_TIME(&time, secs); + buf->append(dtime_buff, my_datetime_to_str(&time, dtime_buff)); + buf->append(STRING_WITH_LEN("'")); } @@ -1600,17 +1707,17 @@ Event_timed::get_create_event(THD *thd, String *buf) buf->append(' '); LEX_STRING *ival= &interval_type_to_name[interval]; buf->append(ival->str, ival->length); + + if (!starts_null) + append_datetime(buf, time_zone, starts, STRING_WITH_LEN("STARTS")); + + if (!ends_null) + append_datetime(buf, time_zone, ends, STRING_WITH_LEN("ENDS")); } else { - char dtime_buff[20*2+32];/* +32 to make my_snprintf_{8bit|ucs2} happy */ - buf->append(STRING_WITH_LEN(" ON SCHEDULE AT '")); - /* - Pass the buffer and the second param tells fills the buffer and - returns the number of chars to copy. 
- */ - buf->append(dtime_buff, my_datetime_to_str(&execute_at, dtime_buff)); - buf->append(STRING_WITH_LEN("'")); + append_datetime(buf, time_zone, execute_at, + STRING_WITH_LEN("ON SCHEDULE AT")); } if (on_completion == Event_timed::ON_COMPLETION_DROP) @@ -1620,6 +1727,8 @@ Event_timed::get_create_event(THD *thd, String *buf) if (status == Event_timed::ENABLED) buf->append(STRING_WITH_LEN("ENABLE")); + else if (status == Event_timed::SLAVESIDE_DISABLED) + buf->append(STRING_WITH_LEN("DISABLE ON SLAVE")); else buf->append(STRING_WITH_LEN("DISABLE")); @@ -1635,218 +1744,242 @@ Event_timed::get_create_event(THD *thd, String *buf) } -/* - Get SHOW CREATE EVENT as string +/** + Get an artificial stored procedure to parse as an event definition. +*/ - SYNOPSIS - Event_job_data::get_create_event(THD *thd, String *buf) - thd Thread - buf String*, should be already allocated. CREATE EVENT goes inside. +bool +Event_job_data::construct_sp_sql(THD *thd, String *sp_sql) +{ + LEX_STRING buffer; + const uint STATIC_SQL_LENGTH= 44; - RETURN VALUE - 0 OK - EVEX_MICROSECOND_UNSUP Error (for now if mysql.event has been - tampered and MICROSECONDS interval or - derivative has been put there. + DBUG_ENTER("Event_job_data::construct_sp_sql"); + + /* + Allocate a large enough buffer on the thread execution memory + root to avoid multiple [re]allocations on system heap + */ + buffer.length= STATIC_SQL_LENGTH + name.length + body.length; + if (! (buffer.str= (char*) thd->alloc(buffer.length))) + DBUG_RETURN(TRUE); + + sp_sql->set(buffer.str, buffer.length, system_charset_info); + sp_sql->length(0); + + + sp_sql->append(C_STRING_WITH_LEN("CREATE ")); + sp_sql->append(C_STRING_WITH_LEN("PROCEDURE ")); + /* + Let's use the same name as the event name to perhaps produce a + better error message in case it is a part of some parse error. + We're using append_identifier here to successfully parse + events with reserved names. + */ + append_identifier(thd, sp_sql, name.str, name.length); + + /* + The default SQL security of a stored procedure is DEFINER. We + have already activated the security context of the event, so + let's execute the procedure with the invoker rights to save on + resets of security contexts. + */ + sp_sql->append(C_STRING_WITH_LEN("() SQL SECURITY INVOKER ")); + + sp_sql->append(body.str, body.length); + + DBUG_RETURN(thd->is_fatal_error); +} + + +/** + Get DROP EVENT statement to binlog the drop of ON COMPLETION NOT + PRESERVE event. */ -int -Event_job_data::get_fake_create_event(THD *thd, String *buf) +bool +Event_job_data::construct_drop_event_sql(THD *thd, String *sp_sql) { - DBUG_ENTER("Event_job_data::get_create_event"); - buf->append(STRING_WITH_LEN("CREATE EVENT anonymous ON SCHEDULE " - "EVERY 3337 HOUR DO ")); - buf->append(body.str, body.length); + LEX_STRING buffer; + const uint STATIC_SQL_LENGTH= 14; - DBUG_RETURN(0); -} + DBUG_ENTER("Event_job_data::construct_drop_event_sql"); + buffer.length= STATIC_SQL_LENGTH + name.length*2 + dbname.length*2; + if (! 
(buffer.str= (char*) thd->alloc(buffer.length))) + DBUG_RETURN(TRUE); -/* - Executes the event (the underlying sp_head object); + sp_sql->set(buffer.str, buffer.length, system_charset_info); + sp_sql->length(0); - SYNOPSIS - Event_job_data::execute() - thd THD + sp_sql->append(C_STRING_WITH_LEN("DROP EVENT ")); + append_identifier(thd, sp_sql, dbname.str, dbname.length); + sp_sql->append('.'); + append_identifier(thd, sp_sql, name.str, name.length); - RETURN VALUE - 0 success - -99 No rights on this.dbname.str - others retcodes of sp_head::execute_procedure() + DBUG_RETURN(thd->is_fatal_error); +} + +/** + Compiles and executes the event (the underlying sp_head object) + + @retval TRUE error (reported to the error log) + @retval FALSE success */ -int -Event_job_data::execute(THD *thd) +bool +Event_job_data::execute(THD *thd, bool drop) { - Security_context save_ctx; - /* this one is local and not needed after exec */ - int ret= 0; + String sp_sql; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context event_sctx, *save_sctx= NULL; +#endif + CHARSET_INFO *charset_connection; + List<Item> empty_item_list; + bool ret= TRUE; DBUG_ENTER("Event_job_data::execute"); - DBUG_PRINT("info", ("EXECUTING %s.%s", dbname.str, name.str)); - if ((ret= compile(thd, NULL))) - goto done; + mysql_reset_thd_for_next_command(thd); - event_change_security_context(thd, definer_user, definer_host, dbname, - &save_ctx); /* - THD::~THD will clean this or if there is DROP DATABASE in the - SP then it will be free there. It should not point to our buffer - which is allocated on a mem_root. + MySQL parser currently assumes that current database is either + present in THD or all names in all statements are fully specified. + And yet not fully specified names inside stored programs must be + be supported, even if the current database is not set: + CREATE PROCEDURE db1.p1() BEGIN CREATE TABLE t1; END// + -- in this example t1 should be always created in db1 and the statement + must parse even if there is no current database. + + To support this feature and still address the parser limitation, + we need to set the current database here. + We don't have to call mysql_change_db, since the checks performed + in it are unnecessary for the purpose of parsing, and + mysql_change_db will be invoked anyway later, to activate the + procedure database before it's executed. */ - thd->db= my_strdup(dbname.str, MYF(0)); - thd->db_length= dbname.length; - if (!check_access(thd, EVENT_ACL,dbname.str, 0, 0, 0,is_schema_db(dbname.str))) - { - List<Item> empty_item_list; - empty_item_list.empty(); - if (thd->enable_slow_log) - sphead->m_flags|= sp_head::LOG_SLOW_STATEMENTS; - sphead->m_flags|= sp_head::LOG_GENERAL_LOG; + thd->set_db(dbname.str, dbname.length); - ret= sphead->execute_procedure(thd, &empty_item_list); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (event_sctx.change_security_context(thd, + &definer_user, &definer_host, + &dbname, &save_sctx)) + { + sql_print_error("Event Scheduler: " + "[%s].[%s.%s] execution failed, " + "failed to authenticate the user.", + definer.str, dbname.str, name.str); + goto end; } - else +#endif + + if (check_access(thd, EVENT_ACL, dbname.str, + 0, 0, 0, is_schema_db(dbname.str))) { - DBUG_PRINT("error", ("%s@%s has no rights on %s", definer_user.str, - definer_host.str, dbname.str)); - ret= -99; + /* + This aspect of behavior is defined in the worklog, + and this is how triggers work too: if TRIGGER + privilege is revoked from trigger definer, + triggers are not executed. 
+ */ + sql_print_error("Event Scheduler: " + "[%s].[%s.%s] execution failed, " + "user no longer has EVENT privilege.", + definer.str, dbname.str, name.str); + goto end; } - event_restore_security_context(thd, &save_ctx); -done: - thd->end_statement(); - thd->cleanup_after_query(); + if (construct_sp_sql(thd, &sp_sql)) + goto end; - DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret)); + /* + Set up global thread attributes to reflect the properties of + this Event. We can simply reset these instead of usual + backup/restore employed in stored programs since we know that + this is a top level statement and the worker thread is + allocated exclusively to execute this event. + */ + charset_connection= get_charset_by_csname("utf8", + MY_CS_PRIMARY, MYF(MY_WME)); + thd->variables.character_set_client= charset_connection; + thd->variables.character_set_results= charset_connection; + thd->variables.collation_connection= charset_connection; + thd->update_charset(); - DBUG_RETURN(ret); -} + thd->variables.sql_mode= sql_mode; + thd->variables.time_zone= time_zone; + thd->query= sp_sql.c_ptr_safe(); + thd->query_length= sp_sql.length(); -/* - Compiles an event before it's execution. Compiles the anonymous - sp_head object held by the event + lex_start(thd, thd->query, thd->query_length); - SYNOPSIS - Event_job_data::compile() - thd thread context, used for memory allocation mostly - mem_root if != NULL then this memory root is used for allocs - instead of thd->mem_root + if (MYSQLparse(thd) || thd->is_fatal_error) + { + sql_print_error("Event Scheduler: " + "%serror during compilation of %s.%s", + thd->is_fatal_error ? "fatal " : "", + (const char *) dbname.str, (const char *) name.str); + goto end; + } - RETURN VALUE - 0 success - EVEX_COMPILE_ERROR error during compilation - EVEX_MICROSECOND_UNSUP mysql.event was tampered -*/ + { + sp_head *sphead= thd->lex->sphead; -int -Event_job_data::compile(THD *thd, MEM_ROOT *mem_root) -{ - int ret= 0; - MEM_ROOT *tmp_mem_root= 0; - LEX *old_lex= thd->lex, lex; - char *old_db; - int old_db_length; - char *old_query; - uint old_query_len; - ulong old_sql_mode= thd->variables.sql_mode; - char create_buf[15 * STRING_BUFFER_USUAL_SIZE]; - String show_create(create_buf, sizeof(create_buf), system_charset_info); - CHARSET_INFO *old_character_set_client, - *old_collation_connection, - *old_character_set_results; - Security_context save_ctx; - - DBUG_ENTER("Event_job_data::compile"); - - show_create.length(0); - - switch (get_fake_create_event(thd, &show_create)) { - case EVEX_MICROSECOND_UNSUP: - DBUG_RETURN(EVEX_MICROSECOND_UNSUP); - case 0: - break; - default: - DBUG_ASSERT(0); - } + DBUG_ASSERT(sphead); - old_character_set_client= thd->variables.character_set_client; - old_character_set_results= thd->variables.character_set_results; - old_collation_connection= thd->variables.collation_connection; + if (thd->enable_slow_log) + sphead->m_flags|= sp_head::LOG_SLOW_STATEMENTS; + sphead->m_flags|= sp_head::LOG_GENERAL_LOG; - thd->variables.character_set_client= - thd->variables.character_set_results= - thd->variables.collation_connection= - get_charset_by_csname("utf8", MY_CS_PRIMARY, MYF(MY_WME)); + sphead->set_info(0, 0, &thd->lex->sp_chistics, sql_mode); + sphead->optimize(); - thd->update_charset(); + ret= sphead->execute_procedure(thd, &empty_item_list); + /* + There is no pre-locking and therefore there should be no + tables open and locked left after execute_procedure. 
+ */ + } - DBUG_PRINT("info",("old_sql_mode: %lu new_sql_mode: %lu",old_sql_mode, sql_mode)); - thd->variables.sql_mode= this->sql_mode; - /* Change the memory root for the execution time */ - if (mem_root) +end: + if (drop && !thd->is_fatal_error) { - tmp_mem_root= thd->mem_root; - thd->mem_root= mem_root; + /* + We must do it here since here we're under the right authentication + ID of the event definer. + */ + sql_print_information("Event Scheduler: Dropping %s.%s", + (const char *) dbname.str, (const char *) name.str); + /* + Construct a query for the binary log, to ensure the event is dropped + on the slave + */ + if (construct_drop_event_sql(thd, &sp_sql)) + ret= 1; + else + { + thd->query= sp_sql.c_ptr_safe(); + thd->query_length= sp_sql.length(); + if (Events::drop_event(thd, dbname, name, FALSE)) + ret= 1; + } } - old_query_len= thd->query_length; - old_query= thd->query; - old_db= thd->db; - old_db_length= thd->db_length; - thd->db= dbname.str; - thd->db_length= dbname.length; - - thd->query= show_create.c_ptr_safe(); - thd->query_length= show_create.length(); - DBUG_PRINT("info", ("query: %s",thd->query)); - - event_change_security_context(thd, definer_user, definer_host, dbname, - &save_ctx); - thd->lex= &lex; - mysql_init_query(thd, (uchar*) thd->query, thd->query_length); - if (MYSQLparse((void *)thd) || thd->is_fatal_error) + if (thd->lex->sphead) /* NULL only if a parse error */ { - DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d", - thd->is_fatal_error)); - lex.unit.cleanup(); - - sql_print_error("SCHEDULER: Error during compilation of %s.%s or " - "thd->is_fatal_error: %d", - dbname.str, name.str, thd->is_fatal_error); - - ret= EVEX_COMPILE_ERROR; - goto done; + delete thd->lex->sphead; + thd->lex->sphead= NULL; } - DBUG_PRINT("note", ("success compiling %s.%s", dbname.str, name.str)); - - sphead= lex.sphead; - - sphead->set_definer(definer.str, definer.length); - sphead->set_info(0, 0, &lex.sp_chistics, sql_mode); - sphead->optimize(); - ret= 0; -done: - - lex_end(&lex); - event_restore_security_context(thd, &save_ctx); - DBUG_PRINT("note", ("return old data on its place. set back NAMES")); - - thd->lex= old_lex; - thd->query= old_query; - thd->query_length= old_query_len; - thd->db= old_db; - - thd->variables.sql_mode= old_sql_mode; - thd->variables.character_set_client= old_character_set_client; - thd->variables.character_set_results= old_character_set_results; - thd->variables.collation_connection= old_collation_connection; - thd->update_charset(); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (save_sctx) + event_sctx.restore_security_context(thd, save_sctx); +#endif + lex_end(thd->lex); + thd->lex->unit.cleanup(); + thd->end_statement(); + thd->cleanup_after_query(); - /* Change the memory root for the execution time. */ - if (mem_root) - thd->mem_root= tmp_mem_root; + DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret)); DBUG_RETURN(ret); } @@ -1892,64 +2025,3 @@ event_basic_identifier_equal(LEX_STRING db, LEX_STRING name, Event_basic *b) return !sortcmp_lex_string(name, b->name, system_charset_info) && !sortcmp_lex_string(db, b->dbname, system_charset_info); } - - -/* - Switches the security context. 
- - SYNOPSIS - event_change_security_context() - thd Thread - user The user - host The host of the user - db The schema for which the security_ctx will be loaded - backup Where to store the old context - - RETURN VALUE - FALSE OK - TRUE Error (generates error too) -*/ - -static bool -event_change_security_context(THD *thd, LEX_STRING user, LEX_STRING host, - LEX_STRING db, Security_context *backup) -{ - DBUG_ENTER("event_change_security_context"); - DBUG_PRINT("info",("%s@%s@%s", user.str, host.str, db.str)); -#ifndef NO_EMBEDDED_ACCESS_CHECKS - - *backup= thd->main_security_ctx; - if (acl_getroot_no_password(&thd->main_security_ctx, user.str, host.str, - host.str, db.str)) - { - my_error(ER_NO_SUCH_USER, MYF(0), user.str, host.str); - DBUG_RETURN(TRUE); - } - thd->security_ctx= &thd->main_security_ctx; -#endif - DBUG_RETURN(FALSE); -} - - -/* - Restores the security context. - - SYNOPSIS - event_restore_security_context() - thd Thread - backup Context to switch to -*/ - -static void -event_restore_security_context(THD *thd, Security_context *backup) -{ - DBUG_ENTER("event_restore_security_context"); -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (backup) - { - thd->main_security_ctx= *backup; - thd->security_ctx= &thd->main_security_ctx; - } -#endif - DBUG_VOID_RETURN; -} diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h index 4346b0eb5b8..8e03ab19602 100644 --- a/sql/event_data_objects.h +++ b/sql/event_data_objects.h @@ -17,16 +17,12 @@ #define EVEX_GET_FIELD_FAILED -2 -#define EVEX_COMPILE_ERROR -3 -#define EVEX_GENERAL_ERROR -4 #define EVEX_BAD_PARAMS -5 #define EVEX_MICROSECOND_UNSUP -6 - class sp_head; class Sql_alloc; - class Event_queue_element_for_exec { public: @@ -54,19 +50,42 @@ protected: MEM_ROOT mem_root; public: + /* + ENABLED = feature can function normally (is turned on) + SLAVESIDE_DISABLED = feature is turned off on slave + DISABLED = feature is turned off + */ + enum enum_status + { + ENABLED = 1, + DISABLED, + SLAVESIDE_DISABLED + }; + + enum enum_on_completion + { + ON_COMPLETION_DROP = 1, + ON_COMPLETION_PRESERVE + }; + LEX_STRING dbname; LEX_STRING name; LEX_STRING definer;// combination of user and host + Time_zone *time_zone; + Event_basic(); virtual ~Event_basic(); virtual int - load_from_row(TABLE *table) = 0; + load_from_row(THD *thd, TABLE *table) = 0; protected: bool load_string_fields(Field **fields, ...); + + bool + load_time_zone(THD *thd, const LEX_STRING tz_name); }; @@ -78,25 +97,14 @@ protected: bool last_executed_changed; public: - enum enum_status - { - ENABLED = 1, - DISABLED - }; - - enum enum_on_completion - { - ON_COMPLETION_DROP = 1, - ON_COMPLETION_PRESERVE - }; - - enum enum_on_completion on_completion; - enum enum_status status; - TIME last_executed; - - TIME execute_at; - TIME starts; - TIME ends; + int on_completion; + int status; + longlong originator; + + my_time_t last_executed; + my_time_t execute_at; + my_time_t starts; + my_time_t ends; my_bool starts_null; my_bool ends_null; my_bool execute_at_null; @@ -112,7 +120,7 @@ public: virtual ~Event_queue_element(); virtual int - load_from_row(TABLE *table); + load_from_row(THD *thd, TABLE *table); bool compute_next_execution_time(); @@ -122,24 +130,6 @@ public: bool update_timing_fields(THD *thd); - - static void *operator new(size_t size) - { - void *p; - DBUG_ENTER("Event_queue_element::new(size)"); - p= my_malloc(size, MYF(0)); - DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p)); - DBUG_RETURN(p); - } - - static void operator delete(void *ptr, size_t size) - { - 
DBUG_ENTER("Event_queue_element::delete(ptr,size)"); - DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr)); - TRASH(ptr, size); - my_free((gptr) ptr, MYF(0)); - DBUG_VOID_RETURN; - } }; @@ -168,7 +158,7 @@ public: init(); virtual int - load_from_row(TABLE *table); + load_from_row(THD *thd, TABLE *table); int get_create_event(THD *thd, String *buf); @@ -178,30 +168,24 @@ public: class Event_job_data : public Event_basic { public: - sp_head *sphead; - LEX_STRING body; LEX_STRING definer_user; LEX_STRING definer_host; ulong sql_mode; - uint execution_count; - Event_job_data(); - virtual ~Event_job_data(); virtual int - load_from_row(TABLE *table); - - int - execute(THD *thd); + load_from_row(THD *thd, TABLE *table); - int - compile(THD *thd, MEM_ROOT *mem_root); + bool + execute(THD *thd, bool drop); private: - int - get_fake_create_event(THD *thd, String *buf); + bool + construct_sp_sql(THD *thd, String *sp_sql); + bool + construct_drop_event_sql(THD *thd, String *sp_sql); Event_job_data(const Event_job_data &); /* Prevent use of these */ void operator=(Event_job_data &); @@ -211,21 +195,17 @@ private: class Event_parse_data : public Sql_alloc { public: - enum enum_status - { - ENABLED = 1, - DISABLED - }; - enum enum_on_completion - { - ON_COMPLETION_DROP = 1, - ON_COMPLETION_PRESERVE - }; - enum enum_on_completion on_completion; - enum enum_status status; + int on_completion; + int status; + longlong originator; + /* + do_not_create will be set if STARTS time is in the past and + on_completion == ON_COMPLETION_DROP. + */ + bool do_not_create; - const uchar *body_begin; + const char *body_begin; LEX_STRING dbname; LEX_STRING name; @@ -237,9 +217,9 @@ public: Item* item_ends; Item* item_execute_at; - TIME starts; - TIME ends; - TIME execute_at; + my_time_t starts; + my_time_t ends; + my_time_t execute_at; my_bool starts_null; my_bool ends_null; my_bool execute_at_null; @@ -284,7 +264,11 @@ private: void report_bad_value(const char *item_name, Item *bad_item); + void + check_if_in_the_past(THD *thd, my_time_t ltime_utc); + Event_parse_data(const Event_parse_data &); /* Prevent use of these */ + void check_originator_id(THD *thd); void operator=(Event_parse_data &); }; diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 940930ec4c6..e49fd6791a0 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -18,12 +18,6 @@ #include "event_data_objects.h" #include "events.h" #include "sql_show.h" -#include "sp.h" -#include "sp_head.h" - - -static -time_t mysql_event_last_create_time= 0L; static const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = @@ -94,7 +88,7 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = }, { { C_STRING_WITH_LEN("status") }, - { C_STRING_WITH_LEN("enum('ENABLED','DISABLED')") }, + { C_STRING_WITH_LEN("enum('ENABLED','DISABLED','SLAVESIDE_DISABLED')") }, {NULL, 0} }, { @@ -118,30 +112,35 @@ const TABLE_FIELD_W_TYPE event_table_fields[ET_FIELD_COUNT] = { C_STRING_WITH_LEN("comment") }, { C_STRING_WITH_LEN("char(64)") }, { C_STRING_WITH_LEN("utf8") } + }, + { + { C_STRING_WITH_LEN("originator") }, + { C_STRING_WITH_LEN("int(10)") }, + {NULL, 0} + }, + { + { C_STRING_WITH_LEN("time_zone") }, + { C_STRING_WITH_LEN("char(64)") }, + { C_STRING_WITH_LEN("latin1") } } }; -/* +/** Puts some data common to CREATE and ALTER EVENT into a row. 
- SYNOPSIS - mysql_event_fill_row() - thd THD - table The row to fill out - et Event's data - is_update CREATE EVENT or ALTER EVENT + Used both when an event is created and when it is altered. - RETURN VALUE - 0 OK - EVEX_GENERAL_ERROR Bad data - EVEX_GET_FIELD_FAILED Field count does not match. table corrupted? + @param thd THD + @param table The row to fill out + @param et Event's data + @param is_update CREATE EVENT or ALTER EVENT - DESCRIPTION - Used both when an event is created and when it is altered. + @retval FALSE success + @retval TRUE error */ -static int +static bool mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, my_bool is_update) { @@ -155,6 +154,17 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, DBUG_PRINT("info", ("name =[%s]", et->name.str)); DBUG_PRINT("info", ("body =[%s]", et->body.str)); + if (table->s->fields < ET_FIELD_COUNT) + { + /* + Safety: this can only happen if someone started the server + and then altered mysql.event. + */ + my_error(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED, MYF(0), table->alias, + (int) ET_FIELD_COUNT, table->s->fields); + DBUG_RETURN(TRUE); + } + if (fields[f_num= ET_FIELD_DEFINER]-> store(et->definer.str, et->definer.length, scs)) goto err_truncate; @@ -170,10 +180,13 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, fields[ET_FIELD_STATUS]->store((longlong)et->status, TRUE); + fields[ET_FIELD_ORIGINATOR]->store((longlong)et->originator, TRUE); + + /* Change the SQL_MODE only if body was present in an ALTER EVENT and of course always during CREATE EVENT. - */ + */ if (et->body.str) { fields[ET_FIELD_SQL_MODE]->store((longlong)thd->variables.sql_mode, TRUE); @@ -183,6 +196,14 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, if (et->expression) { + const String *tz_name= thd->variables.time_zone->get_name(); + if (!is_update || !et->starts_null) + { + fields[ET_FIELD_TIME_ZONE]->set_notnull(); + fields[ET_FIELD_TIME_ZONE]->store(tz_name->ptr(), tz_name->length(), + tz_name->charset()); + } + fields[ET_FIELD_INTERVAL_EXPR]->set_notnull(); fields[ET_FIELD_INTERVAL_EXPR]->store((longlong)et->expression, TRUE); @@ -197,26 +218,40 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, if (!et->starts_null) { + MYSQL_TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, et->starts); + fields[ET_FIELD_STARTS]->set_notnull(); - fields[ET_FIELD_STARTS]->store_time(&et->starts, MYSQL_TIMESTAMP_DATETIME); + fields[ET_FIELD_STARTS]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); } if (!et->ends_null) { + MYSQL_TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, et->ends); + fields[ET_FIELD_ENDS]->set_notnull(); - fields[ET_FIELD_ENDS]->store_time(&et->ends, MYSQL_TIMESTAMP_DATETIME); + fields[ET_FIELD_ENDS]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); } } - else if (et->execute_at.year) + else if (et->execute_at) { + const String *tz_name= thd->variables.time_zone->get_name(); + fields[ET_FIELD_TIME_ZONE]->set_notnull(); + fields[ET_FIELD_TIME_ZONE]->store(tz_name->ptr(), tz_name->length(), + tz_name->charset()); + fields[ET_FIELD_INTERVAL_EXPR]->set_null(); fields[ET_FIELD_TRANSIENT_INTERVAL]->set_null(); fields[ET_FIELD_STARTS]->set_null(); fields[ET_FIELD_ENDS]->set_null(); - + + MYSQL_TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, et->execute_at); + fields[ET_FIELD_EXECUTE_AT]->set_notnull(); fields[ET_FIELD_EXECUTE_AT]-> - store_time(&et->execute_at, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } else { @@ -226,7 +261,7 @@ 
mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, this is an error if the action is create. something is borked */ } - + ((Field_timestamp *)fields[ET_FIELD_MODIFIED])->set_time(); if (et->comment.str) @@ -236,11 +271,11 @@ mysql_event_fill_row(THD *thd, TABLE *table, Event_parse_data *et, goto err_truncate; } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err_truncate: my_error(ER_EVENT_DATA_TOO_LONG, MYF(0), fields[f_num]->field_name); - DBUG_RETURN(EVEX_GENERAL_ERROR); + DBUG_RETURN(TRUE); } @@ -275,38 +310,52 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, DBUG_PRINT("info", ("Using prefix scanning on PK")); event_table->file->ha_index_init(0, 1); - event_table->field[ET_FIELD_DB]->store(db, strlen(db), scs); key_info= event_table->key_info; + + if (key_info->key_parts == 0 || + key_info->key_part[0].field != event_table->field[ET_FIELD_DB]) + { + /* Corrupted table: no index or index on a wrong column */ + my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), "event"); + ret= 1; + goto end; + } + + event_table->field[ET_FIELD_DB]->store(db, strlen(db), scs); key_len= key_info->key_part[0].store_length; if (!(key_buf= (byte *)alloc_root(thd->mem_root, key_len))) { - ret= 1; /* Don't send error, it would be done by sql_alloc_error_handler() */ + ret= 1; + goto end; } - else + + key_copy(key_buf, event_table->record[0], key_info, key_len); + if (!(ret= event_table->file->index_read(event_table->record[0], key_buf, + (key_part_map)1, HA_READ_PREFIX))) { - key_copy(key_buf, event_table->record[0], key_info, key_len); - if (!(ret= event_table->file->index_read(event_table->record[0], key_buf, - key_len, HA_READ_PREFIX))) + DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret)); + do { - DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret)); - do - { - ret= copy_event_to_schema_table(thd, schema_table, event_table); - if (ret == 0) - ret= event_table->file->index_next_same(event_table->record[0], - key_buf, key_len); - } while (ret == 0); - } - DBUG_PRINT("info", ("Scan finished. ret=%d", ret)); + ret= copy_event_to_schema_table(thd, schema_table, event_table); + if (ret == 0) + ret= event_table->file->index_next_same(event_table->record[0], + key_buf, key_len); + } while (ret == 0); } - event_table->file->ha_index_end(); + DBUG_PRINT("info", ("Scan finished. ret=%d", ret)); + /* ret is guaranteed to be != 0 */ if (ret == HA_ERR_END_OF_FILE || ret == HA_ERR_KEY_NOT_FOUND) - DBUG_RETURN(FALSE); + ret= 0; + else + event_table->file->print_error(ret, MYF(0)); - DBUG_RETURN(TRUE); +end: + event_table->file->ha_index_end(); + + DBUG_RETURN(test(ret)); } @@ -354,40 +403,33 @@ Event_db_repository::table_scan_all_for_i_s(THD *thd, TABLE *schema_table, } -/* +/** Fills I_S.EVENTS with data loaded from mysql.event. Also used by SHOW EVENTS - SYNOPSIS - Event_db_repository::fill_schema_events() - thd Thread - tables The schema table - db If not NULL then get events only from this schema + The reason we reset and backup open tables here is that this + function may be called from any query that accesses + INFORMATION_SCHEMA - including a query that is issued from + a pre-locked statement, one that already has open and locked + tables. 
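In the mysql_event_fill_row() hunks above, STARTS, ENDS and EXECUTE_AT are now kept in memory as my_time_t values (seconds since the Epoch, in UTC) and converted with my_tz_UTC->gmt_sec_to_TIME() only at the point where they are stored into the DATETIME columns, with the connection time zone name written to the new time_zone column alongside them. A minimal standalone illustration of that conversion step, using the C library instead of the server's Time_zone classes (an analogy, not the code path above):

    #include <cstdio>
    #include <ctime>
    #include <string>

    // Format UTC epoch seconds the way they end up in a DATETIME column.
    static std::string utc_datetime(time_t epoch_seconds)
    {
      struct tm utc;
      gmtime_r(&epoch_seconds, &utc);   /* always UTC, no zone conversion */

      char buf[20];                     /* "YYYY-MM-DD HH:MM:SS" + NUL */
      std::snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
                    utc.tm_year + 1900, utc.tm_mon + 1, utc.tm_mday,
                    utc.tm_hour, utc.tm_min, utc.tm_sec);
      return std::string(buf);
    }

    /* utc_datetime(0) == "1970-01-01 00:00:00" */

Storing UTC seconds plus the original time zone name lets the scheduler compare execution times as plain integers while still being able to present the event in the zone it was defined in.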
- RETURN VALUE - FALSE OK - TRUE Error + @retval FALSE success + @retval TRUE error */ -int +bool Event_db_repository::fill_schema_events(THD *thd, TABLE_LIST *tables, const char *db) { TABLE *schema_table= tables->table; TABLE *event_table= NULL; - Open_tables_state backup; int ret= 0; DBUG_ENTER("Event_db_repository::fill_schema_events"); DBUG_PRINT("info",("db=%s", db? db:"(null)")); - thd->reset_n_backup_open_tables_state(&backup); if (open_event_table(thd, TL_READ, &event_table)) - { - sql_print_error("Table mysql.event is damaged."); - thd->restore_backup_open_tables_state(&backup); - DBUG_RETURN(1); - } + DBUG_RETURN(TRUE); /* 1. SELECT I_S => use table scan. I_S.EVENTS does not guarantee order @@ -404,162 +446,100 @@ Event_db_repository::fill_schema_events(THD *thd, TABLE_LIST *tables, ret= table_scan_all_for_i_s(thd, schema_table, event_table); close_thread_tables(thd); - thd->restore_backup_open_tables_state(&backup); DBUG_PRINT("info", ("Return code=%d", ret)); DBUG_RETURN(ret); } -/* - Open mysql.event table for read +/** + Open mysql.event table for read. - SYNOPSIS - Events::open_event_table() - thd [in] Thread context - lock_type [in] How to lock the table - table [out] We will store the open table here + It's assumed that the caller knows what they are doing: + - whether it was necessary to reset-and-backup the open tables state + - whether the requested lock does not lead to a deadlock + - whether this open mode would work under LOCK TABLES, or inside a + stored function or trigger. - RETURN VALUE - 1 Cannot lock table - 2 The table is corrupted - different number of fields - 0 OK + @param[in] thd Thread context + @param[in] lock_type How to lock the table + @param[out] table We will store the open table here + + @retval TRUE open and lock failed - an error message is pushed into the + stack + @retval FALSE success */ -int +bool Event_db_repository::open_event_table(THD *thd, enum thr_lock_type lock_type, TABLE **table) { TABLE_LIST tables; DBUG_ENTER("Event_db_repository::open_event_table"); - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*) "event"; - tables.lock_type= lock_type; + tables.init_one_table("mysql", "event", lock_type); if (simple_open_n_lock_tables(thd, &tables)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); - if (table_check_intact(tables.table, ET_FIELD_COUNT, - event_table_fields, - &mysql_event_last_create_time, - ER_CANNOT_LOAD_FROM_TABLE)) - { - close_thread_tables(thd); - DBUG_RETURN(2); - } *table= tables.table; tables.table->use_all_columns(); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } -/* - Checks parameters which we got from the parsing phase. 
- - SYNOPSIS - check_parse_params() - thd Thread context - parse_data Event's data - - RETURN VALUE - FALSE OK - TRUE Error (reported) -*/ - -static int -check_parse_params(THD *thd, Event_parse_data *parse_data) -{ - DBUG_ENTER("check_parse_params"); - - if (parse_data->check_parse_data(thd)) - DBUG_RETURN(EVEX_BAD_PARAMS); - - if (!parse_data->dbname.str || - (thd->lex->sql_command == SQLCOM_ALTER_EVENT && thd->lex->spname && - !thd->lex->spname->m_db.str)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - DBUG_RETURN(EVEX_BAD_PARAMS); - } - - if (check_access(thd, EVENT_ACL, parse_data->dbname.str, 0, 0, 0, - is_schema_db(parse_data->dbname.str)) || - (thd->lex->sql_command == SQLCOM_ALTER_EVENT && thd->lex->spname && - (check_access(thd, EVENT_ACL, thd->lex->spname->m_db.str, 0, 0, 0, - is_schema_db(thd->lex->spname->m_db.str))))) - DBUG_RETURN(EVEX_BAD_PARAMS); - - DBUG_RETURN(0); -} - +/** + Creates an event record in mysql.event table. -/* - Creates an event in mysql.event + Creates an event. Relies on mysql_event_fill_row which is shared with + ::update_event. - SYNOPSIS - Event_db_repository::create_event() - thd [in] THD - parse_data [in] Object containing info about the event - create_if_not [in] Whether to generate anwarning in case event exists + @pre All semantic checks must be performed outside. This function + only creates a record on disk. + @pre The thread handle has no open tables. - RETURN VALUE - 0 OK - EVEX_GENERAL_ERROR Failure + @param[in,out] THD + @param[in] parse_data Parsed event definition + @param[in] create_if_not TRUE if IF NOT EXISTS clause was provided + to CREATE EVENT statement - DESCRIPTION - Creates an event. Relies on mysql_event_fill_row which is shared with - ::update_event. The name of the event is inside "et". 
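The rewritten Event_db_repository::create_event() in the hunks that follow no longer switches the current database or ends the active transaction itself; it opens mysql.event, checks whether a row with the same (db, name) key already exists, and either downgrades the duplicate to a note-level warning (IF NOT EXISTS) or raises ER_EVENT_ALREADY_EXISTS before filling and writing the record. The decision logic, modelled on its own with a std::map standing in for the table (names below are illustrative, nothing here is server API):

    #include <map>
    #include <string>
    #include <utility>

    enum create_result { CREATED, EXISTS_WARNING, EXISTS_ERROR };

    typedef std::pair<std::string, std::string> event_key;   /* (db, name) */
    typedef std::map<event_key, std::string> event_table;

    // Mirrors the IF NOT EXISTS handling: an existing row is either a
    // warning (and success) or a hard error, depending on the clause.
    static create_result
    create_event_row(event_table &table, const std::string &db,
                     const std::string &name, const std::string &body,
                     bool create_if_not_exists)
    {
      event_key key(db, name);
      if (table.count(key))
        return create_if_not_exists ? EXISTS_WARNING : EXISTS_ERROR;
      table[key]= body;
      return CREATED;
    }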
+ @retval FALSE success + @retval TRUE error */ bool Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, my_bool create_if_not) { - int ret= 0; + int ret= 1; TABLE *table= NULL; - char old_db_buf[NAME_LEN+1]; - LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; - bool dbchanged= FALSE; DBUG_ENTER("Event_db_repository::create_event"); - if (check_parse_params(thd, parse_data)) - goto err; - DBUG_PRINT("info", ("open mysql.event for update")); - if (open_event_table(thd, TL_WRITE, &table)) - { - my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); - goto err; - } + if (open_event_table(thd, TL_WRITE, &table)) + goto end; DBUG_PRINT("info", ("name: %.*s", parse_data->name.length, parse_data->name.str)); DBUG_PRINT("info", ("check existance of an event with the same name")); - if (!find_named_event(thd, parse_data->dbname, parse_data->name, table)) + if (!find_named_event(parse_data->dbname, parse_data->name, table)) { if (create_if_not) { push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_EVENT_ALREADY_EXISTS, ER(ER_EVENT_ALREADY_EXISTS), parse_data->name.str); - goto ok; + ret= 0; } - my_error(ER_EVENT_ALREADY_EXISTS, MYF(0), parse_data->name.str); - goto err; + else + my_error(ER_EVENT_ALREADY_EXISTS, MYF(0), parse_data->name.str); + goto end; } - DBUG_PRINT("info", ("non-existant, go forward")); - - if ((ret= sp_use_new_db(thd, parse_data->dbname, &old_db, 0, &dbchanged))) - { - my_error(ER_BAD_DB_ERROR, MYF(0)); - goto err; - } + DBUG_PRINT("info", ("non-existent, go forward")); restore_record(table, s->default_values); // Get default values for fields @@ -569,7 +549,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, table->field[ET_FIELD_DB]->char_length()) { my_error(ER_TOO_LONG_IDENT, MYF(0), parse_data->dbname.str); - goto err; + goto end; } if (system_charset_info->cset-> @@ -578,20 +558,13 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, table->field[ET_FIELD_NAME]->char_length()) { my_error(ER_TOO_LONG_IDENT, MYF(0), parse_data->name.str); - goto err; + goto end; } if (parse_data->body.length > table->field[ET_FIELD_BODY]->field_length) { my_error(ER_TOO_LONG_BODY, MYF(0), parse_data->name.str); - goto err; - } - - if (!(parse_data->expression) && !(parse_data->execute_at.year)) - { - DBUG_PRINT("error", ("neither expression nor execute_at are set!")); - my_error(ER_EVENT_NEITHER_M_EXPR_NOR_M_AT, MYF(0)); - goto err; + goto end; } ((Field_timestamp *)table->field[ET_FIELD_CREATED])->set_time(); @@ -600,93 +573,71 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, mysql_event_fill_row() calls my_error() in case of error so no need to handle it here */ - if ((ret= mysql_event_fill_row(thd, table, parse_data, FALSE))) - goto err; + if (mysql_event_fill_row(thd, table, parse_data, FALSE)) + goto end; - /* Close active transaction only if We are going to modify disk */ - if (end_active_trans(thd)) - goto err; + table->field[ET_FIELD_STATUS]->store((longlong)parse_data->status, TRUE); - if (table->file->ha_write_row(table->record[0])) + if ((ret= table->file->ha_write_row(table->record[0]))) { - my_error(ER_EVENT_STORE_FAILED, MYF(0), parse_data->name.str, ret); - goto err; + table->file->print_error(ret, MYF(0)); + goto end; } + ret= 0; -ok: - if (dbchanged) - (void) mysql_change_db(thd, old_db.str, 1); - /* - This statement may cause a spooky valgrind warning at startup - inside init_key_cache on my system (ahristov, 2006/08/10) - */ - close_thread_tables(thd); - 
DBUG_RETURN(FALSE); - -err: - if (dbchanged) - (void) mysql_change_db(thd, old_db.str, 1); +end: if (table) close_thread_tables(thd); - DBUG_RETURN(TRUE); + DBUG_RETURN(test(ret)); } -/* +/** Used to execute ALTER EVENT. Pendant to Events::update_event(). - SYNOPSIS - Event_db_repository::update_event() - thd THD - sp_name the name of the event to alter - et event's data + @param[in,out] thd thread handle + @param[in] parse_data parsed event definition + @paran[in[ new_dbname not NULL if ALTER EVENT RENAME + points at a new database name + @param[in] new_name not NULL if ALTER EVENT RENAME + points at a new event name - RETURN VALUE - FALSE OK - TRUE Error (reported) + @pre All semantic checks are performed outside this function, + it only updates the event definition on disk. + @pre We don't have any tables open in the given thread. - NOTES - sp_name is passed since this is the name of the event to - alter in case of RENAME TO. + @retval FALSE success + @retval TRUE error (reported) */ bool Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data, - LEX_STRING *new_dbname, LEX_STRING *new_name) + LEX_STRING *new_dbname, + LEX_STRING *new_name) { CHARSET_INFO *scs= system_charset_info; TABLE *table= NULL; + int ret= 1; DBUG_ENTER("Event_db_repository::update_event"); - if (open_event_table(thd, TL_WRITE, &table)) - { - my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); - goto err; - } + /* None or both must be set */ + DBUG_ASSERT(new_dbname && new_name || new_dbname == new_name); - if (check_parse_params(thd, parse_data)) - goto err; + if (open_event_table(thd, TL_WRITE, &table)) + goto end; DBUG_PRINT("info", ("dbname: %s", parse_data->dbname.str)); DBUG_PRINT("info", ("name: %s", parse_data->name.str)); DBUG_PRINT("info", ("user: %s", parse_data->definer.str)); - if (new_dbname) - DBUG_PRINT("info", ("rename to: %s@%s", new_dbname->str, new_name->str)); /* first look whether we overwrite */ if (new_name) { - if (!sortcmp_lex_string(parse_data->name, *new_name, scs) && - !sortcmp_lex_string(parse_data->dbname, *new_dbname, scs)) - { - my_error(ER_EVENT_SAME_NAME, MYF(0), parse_data->name.str); - goto err; - } - - if (!find_named_event(thd, *new_dbname, *new_name, table)) + DBUG_PRINT("info", ("rename to: %s@%s", new_dbname->str, new_name->str)); + if (!find_named_event(*new_dbname, *new_name, table)) { my_error(ER_EVENT_ALREADY_EXISTS, MYF(0), new_name->str); - goto err; + goto end; } } /* @@ -695,10 +646,10 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data, overwrite the key and SE will tell us that it cannot find the already found row (copied into record[1] later */ - if (find_named_event(thd, parse_data->dbname, parse_data->name, table)) + if (find_named_event(parse_data->dbname, parse_data->name, table)) { my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), parse_data->name.str); - goto err; + goto end; } store_record(table,record[1]); @@ -711,7 +662,7 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data, handle it here */ if (mysql_event_fill_row(thd, table, parse_data, TRUE)) - goto err; + goto end; if (new_dbname) { @@ -719,42 +670,32 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data, table->field[ET_FIELD_NAME]->store(new_name->str, new_name->length, scs); } - /* Close active transaction only if We are going to modify disk */ - if (end_active_trans(thd)) - goto err; - - int res; - if ((res= table->file->ha_update_row(table->record[1], table->record[0]))) + if ((ret= 
table->file->ha_update_row(table->record[1], table->record[0]))) { - my_error(ER_EVENT_STORE_FAILED, MYF(0), parse_data->name.str, res); - goto err; + table->file->print_error(ret, MYF(0)); + goto end; } + ret= 0; - /* close mysql.event or we crash later when loading the event from disk */ - close_thread_tables(thd); - DBUG_RETURN(FALSE); - -err: +end: if (table) close_thread_tables(thd); - DBUG_RETURN(TRUE); + DBUG_RETURN(test(ret)); } -/* - Drops an event +/** + Delete event record from mysql.event table. - SYNOPSIS - Event_db_repository::drop_event() - thd [in] THD - db [in] Database name - name [in] Event's name - drop_if_exists [in] If set and the event not existing => warning - onto the stack + @param[in,out] thd thread handle + @param[in] db Database name + @param[in] name Event name + @param[in] drop_if_exists DROP IF EXISTS clause was specified. + If set, and the event does not exist, + the error is downgraded to a warning. - RETURN VALUE - FALSE OK - TRUE Error (reported) + @retval FALSE success + @retval TRUE error (reported) */ bool @@ -762,66 +703,59 @@ Event_db_repository::drop_event(THD *thd, LEX_STRING db, LEX_STRING name, bool drop_if_exists) { TABLE *table= NULL; - Open_tables_state backup; - int ret; + int ret= 1; DBUG_ENTER("Event_db_repository::drop_event"); DBUG_PRINT("enter", ("%s@%s", db.str, name.str)); - thd->reset_n_backup_open_tables_state(&backup); - if ((ret= open_event_table(thd, TL_WRITE, &table))) - { - my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); - goto done; - } + if (open_event_table(thd, TL_WRITE, &table)) + goto end; - if (!(ret= find_named_event(thd, db, name, table))) + if (!find_named_event(db, name, table)) { - /* Close active transaction only if we are actually going to modify disk */ - if (!(ret= end_active_trans(thd)) && - (ret= table->file->ha_delete_row(table->record[0]))) - my_error(ER_EVENT_CANNOT_DELETE, MYF(0)); + if ((ret= table->file->ha_delete_row(table->record[0]))) + table->file->print_error(ret, MYF(0)); + goto end; } - else + + /* Event not found */ + if (!drop_if_exists) { - if (drop_if_exists) - { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, - ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), - "Event", name.str); - ret= 0; - } else - my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str); + my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str); + goto end; } -done: + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), + "Event", name.str); + ret= 0; + +end: if (table) close_thread_tables(thd); - thd->restore_backup_open_tables_state(&backup); - DBUG_RETURN(ret); + DBUG_RETURN(test(ret)); } -/* +/** Positions the internal pointer of `table` to the place where (db, name) is stored. - SYNOPSIS - Event_db_repository::find_named_event() - thd Thread - db Schema - name Event name - table Opened mysql.event + In case search succeeded, the table cursor points at the found row. - RETURN VALUE - FALSE OK - TRUE No such event + @param[in] db database name + @param[in] name event name + @param[in,out] table mysql.event table + + + @retval FALSE an event with such db/name key exists + @reval TRUE no record found or an error occured. 
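find_named_event(), shown next, positions the table handler on the row whose primary key matches (db, name) exactly; the change from passing an explicit key length to HA_WHOLE_KEY hands index_read_idx() a key-part map covering every part of the key instead of a byte count. Its slightly unusual contract is that FALSE means "found" and the cursor is left on the row. A self-contained analogy of that contract over an ordered map (illustrative only, not the handler interface):

    #include <map>
    #include <string>
    #include <utility>

    typedef std::pair<std::string, std::string> event_key;   /* (db, name) */
    typedef std::map<event_key, std::string> event_table;

    // Returns false when the row exists, leaving 'pos' on it -- the same
    // "FALSE == found, cursor positioned" convention as find_named_event().
    static bool find_named_event_model(const event_table &table,
                                       const std::string &db,
                                       const std::string &name,
                                       event_table::const_iterator &pos)
    {
      pos= table.find(event_key(db, name));
      return pos == table.end();
    }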
*/ bool -Event_db_repository::find_named_event(THD *thd, LEX_STRING db, LEX_STRING name, - TABLE *table) +Event_db_repository::find_named_event(LEX_STRING db, LEX_STRING name, + TABLE *table) { byte key[MAX_KEY_LENGTH]; DBUG_ENTER("Event_db_repository::find_named_event"); @@ -843,8 +777,7 @@ Event_db_repository::find_named_event(THD *thd, LEX_STRING db, LEX_STRING name, key_copy(key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, key, - table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { DBUG_PRINT("info", ("Row not found")); @@ -874,39 +807,30 @@ Event_db_repository::drop_schema_events(THD *thd, LEX_STRING schema) } -/* - Drops all events by field which has specific value of the field +/** + Drops all events which have a specific value of a field. - SYNOPSIS - Event_db_repository::drop_events_by_field() - thd Thread - table mysql.event TABLE - field Which field of the row to use for matching - field_value The value that should match + @pre The thread handle has no open tables. + + @param[in,out] thd Thread + @param[in,out] table mysql.event TABLE + @param[in] field Which field of the row to use for matching + @param[in] field_value The value that should match */ void -Event_db_repository::drop_events_by_field(THD *thd, +Event_db_repository::drop_events_by_field(THD *thd, enum enum_events_table_field field, LEX_STRING field_value) { int ret= 0; TABLE *table= NULL; READ_RECORD read_record_info; - DBUG_ENTER("Event_db_repository::drop_events_by_field"); + DBUG_ENTER("Event_db_repository::drop_events_by_field"); DBUG_PRINT("enter", ("field=%d field_value=%s", field, field_value.str)); if (open_event_table(thd, TL_WRITE, &table)) - { - /* - Currently being used only for DROP DATABASE - In this case we don't need - error message since the OK packet has been sent. But for DROP USER we - could need it. - - my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); - */ DBUG_VOID_RETURN; - } /* only enabled events are in memory, so we go now and delete the rest */ init_read_record(&read_record_info, thd, table, NULL, 1, 0); @@ -914,14 +838,20 @@ Event_db_repository::drop_events_by_field(THD *thd, { char *et_field= get_field(thd->mem_root, table->field[field]); - LEX_STRING et_field_lex= { et_field, strlen(et_field) }; - DBUG_PRINT("info", ("Current event %s name=%s", et_field, - get_field(thd->mem_root, table->field[ET_FIELD_NAME]))); - - if (!sortcmp_lex_string(et_field_lex, field_value, system_charset_info)) + /* et_field may be NULL if the table is corrupted or out of memory */ + if (et_field) { - DBUG_PRINT("info", ("Dropping")); - ret= table->file->ha_delete_row(table->record[0]); + LEX_STRING et_field_lex= { et_field, strlen(et_field) }; + DBUG_PRINT("info", ("Current event %s name=%s", et_field, + get_field(thd->mem_root, + table->field[ET_FIELD_NAME]))); + + if (!sortcmp_lex_string(et_field_lex, field_value, system_charset_info)) + { + DBUG_PRINT("info", ("Dropping")); + if ((ret= table->file->ha_delete_row(table->record[0]))) + table->file->print_error(ret, MYF(0)); + } } } end_read_record(&read_record_info); @@ -931,20 +861,14 @@ Event_db_repository::drop_events_by_field(THD *thd, } -/* +/** Looks for a named event in mysql.event and then loads it from - the table, compiles and inserts it into the cache. + the table. 
- SYNOPSIS - Event_db_repository::load_named_event() - thd [in] Thread context - dbname [in] Event's db name - name [in] Event's name - etn [out] The loaded event + @pre The given thread does not have open tables. - RETURN VALUE - FALSE OK - TRUE Error (reported) + @retval FALSE success + @retval TRUE error */ bool @@ -952,26 +876,176 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname, LEX_STRING name, Event_basic *etn) { TABLE *table= NULL; - int ret= 0; - Open_tables_state backup; + bool ret; DBUG_ENTER("Event_db_repository::load_named_event"); DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, name.length, name.str)); - thd->reset_n_backup_open_tables_state(&backup); + if (!(ret= open_event_table(thd, TL_READ, &table))) + { + if ((ret= find_named_event(dbname, name, table))) + my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str); + else if ((ret= etn->load_from_row(thd, table))) + my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), "event"); - if ((ret= open_event_table(thd, TL_READ, &table))) - my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); - else if ((ret= find_named_event(thd, dbname, name, table))) - my_error(ER_EVENT_DOES_NOT_EXIST, MYF(0), name.str); - else if ((ret= etn->load_from_row(table))) - my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), "event"); + close_thread_tables(thd); + } + + DBUG_RETURN(ret); +} + + +/** + Update the event record in mysql.event table with a changed status + and/or last execution time. + + @pre The thread handle does not have open tables. +*/ + +bool +Event_db_repository:: +update_timing_fields_for_event(THD *thd, + LEX_STRING event_db_name, + LEX_STRING event_name, + bool update_last_executed, + my_time_t last_executed, + bool update_status, + ulonglong status) +{ + TABLE *table= NULL; + Field **fields; + int ret= 1; + + DBUG_ENTER("Event_db_repository::update_timing_fields_for_event"); + + /* + Turn off row binlogging of event timing updates. These are not used + for RBR of events replicated to the slave. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + + if (open_event_table(thd, TL_WRITE, &table)) + goto end; + + fields= table->field; + + if (find_named_event(event_db_name, event_name, table)) + goto end; + + store_record(table, record[1]); + /* Don't update create on row update. */ + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + + if (update_last_executed) + { + MYSQL_TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, last_executed); + + fields[ET_FIELD_LAST_EXECUTED]->set_notnull(); + fields[ET_FIELD_LAST_EXECUTED]->store_time(&time, + MYSQL_TIMESTAMP_DATETIME); + } + if (update_status) + { + fields[ET_FIELD_STATUS]->set_notnull(); + fields[ET_FIELD_STATUS]->store(status, TRUE); + } + + if ((ret= table->file->ha_update_row(table->record[1], table->record[0]))) + { + table->file->print_error(ret, MYF(0)); + goto end; + } + + ret= 0; + +end: if (table) close_thread_tables(thd); - thd->restore_backup_open_tables_state(&backup); - /* In this case no memory was allocated so we don't need to clean */ + DBUG_RETURN(test(ret)); +} - DBUG_RETURN(ret); + +/** + Open mysql.db, mysql.user and mysql.event and check whether: + - mysql.db exists and is up to date (or from a newer version of MySQL), + - mysql.user has column Event_priv at an expected position, + - mysql.event exists and is up to date (or from a newer version of + MySQL) + + This function is called only when the server is started. + @pre The passed in thread handle has no open tables. 
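update_timing_fields_for_event() above follows the usual storage engine update protocol: store_record() copies the current row into record[1], the fields of record[0] are then changed in place, and both images are handed to ha_update_row(old, new). It also clears the row-based binlogging flag first, since last-execution bookkeeping is not something to replicate as row events. The old-image/new-image idea in a self-contained form (the struct and function names here are invented for illustration):

    #include <ctime>

    struct event_row
    {
      time_t last_executed;
      int    status;
    };

    // Keep a copy of the old image before touching the row, exactly as
    // record[1] is kept for ha_update_row(record[1], record[0]).
    static void update_timing_fields(event_row &row, event_row &old_image,
                                     bool update_last_executed,
                                     time_t last_executed,
                                     bool update_status, int status)
    {
      old_image= row;                       /* store_record(table, record[1]) */
      if (update_last_executed)
        row.last_executed= last_executed;   /* modify the new image */
      if (update_status)
        row.status= status;
      /* a real handler call would now be ha_update_row(&old_image, &row) */
    }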
+ + @retval FALSE OK + @retval TRUE Error, an error message is output to the error log. +*/ + +bool +Event_db_repository::check_system_tables(THD *thd) +{ + TABLE_LIST tables; + int ret= FALSE; + const unsigned int event_priv_column_position= 29; + + DBUG_ENTER("Event_db_repository::check_system_tables"); + DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); + + + /* Check mysql.db */ + tables.init_one_table("mysql", "db", TL_READ); + + if (simple_open_n_lock_tables(thd, &tables)) + { + ret= 1; + sql_print_error("Cannot open mysql.db"); + } + else + { + if (table_check_intact(tables.table, MYSQL_DB_FIELD_COUNT, + mysql_db_table_fields)) + ret= 1; + /* in case of an error, the message is printed inside table_check_intact */ + + close_thread_tables(thd); + } + /* Check mysql.user */ + tables.init_one_table("mysql", "user", TL_READ); + + if (simple_open_n_lock_tables(thd, &tables)) + { + ret= 1; + sql_print_error("Cannot open mysql.user"); + } + else + { + if (tables.table->s->fields < event_priv_column_position || + strncmp(tables.table->field[event_priv_column_position]->field_name, + STRING_WITH_LEN("Event_priv"))) + { + sql_print_error("mysql.user has no `Event_priv` column at position %d", + event_priv_column_position); + ret= 1; + } + close_thread_tables(thd); + } + /* Check mysql.event */ + tables.init_one_table("mysql", "event", TL_READ); + + if (simple_open_n_lock_tables(thd, &tables)) + { + ret= 1; + sql_print_error("Cannot open mysql.event"); + } + else + { + if (table_check_intact(tables.table, ET_FIELD_COUNT, event_table_fields)) + ret= 1; + /* in case of an error, the message is printed inside table_check_intact */ + close_thread_tables(thd); + } + + DBUG_RETURN(test(ret)); } diff --git a/sql/event_db_repository.h b/sql/event_db_repository.h index 1457fb64e2e..64e19854933 100644 --- a/sql/event_db_repository.h +++ b/sql/event_db_repository.h @@ -15,11 +15,16 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define EVEX_OPEN_TABLE_FAILED -1 +/* + @file + This is a private header file of Events module. Please do not include it + directly. All public declarations of Events module should be stored in + events.h and event_data_objects.h. 
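check_system_tables() above verifies mysql.db and mysql.event with table_check_intact() and, for mysql.user, simply checks that the column at the hard-coded position 29 is named Event_priv. The shape of that last check, generalized into a standalone helper (position 29 is specific to the mysql.user layout this server version expects; the helper below is only an illustration):

    #include <cstdio>
    #include <string>
    #include <vector>

    // True when the table has a column with the expected name at the
    // expected zero-based position; prints a diagnostic otherwise.
    static bool column_at_position(const std::vector<std::string> &columns,
                                   size_t position,
                                   const std::string &expected)
    {
      if (position >= columns.size() || columns[position] != expected)
      {
        std::fprintf(stderr, "missing `%s` column at position %u\n",
                     expected.c_str(), (unsigned) position);
        return false;
      }
      return true;
    }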
+*/ enum enum_events_table_field { - ET_FIELD_DB = 0, + ET_FIELD_DB = 0, ET_FIELD_NAME, ET_FIELD_BODY, ET_FIELD_DEFINER, @@ -35,6 +40,8 @@ enum enum_events_table_field ET_FIELD_ON_COMPLETION, ET_FIELD_SQL_MODE, ET_FIELD_COMMENT, + ET_FIELD_ORIGINATOR, + ET_FIELD_TIME_ZONE, ET_FIELD_COUNT /* a cool trick to count the number of fields :) */ }; @@ -61,24 +68,35 @@ public: update_event(THD *thd, Event_parse_data *parse_data, LEX_STRING *new_dbname, LEX_STRING *new_name); - bool + bool drop_event(THD *thd, LEX_STRING db, LEX_STRING name, bool drop_if_exists); void drop_schema_events(THD *thd, LEX_STRING schema); bool - find_named_event(THD *thd, LEX_STRING db, LEX_STRING name, TABLE *table); + find_named_event(LEX_STRING db, LEX_STRING name, TABLE *table); bool load_named_event(THD *thd, LEX_STRING dbname, LEX_STRING name, Event_basic *et); - int + bool open_event_table(THD *thd, enum thr_lock_type lock_type, TABLE **table); - int + bool fill_schema_events(THD *thd, TABLE_LIST *tables, const char *db); + bool + update_timing_fields_for_event(THD *thd, + LEX_STRING event_db_name, + LEX_STRING event_name, + bool update_last_executed, + my_time_t last_executed, + bool update_status, + ulonglong status); +public: + static bool + check_system_tables(THD *thd); private: void drop_events_by_field(THD *thd, enum enum_events_table_field field, @@ -90,12 +108,10 @@ private: bool table_scan_all_for_i_s(THD *thd, TABLE *schema_table, TABLE *event_table); - static bool - check_system_tables(THD *thd); - +private: /* Prevent use of these */ Event_db_repository(const Event_db_repository &); void operator=(Event_db_repository &); }; - + #endif /* _EVENT_DB_REPOSITORY_H_ */ diff --git a/sql/event_queue.cc b/sql/event_queue.cc index fb1653d6d8f..a7c31429cb3 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -32,16 +32,6 @@ #define LOCK_QUEUE_DATA() lock_data(SCHED_FUNC, __LINE__) #define UNLOCK_QUEUE_DATA() unlock_data(SCHED_FUNC, __LINE__) -struct event_queue_param -{ - THD *thd; - Event_queue *queue; - pthread_mutex_t LOCK_loaded; - pthread_cond_t COND_loaded; - bool loading_finished; -}; - - /* Compares the execute_at members of two Event_queue_element instances. Used as callback for the prioritized queue when shifting @@ -62,11 +52,13 @@ struct event_queue_param execute_at.second_part is not considered during comparison */ -static int +static int event_queue_element_compare_q(void *vptr, byte* a, byte *b) { - return my_time_compare(&((Event_queue_element *)a)->execute_at, - &((Event_queue_element *)b)->execute_at); + my_time_t lhs = ((Event_queue_element *)a)->execute_at; + my_time_t rhs = ((Event_queue_element *)b)->execute_at; + + return (lhs < rhs ? -1 : (lhs > rhs ? 1 : 0)); } @@ -80,39 +72,21 @@ event_queue_element_compare_q(void *vptr, byte* a, byte *b) Event_queue::Event_queue() :mutex_last_unlocked_at_line(0), mutex_last_locked_at_line(0), mutex_last_attempted_lock_at_line(0), - mutex_queue_data_locked(FALSE), mutex_queue_data_attempting_lock(FALSE) + mutex_queue_data_locked(FALSE), + mutex_queue_data_attempting_lock(FALSE), + next_activation_at(0) { mutex_last_unlocked_in_func= mutex_last_locked_in_func= mutex_last_attempted_lock_in_func= ""; - set_zero_time(&next_activation_at, MYSQL_TIMESTAMP_DATETIME); -} - - -/* - Inits mutexes. - - SYNOPSIS - Event_queue::init_mutexes() -*/ -void -Event_queue::init_mutexes() -{ pthread_mutex_init(&LOCK_event_queue, MY_MUTEX_INIT_FAST); pthread_cond_init(&COND_queue_state, NULL); } -/* - Destroys mutexes. 
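The ET_FIELD_COUNT sentinel in the enum_events_table_field enum above is the "count the number of fields" trick the comment mentions: in a gap-free enum the last enumerator equals the number of members before it, so ET_FIELD_COUNT can size event_table_fields[] and feed table_check_intact() without a separate constant. In isolation the idiom looks like this (static_assert is used for brevity; the 5.1 code base itself predates it):

    enum table_field
    {
      FIELD_DB= 0,
      FIELD_NAME,
      FIELD_BODY,
      FIELD_COUNT        /* equals the number of real fields above */
    };

    static const char *field_descriptions[]=
    {
      "db", "name", "body"
    };

    // Adding an enumerator before FIELD_COUNT without extending the
    // description array makes this check fail at compile time.
    static_assert(sizeof(field_descriptions) / sizeof(field_descriptions[0])
                  == FIELD_COUNT,
                  "field enum and description array are out of sync");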
- - SYNOPSIS - Event_queue::deinit_mutexes() -*/ - -void -Event_queue::deinit_mutexes() +Event_queue::~Event_queue() { + deinit_queue(); pthread_mutex_destroy(&LOCK_event_queue); pthread_cond_destroy(&COND_queue_state); } @@ -146,7 +120,7 @@ Event_queue::init_queue(THD *thd) 0 /*max_on_top*/, event_queue_element_compare_q, NULL, EVENT_QUEUE_EXTENT)) { - sql_print_error("SCHEDULER: Can't initialize the execution queue"); + sql_print_error("Event Scheduler: Can't initialize the execution queue"); goto err; } @@ -181,36 +155,50 @@ Event_queue::deinit_queue() } -/* +/** Adds an event to the queue. - SYNOPSIS - Event_queue::create_event() - dbname The schema of the new event - name The name of the new event + Compute the next execution time for an event, and if it is still + active, add it to the queue. Otherwise delete it. + The object is left intact in case of an error. Otherwise + the queue container assumes ownership of it. + + @param[in] thd thread handle + @param[in] new_element a new element to add to the queue + @param[out] created set to TRUE if no error and the element is + added to the queue, FALSE otherwise + + @retval TRUE an error occured. The value of created is undefined, + the element was not deleted. + @retval FALSE success */ -void -Event_queue::create_event(THD *thd, Event_queue_element *new_element) +bool +Event_queue::create_event(THD *thd, Event_queue_element *new_element, + bool *created) { DBUG_ENTER("Event_queue::create_event"); DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, new_element->dbname.str, new_element->name.str)); - if (new_element->status == Event_queue_element::DISABLED) - delete new_element; - else + /* Will do nothing if the event is disabled */ + new_element->compute_next_execution_time(); + if (new_element->status != Event_queue_element::ENABLED) { - new_element->compute_next_execution_time(); - DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); - - LOCK_QUEUE_DATA(); - queue_insert_safe(&queue, (byte *) new_element); - dbug_dump_queue(thd->query_start()); - pthread_cond_broadcast(&COND_queue_state); - UNLOCK_QUEUE_DATA(); + delete new_element; + *created= FALSE; + DBUG_RETURN(FALSE); } - DBUG_VOID_RETURN; + + DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); + + LOCK_QUEUE_DATA(); + *created= (queue_insert_safe(&queue, (byte *) new_element) == FALSE); + dbug_dump_queue(thd->query_start()); + pthread_cond_broadcast(&COND_queue_state); + UNLOCK_QUEUE_DATA(); + + DBUG_RETURN(!*created); } @@ -233,7 +221,8 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, DBUG_ENTER("Event_queue::update_event"); DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str)); - if (new_element->status == Event_queue_element::DISABLED) + if ((new_element->status == Event_queue_element::DISABLED) || + (new_element->status == Event_queue_element::SLAVESIDE_DISABLED)) { DBUG_PRINT("info", ("The event is disabled.")); /* @@ -254,7 +243,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, { DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element)); queue_insert_safe(&queue, (byte *) new_element); - pthread_cond_broadcast(&COND_queue_state); + pthread_cond_broadcast(&COND_queue_state); } dbug_dump_queue(thd->query_start()); @@ -285,7 +274,7 @@ Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name) find_n_remove_event(dbname, name); dbug_dump_queue(thd->query_start()); UNLOCK_QUEUE_DATA(); - + /* We don't signal here because the 
scheduler will catch the change next time it wakes up. @@ -307,7 +296,7 @@ Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name) RETURN VALUE >=0 Number of dropped events - + NOTE Expected is the caller to acquire lock on LOCK_event_queue */ @@ -339,7 +328,7 @@ Event_queue::drop_matching_events(THD *thd, LEX_STRING pattern, i++; } /* - We don't call pthread_cond_broadcast(&COND_queue_state); + We don't call pthread_cond_broadcast(&COND_queue_state); If we remove the top event: 1. The queue is empty. The scheduler will wake up at some time and realize that the queue is empty. If create_event() comes inbetween @@ -461,7 +450,8 @@ Event_queue::empty_queue() uint i; DBUG_ENTER("Event_queue::empty_queue"); DBUG_PRINT("enter", ("Purging the queue. %u element(s)", queue.elements)); - sql_print_information("SCHEDULER: Purging queue. %u events", queue.elements); + sql_print_information("Event Scheduler: Purging the queue. %u events", + queue.elements); /* empty the queue */ for (i= 0; i < queue.elements; ++i) { @@ -497,15 +487,11 @@ Event_queue::dbug_dump_queue(time_t now) DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u " "expr: %ld et.exec_at: %ld now: %ld " "(et.exec_at - now): %d if: %d", - (long) TIME_to_ulonglong_datetime(&et->execute_at), - (long) TIME_to_ulonglong_datetime(&et->starts), - (long) TIME_to_ulonglong_datetime(&et->ends), - et->execution_count, - (long) et->expression, - (long) (sec_since_epoch_TIME(&et->execute_at)), - (long) now, - (int) (sec_since_epoch_TIME(&et->execute_at) - now), - sec_since_epoch_TIME(&et->execute_at) <= now)); + (long) et->execute_at, (long) et->starts, + (long) et->ends, et->execution_count, + (long) et->expression, (long) et->execute_at, + (long) now, (int) (et->execute_at - now), + et->execute_at <= now)); } DBUG_VOID_RETURN; #endif @@ -534,7 +520,6 @@ Event_queue::get_top_for_execution_if_time(THD *thd, Event_queue_element_for_exec **event_name) { bool ret= FALSE; - struct timespec top_time; *event_name= NULL; DBUG_ENTER("Event_queue::get_top_for_execution_if_time"); @@ -553,7 +538,7 @@ Event_queue::get_top_for_execution_if_time(THD *thd, if (!queue.elements) { /* There are no events in the queue */ - set_zero_time(&next_activation_at, MYSQL_TIMESTAMP_DATETIME); + next_activation_at= 0; /* Wait on condition until signaled. Release LOCK_queue while waiting. */ cond_wait(thd, NULL, queue_empty_msg, SCHED_FUNC, __LINE__); @@ -565,16 +550,15 @@ Event_queue::get_top_for_execution_if_time(THD *thd, thd->end_time(); /* Get current time */ - time_t seconds_to_next_event= - sec_since_epoch_TIME(&top->execute_at) - thd->query_start(); next_activation_at= top->execute_at; - if (seconds_to_next_event > 0) + if (next_activation_at > thd->query_start()) { /* Not yet time for top event, wait on condition with time or until signaled. Release LOCK_queue while waiting. */ - set_timespec(top_time, seconds_to_next_event); + struct timespec top_time; + set_timespec(top_time, next_activation_at - thd->query_start()); cond_wait(thd, &top_time, queue_wait_msg, SCHED_FUNC, __LINE__); continue; @@ -600,7 +584,7 @@ Event_queue::get_top_for_execution_if_time(THD *thd, if (top->status == Event_queue_element::DISABLED) { DBUG_PRINT("info", ("removing from the queue")); - sql_print_information("SCHEDULER: Last execution of %s.%s. %s", + sql_print_information("Event Scheduler: Last execution of %s.%s. %s", top->dbname.str, top->name.str, top->dropped? 
"Dropping.":""); delete top; @@ -752,10 +736,11 @@ Event_queue::dump_internal_status() printf("Last lock attempt at: %s:%u\n", mutex_last_attempted_lock_in_func, mutex_last_attempted_lock_at_line); printf("WOC : %s\n", waiting_on_cond? "YES":"NO"); + + MYSQL_TIME time; + my_tz_UTC->gmt_sec_to_TIME(&time, next_activation_at); printf("Next activation : %04d-%02d-%02d %02d:%02d:%02d\n", - next_activation_at.year, next_activation_at.month, - next_activation_at.day, next_activation_at.hour, - next_activation_at.minute, next_activation_at.second); + time.year, time.month, time.day, time.hour, time.minute, time.second); DBUG_VOID_RETURN; } diff --git a/sql/event_queue.h b/sql/event_queue.h index a1237e1b52c..04bb8b93b06 100644 --- a/sql/event_queue.h +++ b/sql/event_queue.h @@ -25,23 +25,16 @@ class Event_queue { public: Event_queue(); - - void - init_mutexes(); - - void - deinit_mutexes(); + ~Event_queue(); bool init_queue(THD *thd); - - void - deinit_queue(); /* Methods for queue management follow */ - void - create_event(THD *thd, Event_queue_element *new_element); + bool + create_event(THD *thd, Event_queue_element *new_element, + bool *created); void update_event(THD *thd, LEX_STRING dbname, LEX_STRING name, @@ -64,9 +57,23 @@ public: void dump_internal_status(); +private: void empty_queue(); -protected: + + void + deinit_queue(); + /* helper functions for working with mutexes & conditionals */ + void + lock_data(const char *func, uint line); + + void + unlock_data(const char *func, uint line); + + void + cond_wait(THD *thd, struct timespec *abstime, const char* msg, + const char *func, uint line); + void find_n_remove_event(LEX_STRING db, LEX_STRING name); @@ -86,7 +93,7 @@ protected: /* The sorted queue with the Event_queue_element objects */ QUEUE queue; - TIME next_activation_at; + my_time_t next_activation_at; uint mutex_last_locked_at_line; uint mutex_last_unlocked_at_line; @@ -98,16 +105,6 @@ protected: bool mutex_queue_data_attempting_lock; bool waiting_on_cond; - /* helper functions for working with mutexes & conditionals */ - void - lock_data(const char *func, uint line); - - void - unlock_data(const char *func, uint line); - - void - cond_wait(THD *thd, struct timespec *abstime, const char* msg, - const char *func, uint line); }; #endif /* _EVENT_QUEUE_H_ */ diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index 64bba756be9..5844fecc227 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -37,7 +37,6 @@ extern pthread_attr_t connection_attrib; Event_db_repository *Event_worker_thread::db_repository; -Events *Event_worker_thread::events_facade; static @@ -78,13 +77,13 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et) char prefix_buf[5 * STRING_BUFFER_USUAL_SIZE]; String prefix(prefix_buf, sizeof(prefix_buf), system_charset_info); prefix.length(0); - prefix.append("SCHEDULER: ["); + prefix.append("Event Scheduler: ["); - append_identifier(thd, &prefix, et->definer.str, et->definer.length); + prefix.append(et->definer.str, et->definer.length, system_charset_info); prefix.append("][", 2); - append_identifier(thd,&prefix, et->dbname.str, et->dbname.length); + prefix.append(et->dbname.str, et->dbname.length, system_charset_info); prefix.append('.'); - append_identifier(thd,&prefix, et->name.str, et->name.length); + prefix.append(et->name.str, et->name.length, system_charset_info); prefix.append("] ", 2); List_iterator_fast<MYSQL_ERROR> it(thd->warn_list); @@ -95,7 +94,6 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et) 
err_msg.length(0); err_msg.append(prefix); err_msg.append(err->msg, strlen(err->msg), system_charset_info); - err_msg.append("]"); DBUG_ASSERT(err->level < 3); (sql_print_message_handlers[err->level])("%*s", err_msg.length(), err_msg.c_ptr()); @@ -156,8 +154,6 @@ deinit_event_thread(THD *thd) thread_running--; delete thd; pthread_mutex_unlock(&LOCK_thread_count); - - my_thread_end(); } @@ -233,14 +229,13 @@ event_scheduler_thread(void *arg) if (!res) scheduler->run(thd); - deinit_event_thread(thd); - pthread_exit(0); + my_thread_end(); DBUG_RETURN(0); // Against gcc warnings } -/* - Function that executes an event in a child thread. Setups the +/** + Function that executes an event in a child thread. Setups the environment for the event execution and cleans after that. SYNOPSIS @@ -254,7 +249,7 @@ event_scheduler_thread(void *arg) pthread_handler_t event_worker_thread(void *arg) { - THD *thd; + THD *thd; Event_queue_element_for_exec *event= (Event_queue_element_for_exec *)arg; thd= event->thd; @@ -262,12 +257,13 @@ event_worker_thread(void *arg) Event_worker_thread worker_thread; worker_thread.run(thd, event); + my_thread_end(); return 0; // Can't return anything here } -/* - Function that executes an event in a child thread. Setups the +/** + Function that executes an event in a child thread. Setups the environment for the event execution and cleans after that. SYNOPSIS @@ -281,8 +277,7 @@ Event_worker_thread::run(THD *thd, Event_queue_element_for_exec *event) { /* needs to be first for thread_stack */ char my_stack; - int ret; - Event_job_data *job_data= NULL; + Event_job_data job_data; bool res; thd->thread_stack= &my_stack; // remember where our stack is @@ -295,117 +290,60 @@ Event_worker_thread::run(THD *thd, Event_queue_element_for_exec *event) if (res) goto end; - if (!(job_data= new Event_job_data())) - goto end; - else if ((ret= db_repository-> - load_named_event(thd, event->dbname, event->name, job_data))) + if ((res= db_repository->load_named_event(thd, event->dbname, event->name, + &job_data))) { - DBUG_PRINT("error", ("Got %d from load_named_event", ret)); + DBUG_PRINT("error", ("Got error from load_named_event")); goto end; } - sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. ", - job_data->dbname.str, job_data->name.str, - job_data->definer.str, thd->thread_id); + sql_print_information("Event Scheduler: " + "[%s].[%s.%s] started in thread %lu.", + job_data.definer.str, + job_data.dbname.str, job_data.name.str, + thd->thread_id); thd->enable_slow_log= TRUE; - ret= job_data->execute(thd); + res= job_data.execute(thd, event->dropped); - print_warnings(thd, job_data); + print_warnings(thd, &job_data); - sql_print_information("SCHEDULER: [%s.%s of %s] executed in thread %lu. 
" - "RetCode=%d", job_data->dbname.str, job_data->name.str, - job_data->definer.str, thd->thread_id, ret); - if (ret == EVEX_COMPILE_ERROR) - sql_print_information("SCHEDULER: COMPILE ERROR for event %s.%s of %s", - job_data->dbname.str, job_data->name.str, - job_data->definer.str); - else if (ret == EVEX_MICROSECOND_UNSUP) - sql_print_information("SCHEDULER: MICROSECOND is not supported"); + if (res) + sql_print_information("Event Scheduler: " + "[%s].[%s.%s] event execution failed.", + job_data.definer.str, + job_data.dbname.str, job_data.name.str); + else + sql_print_information("Event Scheduler: " + "[%s].[%s.%s] executed successfully in thread %lu.", + job_data.definer.str, + job_data.dbname.str, job_data.name.str, + thd->thread_id); end: - delete job_data; - - if (event->dropped) - { - sql_print_information("SCHEDULER: Dropping %s.%s", event->dbname.str, - event->name.str); - /* - Using db_repository can lead to a race condition because we access - the table without holding LOCK_metadata. - Scenario: - 1. CREATE EVENT xyz AT ... (conn thread) - 2. execute xyz (worker) - 3. CREATE EVENT XYZ EVERY ... (conn thread) - 4. drop xyz (worker) - 5. XYZ was just created on disk but `drop xyz` of the worker dropped it. - A consequent load to create Event_queue_element will fail. - - If all operations are performed under LOCK_metadata there is no such - problem. However, this comes at the price of introduction bi-directional - association between class Events and class Event_worker_thread. - */ - events_facade->drop_event(thd, event->dbname, event->name, FALSE); - } DBUG_PRINT("info", ("Done with Event %s.%s", event->dbname.str, event->name.str)); delete event; deinit_event_thread(thd); - pthread_exit(0); } -/* - Performs initialization of the scheduler data, outside of the - threading primitives. - - SYNOPSIS - Event_scheduler::init_scheduler() -*/ - -void -Event_scheduler::init_scheduler(Event_queue *q) -{ - LOCK_DATA(); - queue= q; - started_events= 0; - scheduler_thd= NULL; - state= INITIALIZED; - UNLOCK_DATA(); -} - - -void -Event_scheduler::deinit_scheduler() {} - - -/* - Inits scheduler's threading primitives. - - SYNOPSIS - Event_scheduler::init_mutexes() -*/ - -void -Event_scheduler::init_mutexes() +Event_scheduler::Event_scheduler(Event_queue *queue_arg) + :state(UNINITIALIZED), + scheduler_thd(NULL), + queue(queue_arg), + started_events(0) { pthread_mutex_init(&LOCK_scheduler_state, MY_MUTEX_INIT_FAST); pthread_cond_init(&COND_state, NULL); } -/* - Deinits scheduler's threading primitives. - - SYNOPSIS - Event_scheduler::deinit_mutexes() -*/ - -void -Event_scheduler::deinit_mutexes() +Event_scheduler::~Event_scheduler() { + stop(); /* does nothing if not running */ pthread_mutex_destroy(&LOCK_scheduler_state); pthread_cond_destroy(&COND_state); } @@ -442,7 +380,7 @@ Event_scheduler::start() if (!(new_thd= new THD)) { - sql_print_error("SCHEDULER: Cannot init manager event thread"); + sql_print_error("Event Scheduler: Cannot initialize the scheduler thread"); ret= TRUE; goto end; } @@ -458,7 +396,7 @@ Event_scheduler::start() scheduler_thd= new_thd; DBUG_PRINT("info", ("Setting state go RUNNING")); state= RUNNING; - DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd)); + DBUG_PRINT("info", ("Forking new thread for scheduler. 
THD: 0x%lx", (long) new_thd)); if (pthread_create(&th, &connection_attrib, event_scheduler_thread, (void*)scheduler_param_value)) { @@ -501,7 +439,7 @@ Event_scheduler::run(THD *thd) int res= FALSE; DBUG_ENTER("Event_scheduler::run"); - sql_print_information("SCHEDULER: Manager thread started with id %lu", + sql_print_information("Event Scheduler: scheduler thread started with id %lu", thd->thread_id); /* Recalculate the values in the queue because there could have been stops @@ -516,7 +454,8 @@ Event_scheduler::run(THD *thd) /* Gets a minimized version */ if (queue->get_top_for_execution_if_time(thd, &event_name)) { - sql_print_information("SCHEDULER: Serious error during getting next " + sql_print_information("Event Scheduler: " + "Serious error during getting next " "event to execute. Stopping"); break; } @@ -525,7 +464,7 @@ Event_scheduler::run(THD *thd) "event_name=0x%lx", (long) event_name)); if (event_name) { - if ((res= execute_top(thd, event_name))) + if ((res= execute_top(event_name))) break; } else @@ -535,12 +474,14 @@ Event_scheduler::run(THD *thd) } DBUG_PRINT("info", ("state=%s", scheduler_states_names[state].str)); } + LOCK_DATA(); - DBUG_PRINT("info", ("Signalling back to the stopper COND_state")); + deinit_event_thread(thd); + scheduler_thd= NULL; state= INITIALIZED; + DBUG_PRINT("info", ("Signalling back to the stopper COND_state")); pthread_cond_signal(&COND_state); UNLOCK_DATA(); - sql_print_information("SCHEDULER: Stopped"); DBUG_RETURN(res); } @@ -559,7 +500,7 @@ Event_scheduler::run(THD *thd) */ bool -Event_scheduler::execute_top(THD *thd, Event_queue_element_for_exec *event_name) +Event_scheduler::execute_top(Event_queue_element_for_exec *event_name) { THD *new_thd; pthread_t th; @@ -574,6 +515,14 @@ Event_scheduler::execute_top(THD *thd, Event_queue_element_for_exec *event_name) DBUG_PRINT("info", ("Event %s@%s ready for start", event_name->dbname.str, event_name->name.str)); + /* + TODO: should use thread pool here, preferably with an upper limit + on number of threads: if too many events are scheduled for the + same time, starting all of them at once won't help them run truly + in parallel (because of the great amount of synchronization), so + we may as well execute them in sequence, keeping concurrency at a + reasonable level. + */ /* Major failure */ if ((res= pthread_create(&th, &connection_attrib, event_worker_thread, event_name))) @@ -623,10 +572,13 @@ Event_scheduler::is_running() } -/* +/** Stops the scheduler (again). Waits for acknowledgement from the scheduler that it has stopped - synchronous stopping. + Already running events will not be stopped. If the user needs + them stopped manual intervention is needed. + SYNOPSIS Event_scheduler::stop() @@ -649,8 +601,8 @@ Event_scheduler::stop() /* Guarantee we don't catch spurious signals */ do { - DBUG_PRINT("info", ("Waiting for COND_started_or_stopped from the manager " - "thread. Current value of state is %s . " + DBUG_PRINT("info", ("Waiting for COND_started_or_stopped from " + "the scheduler thread. Current value of state is %s . 
" "workers count=%d", scheduler_states_names[state].str, workers_count())); /* @@ -664,31 +616,24 @@ Event_scheduler::stop() */ state= STOPPING; - DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id)); + DBUG_PRINT("info", ("Scheduler thread has id %lu", + scheduler_thd->thread_id)); /* Lock from delete */ pthread_mutex_lock(&scheduler_thd->LOCK_delete); /* This will wake up the thread if it waits on Queue's conditional */ - sql_print_information("SCHEDULER: Killing manager thread %lu", + sql_print_information("Event Scheduler: Killing the scheduler thread, " + "thread id %lu", scheduler_thd->thread_id); scheduler_thd->awake(THD::KILL_CONNECTION); pthread_mutex_unlock(&scheduler_thd->LOCK_delete); /* thd could be 0x0, when shutting down */ - sql_print_information("SCHEDULER: Waiting the manager thread to reply"); + sql_print_information("Event Scheduler: " + "Waiting for the scheduler thread to reply"); COND_STATE_WAIT(thd, NULL, "Waiting scheduler to stop"); } while (state == STOPPING); - DBUG_PRINT("info", ("Manager thread has cleaned up. Set state to INIT")); - /* - The rationale behind setting it to NULL here but not destructing it - beforehand is because the THD will be deinited in event_scheduler_thread(). - It's more clear when the post_init and the deinit is done in one function. - Here we just mark that the scheduler doesn't have a THD anymore. Though for - milliseconds the old thread could exist we can't use it anymore. When we - unlock the mutex in this function a little later the state will be - INITIALIZED. Therefore, a connection thread could enter the critical section - and will create a new THD object. - */ - scheduler_thd= NULL; + DBUG_PRINT("info", ("Scheduler thread has cleaned up. Set state to INIT")); + sql_print_information("Event Scheduler: Stopped"); end: UNLOCK_DATA(); DBUG_RETURN(FALSE); @@ -707,7 +652,7 @@ Event_scheduler::workers_count() { THD *tmp; uint count= 0; - + DBUG_ENTER("Event_scheduler::workers_count"); pthread_mutex_lock(&LOCK_thread_count); // For unlink from list I_List_iterator<THD> it(threads); diff --git a/sql/event_scheduler.h b/sql/event_scheduler.h index 2ab21464057..70635196745 100644 --- a/sql/event_scheduler.h +++ b/sql/event_scheduler.h @@ -15,6 +15,13 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/** + @file + This file is internal to Events module. Please do not include it directly. + All public declarations of Events module are in events.h and + event_data_objects.h. 
+*/ + class Event_queue; class Event_job_data; @@ -31,14 +38,13 @@ void deinit_event_thread(THD *thd); -class Event_worker_thread +class Event_worker_thread { public: static void - init(Events *events, Event_db_repository *db_repo) + init(Event_db_repository *db_repository_arg) { - db_repository= db_repo; - events_facade= events; + db_repository= db_repository_arg; } void @@ -49,15 +55,15 @@ private: print_warnings(THD *thd, Event_job_data *et); static Event_db_repository *db_repository; - static Events *events_facade; }; class Event_scheduler { public: - Event_scheduler():state(UNINITIALIZED){} - ~Event_scheduler(){} + Event_scheduler(Event_queue *event_queue_arg); + ~Event_scheduler(); + /* State changing methods follow */ @@ -74,17 +80,6 @@ public: bool run(THD *thd); - void - init_scheduler(Event_queue *queue); - - void - deinit_scheduler(); - - void - init_mutexes(); - - void - deinit_mutexes(); /* Information retrieving methods follow */ bool @@ -99,7 +94,7 @@ private: /* helper functions */ bool - execute_top(THD *thd, Event_queue_element_for_exec *event_name); + execute_top(Event_queue_element_for_exec *event_name); /* helper functions for working with mutexes & conditionals */ void diff --git a/sql/events.cc b/sql/events.cc index f73dc97e7c2..7838972a5d6 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -19,7 +19,6 @@ #include "event_db_repository.h" #include "event_queue.h" #include "event_scheduler.h" -#include "sp_head.h" /* TODO list : @@ -49,7 +48,7 @@ counterpart. 1. CREATE EVENT the_name ON SCHEDULE EVERY 1 SECOND DISABLE DO SELECT 1; 2. DROP EVENT the_name - + In other words, the first one will create a row in mysql.event . In the second step because there will be a line, disk based drop will pass and the scheduler will remove the memory counterpart. The reason is that @@ -66,7 +65,7 @@ static const char *opt_event_scheduler_state_names[]= { "OFF", "ON", "0", "1", "DISABLED", NullS }; -TYPELIB Events::opt_typelib= +const TYPELIB Events::opt_typelib= { array_elements(opt_event_scheduler_state_names)-1, "", @@ -82,7 +81,7 @@ TYPELIB Events::opt_typelib= */ static const char *var_event_scheduler_state_names[]= { "OFF", "ON", NullS }; -TYPELIB Events::var_typelib= +const TYPELIB Events::var_typelib= { array_elements(var_event_scheduler_state_names)-1, "", @@ -90,20 +89,13 @@ TYPELIB Events::var_typelib= NULL }; - -static -Event_queue events_event_queue; - -static -Event_scheduler events_event_scheduler; - - -Event_db_repository events_event_db_repository; - -Events Events::singleton; - -enum Events::enum_opt_event_scheduler Events::opt_event_scheduler= - Events::EVENTS_OFF; +Event_queue *Events::event_queue; +Event_scheduler *Events::scheduler; +Event_db_repository *Events::db_repository; +enum Events::enum_opt_event_scheduler +Events::opt_event_scheduler= Events::EVENTS_OFF; +pthread_mutex_t Events::LOCK_event_metadata; +bool Events::check_system_tables_error= FALSE; /* @@ -128,25 +120,89 @@ int sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs) } -/* - Accessor for the singleton instance. +/** + @brief Initialize the start up option of the Events scheduler. - SYNOPSIS - Events::get_instance() + Do not initialize the scheduler subsystem yet - the initialization + is split into steps as it has to fit into the common MySQL + initialization framework. + No locking as this is called only at start up. - RETURN VALUE - address + @param[in,out] argument The value of the argument. If this value + is found in the typelib, the argument is + updated. 
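
A small sketch of the dependency change visible in the header above: Event_scheduler now receives its Event_queue through the constructor instead of a separate init_scheduler() call, so a scheduler object can never exist half-initialized. The types below are illustrative stand-ins, not the server's classes.

    #include <cstdio>

    struct Queue { int pending = 0; };              // stand-in for Event_queue

    class Scheduler {
    public:
      explicit Scheduler(Queue *queue_arg)          // dependency fixed at construction
        : queue_(queue_arg), started_events_(0) {}
      void poll() { std::printf("pending=%d\n", queue_->pending); }
    private:
      Queue *queue_;                                // never NULL after construction
      int started_events_;
    };

    int main() {
      Queue q;
      Scheduler s(&q);                              // no separate init step to forget
      s.poll();
      return 0;
    }
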
+ + @retval TRUE unknown option value + @retval FALSE success */ -Events * -Events::get_instance() +bool +Events::set_opt_event_scheduler(char *argument) { - DBUG_ENTER("Events::get_instance"); - DBUG_RETURN(&singleton); + if (argument == NULL) + opt_event_scheduler= Events::EVENTS_DISABLED; + else + { + int type; + /* + type= 1 2 3 4 5 + (OFF | ON) - (0 | 1) (DISABLE ) + */ + const static enum enum_opt_event_scheduler type2state[]= + { EVENTS_OFF, EVENTS_ON, EVENTS_OFF, EVENTS_ON, EVENTS_DISABLED }; + + type= find_type(argument, &opt_typelib, 1); + + DBUG_ASSERT(type >= 0 && type <= 5); /* guaranteed by find_type */ + + if (type == 0) + { + fprintf(stderr, "Unknown option to event-scheduler: %s\n", argument); + return TRUE; + } + opt_event_scheduler= type2state[type-1]; + } + return FALSE; } -/* +/** + Return a string representation of the current scheduler mode. +*/ + +const char * +Events::get_opt_event_scheduler_str() +{ + const char *str; + + pthread_mutex_lock(&LOCK_event_metadata); + str= opt_typelib.type_names[(int) opt_event_scheduler]; + pthread_mutex_unlock(&LOCK_event_metadata); + + return str; +} + + +/** + Push an error into the error stack if the system tables are + not up to date. +*/ + +bool Events::check_if_system_tables_error() +{ + DBUG_ENTER("Events::check_if_system_tables_error"); + + if (check_system_tables_error) + { + my_error(ER_EVENTS_DB_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + + DBUG_RETURN(FALSE); +} + + +/** Reconstructs interval expression from interval type and expression value that is in form of a value of the smalles entity: For @@ -278,53 +334,74 @@ common_1_lev_code: return 0; } -/* - Constructor of Events class. It's called when events.o - is loaded. Assigning addressed of static variables in this - object file. - SYNOPSIS - Events::Events() +/** + Create a new event. + + @param[in,out] thd THD + @param[in] parse_data Event's data from parsing stage + @param[in] if_not_exists Whether IF NOT EXISTS was + specified + In case there is an event with the same name (db) and + IF NOT EXISTS is specified, an warning is put into the stack. + @sa Events::drop_event for the notes about locking, pre-locking + and Events DDL. + + @retval FALSE OK + @retval TRUE Error (reported) */ -Events::Events() +bool +Events::create_event(THD *thd, Event_parse_data *parse_data, + bool if_not_exists) { - scheduler= &events_event_scheduler; - event_queue= &events_event_queue; - db_repository= &events_event_db_repository; -} + int ret; + DBUG_ENTER("Events::create_event"); + /* + Let's commit the transaction first - MySQL manual specifies + that a DDL issues an implicit commit, and it doesn't say "successful + DDL", so that an implicit commit is a property of any successfully + parsed DDL statement. + */ + if (end_active_trans(thd)) + DBUG_RETURN(TRUE); -/* - The function exported to the world for creating of events. + if (check_if_system_tables_error()) + DBUG_RETURN(TRUE); - SYNOPSIS - Events::create_event() - thd [in] THD - parse_data [in] Event's data from parsing stage - if_not_exists [in] Whether IF NOT EXISTS was specified in the DDL + /* + Perform semantic checks outside of Event_db_repository: + once CREATE EVENT is supported in prepared statements, the + checks will be moved to PREPARE phase. 
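
The start-up option handling above maps the user's string through a name table to an enum: a 1-based match index (0 meaning "no match") is translated via type2state[], so "OFF"/"0" and "ON"/"1" collapse onto the same states and a missing value means DISABLED. This standalone sketch mimics that lookup without the server's TYPELIB/find_type machinery; the helper names are hypothetical.

    #include <cctype>
    #include <cstdio>

    enum OptState { EVENTS_OFF, EVENTS_ON, EVENTS_DISABLED };

    static bool eq_nocase(const char *a, const char *b)
    {
      for (; *a && *b; a++, b++)
        if (std::tolower((unsigned char) *a) != std::tolower((unsigned char) *b))
          return false;
      return *a == '\0' && *b == '\0';
    }

    static int find_name(const char *arg, const char *const names[], int count)
    {
      for (int i = 0; i < count; i++)
        if (eq_nocase(arg, names[i]))
          return i + 1;                             // 1-based, like find_type()
      return 0;                                     // unknown value
    }

    static bool set_opt(const char *argument, OptState *out)
    {
      static const char *const names[] = { "OFF", "ON", "0", "1", "DISABLED" };
      static const OptState type2state[] =
        { EVENTS_OFF, EVENTS_ON, EVENTS_OFF, EVENTS_ON, EVENTS_DISABLED };

      if (argument == NULL) { *out = EVENTS_DISABLED; return false; }  // option given without a value
      int type = find_name(argument, names, 5);
      if (type == 0) {
        std::fprintf(stderr, "Unknown option to event-scheduler: %s\n", argument);
        return true;                                // TRUE == error, as in the patch
      }
      *out = type2state[type - 1];
      return false;
    }

    int main() {
      OptState s;
      if (!set_opt("DISABLED", &s))
        std::printf("state=%d\n", (int) s);
      return 0;
    }
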
+ */ + if (parse_data->check_parse_data(thd)) + DBUG_RETURN(TRUE); - RETURN VALUE - FALSE OK - TRUE Error (Reported) + /* At create, one of them must be set */ + DBUG_ASSERT(parse_data->expression || parse_data->execute_at); - NOTES - In case there is an event with the same name (db) and - IF NOT EXISTS is specified, an warning is put into the stack. -*/ + if (check_access(thd, EVENT_ACL, parse_data->dbname.str, 0, 0, 0, + is_schema_db(parse_data->dbname.str))) + DBUG_RETURN(TRUE); -bool -Events::create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists) -{ - int ret; - DBUG_ENTER("Events::create_event"); - if (unlikely(check_system_tables_error)) + if (check_db_dir_existence(parse_data->dbname.str)) { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); + my_error(ER_BAD_DB_ERROR, MYF(0), parse_data->dbname.str); DBUG_RETURN(TRUE); } + if (parse_data->do_not_create) + DBUG_RETURN(FALSE); + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for CREATE EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); + /* On error conditions my_error() is called so no need to handle here */ if (!(ret= db_repository->create_event(thd, parse_data, if_not_exists))) { @@ -336,55 +413,119 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists) parse_data->name, new_element))) { - DBUG_ASSERT(ret == OP_LOAD_ERROR); + db_repository->drop_event(thd, parse_data->dbname, parse_data->name, + TRUE); delete new_element; } else - event_queue->create_event(thd, new_element); + { + /* TODO: do not ignore the out parameter and a possible OOM error! */ + bool created; + if (event_queue) + event_queue->create_event(thd, new_element, &created); + /* Binlog the create event. */ + DBUG_ASSERT(thd->query && thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + } } pthread_mutex_unlock(&LOCK_event_metadata); DBUG_RETURN(ret); - } -/* - The function exported to the world for alteration of events. +/** + Alter an event. - SYNOPSIS - Events::update_event() - thd [in] THD - parse_data [in] Event's data from parsing stage - rename_to [in] Set in case of RENAME TO. + @param[in,out] thd THD + @param[in] parse_data Event's data from parsing stage + @param[in] new_dbname A new schema name for the event. Set in the case of + ALTER EVENT RENAME, otherwise is NULL. + @param[in] new_name A new name for the event. Set in the case of + ALTER EVENT RENAME - RETURN VALUE - FALSE OK - TRUE Error + Parameter 'et' contains data about dbname and event name. + Parameter 'new_name' is the new name of the event, if not null + this means that RENAME TO was specified in the query + @sa Events::drop_event for the locking notes. - NOTES - et contains data about dbname and event name. - new_name is the new name of the event, if not null this means - that RENAME TO was specified in the query + @retval FALSE OK + @retval TRUE error (reported) */ bool -Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) +Events::update_event(THD *thd, Event_parse_data *parse_data, + LEX_STRING *new_dbname, LEX_STRING *new_name) { int ret; Event_queue_element *new_element; + DBUG_ENTER("Events::update_event"); - LEX_STRING *new_dbname= rename_to ? &rename_to->m_db : NULL; - LEX_STRING *new_name= rename_to ? 
&rename_to->m_name : NULL; - if (unlikely(check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); + + /* + For consistency, implicit COMMIT should be the first thing in the + execution chain. + */ + if (end_active_trans(thd)) + DBUG_RETURN(TRUE); + + if (check_if_system_tables_error()) DBUG_RETURN(TRUE); + + if (parse_data->check_parse_data(thd) || parse_data->do_not_create) + DBUG_RETURN(TRUE); + + if (check_access(thd, EVENT_ACL, parse_data->dbname.str, 0, 0, 0, + is_schema_db(parse_data->dbname.str))) + DBUG_RETURN(TRUE); + + if (new_dbname) /* It's a rename */ + { + /* Check that the new and the old names differ. */ + if ( !sortcmp_lex_string(parse_data->dbname, *new_dbname, + system_charset_info) && + !sortcmp_lex_string(parse_data->name, *new_name, + system_charset_info)) + { + my_error(ER_EVENT_SAME_NAME, MYF(0), parse_data->name.str); + DBUG_RETURN(TRUE); + } + + /* + And the user has sufficient privileges to use the target database. + Do it before checking whether the database exists: we don't want + to tell the user that a database doesn't exist if they can not + access it. + */ + if (check_access(thd, EVENT_ACL, new_dbname->str, 0, 0, 0, + is_schema_db(new_dbname->str))) + DBUG_RETURN(TRUE); + + /* Check that the target database exists */ + if (check_db_dir_existence(new_dbname->str)) + { + my_error(ER_BAD_DB_ERROR, MYF(0), new_dbname->str); + DBUG_RETURN(TRUE); + } } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for UPDATE EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + pthread_mutex_lock(&LOCK_event_metadata); + /* On error conditions my_error() is called so no need to handle here */ - if (!(ret= db_repository->update_event(thd, parse_data, new_dbname, new_name))) + if (!(ret= db_repository->update_event(thd, parse_data, + new_dbname, new_name))) { LEX_STRING dbname= new_dbname ? *new_dbname : parse_data->dbname; LEX_STRING name= new_name ? *new_name : parse_data->name; @@ -395,11 +536,28 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) new_element))) { DBUG_ASSERT(ret == OP_LOAD_ERROR); - delete new_element; + delete new_element; } else - event_queue->update_event(thd, parse_data->dbname, parse_data->name, - new_element); + { + /* + TODO: check if an update actually has inserted an entry + into the queue. + If not, and the element is ON COMPLETION NOT PRESERVE, delete + it right away. + */ + if (event_queue) + event_queue->update_event(thd, parse_data->dbname, parse_data->name, + new_element); + /* Binlog the alter event. */ + DBUG_ASSERT(thd->query && thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + } } pthread_mutex_unlock(&LOCK_event_metadata); @@ -407,20 +565,28 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to) } -/* +/** Drops an event - SYNOPSIS - Events::drop_event() - thd [in] THD - dbname [in] Event's schema - name [in] Event's name - if_exists [in] When set and the event does not exist => - warning onto the stack - - RETURN VALUE - FALSE OK - TRUE Error (reported) + @param[in,out] thd THD + @param[in] dbname Event's schema + @param[in] name Event's name + @param[in] if_exists When this is set and the event does not exist + a warning is pushed into the warning stack. + Otherwise the operation produces an error. 
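
The rename path above performs three checks in a deliberate order: reject a no-op rename (database and name both compare equal), verify access to the target schema before revealing anything about it, and only then test that the schema exists. A stub-based sketch of that ordering follows; every helper here is hypothetical and the plain string compare stands in for the server's charset-aware comparison.

    #include <cstdio>
    #include <set>
    #include <string>

    static bool has_event_priv(const std::string &) { return true; }   // stand-in for check_access()
    static bool schema_exists(const std::string &db)
    {
      static const std::set<std::string> dbs = { "test", "mysql" };
      return dbs.count(db) != 0;                    // stand-in for check_db_dir_existence()
    }

    // returns true on error, mirroring the bool convention in Events::update_event()
    static bool check_rename(const std::string &old_db, const std::string &old_name,
                             const std::string &new_db, const std::string &new_name)
    {
      if (old_db == new_db && old_name == new_name) {
        std::fprintf(stderr, "ER_EVENT_SAME_NAME: %s\n", old_name.c_str());
        return true;
      }
      if (!has_event_priv(new_db))                  // privilege check first, so an
        return true;                                // inaccessible schema is not revealed
      if (!schema_exists(new_db)) {
        std::fprintf(stderr, "ER_BAD_DB_ERROR: %s\n", new_db.c_str());
        return true;
      }
      return false;
    }

    int main() {
      std::printf("rename ok: %d\n", !check_rename("test", "e1", "test", "e2"));
      return 0;
    }
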
+ + @note Similarly to DROP PROCEDURE, we do not allow DROP EVENT + under LOCK TABLES mode, unless table mysql.event is locked. To + ensure that, we do not reset & backup the open tables state in + this function - if in LOCK TABLES or pre-locking mode, this will + lead to an error 'Table mysql.event is not locked with LOCK + TABLES' unless it _is_ locked. In pre-locked mode there is + another barrier - DROP EVENT commits the current transaction, + and COMMIT/ROLLBACK is not allowed in stored functions and + triggers. + + @retval FALSE OK + @retval TRUE Error (reported) */ bool @@ -428,45 +594,82 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists) { int ret; DBUG_ENTER("Events::drop_event"); - if (unlikely(check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); + + /* + In MySQL, DDL must always commit: since mysql.* tables are + non-transactional, we must modify them outside a transaction + to not break atomicity. + But the second and more important reason to commit here + regardless whether we're actually changing mysql.event table + or not is replication: end_active_trans syncs the binary log, + and unless we run DDL in it's own transaction it may simply + never appear on the slave in case the outside transaction + rolls back. + */ + if (end_active_trans(thd)) + DBUG_RETURN(TRUE); + + if (check_if_system_tables_error()) DBUG_RETURN(TRUE); - } + + if (check_access(thd, EVENT_ACL, dbname.str, 0, 0, 0, + is_schema_db(dbname.str))) + DBUG_RETURN(TRUE); + + /* + Turn off row binlogging of this statement and use statement-based so + that all supporting tables are updated for DROP EVENT command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); pthread_mutex_lock(&LOCK_event_metadata); /* On error conditions my_error() is called so no need to handle here */ if (!(ret= db_repository->drop_event(thd, dbname, name, if_exists))) - event_queue->drop_event(thd, dbname, name); + { + if (event_queue) + event_queue->drop_event(thd, dbname, name); + /* Binlog the drop event. */ + DBUG_ASSERT(thd->query && thd->query_length); + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + } pthread_mutex_unlock(&LOCK_event_metadata); DBUG_RETURN(ret); } -/* +/** Drops all events from a schema - SYNOPSIS - Events::drop_schema_events() - thd Thread - db ASCIIZ schema name + @note We allow to drop all events in a schema even if the + scheduler is disabled. This is to not produce any warnings + in case of DROP DATABASE and a disabled scheduler. + + @param[in,out] thd Thread + @param[in] db ASCIIZ schema name */ void Events::drop_schema_events(THD *thd, char *db) { LEX_STRING const db_lex= { db, strlen(db) }; - - DBUG_ENTER("Events::drop_schema_events"); + + DBUG_ENTER("Events::drop_schema_events"); DBUG_PRINT("enter", ("dropping events from %s", db)); - if (unlikely(check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); - DBUG_VOID_RETURN; - } + + /* + sic: no check if the scheduler is disabled or system tables + are damaged, as intended. 
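
The DROP EVENT body above settles into a fixed sequence: implicit commit, sanity and privilege checks, force statement-based logging, and then, under the metadata lock, change the on-disk row, the in-memory queue and the binary log together. The sketch below only illustrates that ordering; every helper is a hypothetical stub, not the server's API.

    #include <cstdio>
    #include <mutex>

    static std::mutex LOCK_event_metadata;

    static bool commit_active_transaction() { return false; }   // false == success
    static bool drop_row(const char *db, const char *name, bool if_exists)
    { std::printf("delete from mysql.event: %s.%s (if exists=%d)\n", db, name, if_exists); return false; }
    static void drop_from_queue(const char *db, const char *name)
    { std::printf("queue: forget %s.%s\n", db, name); }
    static void binlog_statement(const char *query)
    { std::printf("binlog (statement-based): %s\n", query); }

    static bool drop_event(const char *db, const char *name, bool if_exists,
                           const char *query)
    {
      if (commit_active_transaction())          // DDL commits first, so the binlogged
        return true;                            // statement cannot vanish with a rollback
      std::lock_guard<std::mutex> lk(LOCK_event_metadata);
      if (drop_row(db, name, if_exists))
        return true;                            // error already reported
      drop_from_queue(db, name);                // only after the disk change succeeded
      binlog_statement(query);                  // replicate the original DDL text
      return false;
    }

    int main() { return drop_event("test", "e1", false, "DROP EVENT test.e1") ? 1 : 0; }
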
+ */ pthread_mutex_lock(&LOCK_event_metadata); - event_queue->drop_schema_events(thd, db_lex); + if (event_queue) + event_queue->drop_schema_events(thd, db_lex); db_repository->drop_schema_events(thd, db_lex); pthread_mutex_unlock(&LOCK_event_metadata); @@ -474,109 +677,137 @@ Events::drop_schema_events(THD *thd, char *db) } -/* - SHOW CREATE EVENT - - SYNOPSIS - Events::show_create_event() - thd Thread context - spn The name of the event (db, name) - - RETURN VALUE - FALSE OK - TRUE Error during writing to the wire +/** + A helper function to generate SHOW CREATE EVENT output from + a named event */ -bool -Events::show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) +static bool +send_show_create_event(THD *thd, Event_timed *et, Protocol *protocol) { - CHARSET_INFO *scs= system_charset_info; - int ret; - Event_timed *et= new Event_timed(); + char show_str_buf[10 * STRING_BUFFER_USUAL_SIZE]; + String show_str(show_str_buf, sizeof(show_str_buf), system_charset_info); + List<Item> field_list; + LEX_STRING sql_mode; + const String *tz_name; - DBUG_ENTER("Events::show_create_event"); - DBUG_PRINT("enter", ("name: %s@%s", dbname.str, name.str)); - if (unlikely(check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); + DBUG_ENTER("send_show_create_event"); + + show_str.length(0); + if (et->get_create_event(thd, &show_str)) DBUG_RETURN(TRUE); - } - ret= db_repository->load_named_event(thd, dbname, name, et); + field_list.push_back(new Item_empty_string("Event", NAME_CHAR_LEN)); - if (!ret) - { - Protocol *protocol= thd->protocol; - char show_str_buf[10 * STRING_BUFFER_USUAL_SIZE]; - String show_str(show_str_buf, sizeof(show_str_buf), scs); - List<Item> field_list; - byte *sql_mode_str; - ulong sql_mode_len=0; + if (sys_var_thd_sql_mode::symbolic_mode_representation(thd, et->sql_mode, + &sql_mode)) + DBUG_RETURN(TRUE); - show_str.length(0); - show_str.set_charset(system_charset_info); + field_list.push_back(new Item_empty_string("sql_mode", sql_mode.length)); - if (et->get_create_event(thd, &show_str)) - goto err; + tz_name= et->time_zone->get_name(); - field_list.push_back(new Item_empty_string("Event", NAME_LEN)); + field_list.push_back(new Item_empty_string("time_zone", + tz_name->length())); - sql_mode_str= - sys_var_thd_sql_mode::symbolic_mode_representation(thd, et->sql_mode, - &sql_mode_len); + field_list.push_back(new Item_empty_string("Create Event", + show_str.length())); - field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); - field_list.push_back(new Item_empty_string("Create Event", - show_str.length())); + protocol->prepare_for_resend(); - if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | - Protocol::SEND_EOF)) - goto err; + protocol->store(et->name.str, et->name.length, system_charset_info); + protocol->store(sql_mode.str, sql_mode.length, system_charset_info); + protocol->store(tz_name->ptr(), tz_name->length(), system_charset_info); + protocol->store(show_str.c_ptr(), show_str.length(), system_charset_info); - protocol->prepare_for_resend(); - protocol->store(et->name.str, et->name.length, scs); + if (protocol->write()) + DBUG_RETURN(TRUE); - protocol->store((char*) sql_mode_str, sql_mode_len, scs); + send_eof(thd); + + DBUG_RETURN(FALSE); +} + + +/** + Implement SHOW CREATE EVENT statement + + thd Thread context + spn The name of the event (db, name) + + @retval FALSE OK + @retval TRUE error (reported) +*/ + 
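
SHOW CREATE EVENT output now goes through one helper that sends a single four-column row (Event, sql_mode, time_zone, Create Event). The stub "protocol" below just prints what would be sent; it only mirrors the shape of the send_fields()/store()/write() sequence and is not the server's Protocol class.

    #include <cstdio>
    #include <string>
    #include <vector>

    struct StubProtocol {
      void send_fields(const std::vector<std::string> &names) {   // column metadata
        for (size_t i = 0; i < names.size(); i++)
          std::printf("%s%s", names[i].c_str(), i + 1 < names.size() ? " | " : "\n");
      }
      void store(const std::string &v) { row.push_back(v); }      // buffer one column
      void write() {                                              // flush the row
        for (size_t i = 0; i < row.size(); i++)
          std::printf("%s%s", row[i].c_str(), i + 1 < row.size() ? " | " : "\n");
        row.clear();
      }
      std::vector<std::string> row;
    };

    static void send_show_create(StubProtocol *p, const std::string &name,
                                 const std::string &sql_mode, const std::string &tz,
                                 const std::string &create_stmt)
    {
      p->send_fields({ "Event", "sql_mode", "time_zone", "Create Event" });
      p->store(name); p->store(sql_mode); p->store(tz); p->store(create_stmt);
      p->write();                                   // one row, then EOF in the real server
    }

    int main() {
      StubProtocol p;
      send_show_create(&p, "e1", "", "SYSTEM",
                       "CREATE EVENT e1 ON SCHEDULE EVERY 1 DAY DO SELECT 1");
      return 0;
    }
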
+bool +Events::show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name) +{ + Open_tables_state open_tables_backup; + Event_timed et; + bool ret; + + DBUG_ENTER("Events::show_create_event"); + DBUG_PRINT("enter", ("name: %s@%s", dbname.str, name.str)); + + if (check_if_system_tables_error()) + DBUG_RETURN(TRUE); + + if (check_access(thd, EVENT_ACL, dbname.str, 0, 0, 0, + is_schema_db(dbname.str))) + DBUG_RETURN(TRUE); + + /* + We would like to allow SHOW CREATE EVENT under LOCK TABLES and + in pre-locked mode. mysql.event table is marked as a system table. + This flag reduces the set of its participation scenarios in LOCK TABLES + operation, and therefore an out-of-bound open of this table + for reading like the one below (sic, only for reading) is + more or less deadlock-free. For additional information about when a + deadlock can occur please refer to the description of 'system table' + flag. + */ + thd->reset_n_backup_open_tables_state(&open_tables_backup); + ret= db_repository->load_named_event(thd, dbname, name, &et); + thd->restore_backup_open_tables_state(&open_tables_backup); + + if (!ret) + ret= send_show_create_event(thd, &et, thd->protocol); - protocol->store(show_str.c_ptr(), show_str.length(), scs); - ret= protocol->write(); - send_eof(thd); - } - delete et; DBUG_RETURN(ret); -err: - delete et; - DBUG_RETURN(TRUE); } -/* - Proxy for Event_db_repository::fill_schema_events. - Callback for I_S from sql_show.cc +/** + Check access rights and fill INFORMATION_SCHEMA.events table. - SYNOPSIS - Events::fill_schema_events() - thd Thread context - tables The schema table + @param[in,out] thd Thread context + @param[in] table The temporary table to fill. cond Unused - RETURN VALUE - 0 OK - !0 Error + In MySQL INFORMATION_SCHEMA tables are temporary tables that are + created and filled on demand. In this function, we fill + INFORMATION_SCHEMA.events. It is a callback for I_S module, invoked from + sql_show.cc + + @return Has to be integer, as such is the requirement of the I_S API + @retval 0 success + @retval 1 an error, pushed into the error stack */ int Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */) { char *db= NULL; + int ret; + Open_tables_state open_tables_backup; DBUG_ENTER("Events::fill_schema_events"); - Events *myself= get_instance(); - if (unlikely(myself->check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); - DBUG_RETURN(TRUE); - } + + if (check_if_system_tables_error()) + DBUG_RETURN(1); /* If it's SHOW EVENTS then thd->lex->select_lex.db is guaranteed not to @@ -590,7 +821,17 @@ Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */) DBUG_RETURN(1); db= thd->lex->select_lex.db; } - DBUG_RETURN(myself->db_repository->fill_schema_events(thd, tables, db)); + /* + Reset and backup of the currently open tables in this thread + is a way to allow SELECTs from INFORMATION_SCHEMA.events under + LOCK TABLES and in pre-locked mode. See also + Events::show_create_event for additional comments. 
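
Both SHOW CREATE EVENT and the I_S.EVENTS fill above wrap their read of mysql.event in reset_n_backup_open_tables_state()/restore_backup_open_tables_state() so the read works under LOCK TABLES and in pre-locked mode. That backup-then-restore discipline is easy to express as a scope guard; the state type and callbacks below are hypothetical stand-ins for the THD machinery.

    #include <cstdio>

    struct OpenTablesState { int n_open = 0; };     // stand-in for the thread's table state

    struct ThreadCtx {
      OpenTablesState state;
      void reset_and_backup(OpenTablesState *backup) { *backup = state; state = OpenTablesState(); }
      void restore(const OpenTablesState &backup)    { state = backup; }
    };

    class OpenTablesBackup {                        // backup on entry, restore on every exit path
    public:
      explicit OpenTablesBackup(ThreadCtx *thd) : thd_(thd) { thd_->reset_and_backup(&saved_); }
      ~OpenTablesBackup() { thd_->restore(saved_); }
    private:
      ThreadCtx *thd_;
      OpenTablesState saved_;
    };

    int main() {
      ThreadCtx thd;
      thd.state.n_open = 3;                         // pretend LOCK TABLES left tables open
      {
        OpenTablesBackup guard(&thd);               // out-of-band read of mysql.event happens here
        std::printf("during read: %d open tables\n", thd.state.n_open);
      }
      std::printf("after read:  %d open tables\n", thd.state.n_open);
      return 0;
    }
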
+ */ + thd->reset_n_backup_open_tables_state(&open_tables_backup); + ret= db_repository->fill_schema_events(thd, tables, db); + thd->restore_backup_open_tables_state(&open_tables_backup); + + DBUG_RETURN(ret); } @@ -609,14 +850,17 @@ Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */) */ bool -Events::init() +Events::init(my_bool opt_noacl) { THD *thd; bool res= FALSE; + DBUG_ENTER("Events::init"); - if (opt_event_scheduler == Events::EVENTS_DISABLED) - DBUG_RETURN(FALSE); + /* Disable the scheduler if running with --skip-grant-tables */ + if (opt_noacl) + opt_event_scheduler= EVENTS_DISABLED; + /* We need a temporary THD during boot */ if (!(thd= new THD())) @@ -632,30 +876,67 @@ Events::init() thd->thread_stack= (char*) &thd; thd->store_globals(); - if (check_system_tables(thd)) + /* + We will need Event_db_repository anyway, even if the scheduler is + disabled - to perform events DDL. + */ + if (!(db_repository= new Event_db_repository)) { - check_system_tables_error= TRUE; - sql_print_error("SCHEDULER: The system tables are damaged. " - "The scheduler subsystem will be unusable during this run."); + res= TRUE; /* fatal error: request unireg_abort */ goto end; } - check_system_tables_error= FALSE; - if (event_queue->init_queue(thd) || load_events_from_db(thd)) + /* + Since we allow event DDL even if the scheduler is disabled, + check the system tables, as we might need them. + */ + if (Event_db_repository::check_system_tables(thd)) { - sql_print_error("SCHEDULER: Error while loading from disk."); + sql_print_error("Event Scheduler: An error occurred when initializing " + "system tables.%s", + opt_event_scheduler == EVENTS_DISABLED ? + "" : " Disabling the Event Scheduler."); + + /* Disable the scheduler since the system tables are not up to date */ + opt_event_scheduler= EVENTS_DISABLED; + check_system_tables_error= TRUE; goto end; } - scheduler->init_scheduler(event_queue); + /* + Was disabled explicitly from the command line, or because we're running + with --skip-grant-tables, or because we have no system tables. 
+ */ + if (opt_event_scheduler == Events::EVENTS_DISABLED) + goto end; + DBUG_ASSERT(opt_event_scheduler == Events::EVENTS_ON || opt_event_scheduler == Events::EVENTS_OFF); - if (opt_event_scheduler == Events::EVENTS_ON) - res= scheduler->start(); - Event_worker_thread::init(this, db_repository); + if (!(event_queue= new Event_queue) || + !(scheduler= new Event_scheduler(event_queue))) + { + res= TRUE; /* fatal error: request unireg_abort */ + goto end; + } + + if (event_queue->init_queue(thd) || load_events_from_db(thd) || + opt_event_scheduler == EVENTS_ON && scheduler->start()) + { + sql_print_error("Event Scheduler: Error while loading from disk."); + res= TRUE; /* fatal error: request unireg_abort */ + goto end; + } + Event_worker_thread::init(db_repository); + end: + if (res) + { + delete db_repository; + delete event_queue; + delete scheduler; + } delete thd; /* Remember that we don't have a THD */ my_pthread_setspecific_ptr(THR_THD, NULL); @@ -678,19 +959,23 @@ void Events::deinit() { DBUG_ENTER("Events::deinit"); - if (likely(!check_system_tables_error)) - { - scheduler->stop(); - scheduler->deinit_scheduler(); - event_queue->deinit_queue(); + if (opt_event_scheduler != EVENTS_DISABLED) + { + delete scheduler; + scheduler= NULL; /* safety */ + delete event_queue; + event_queue= NULL; /* safety */ } + delete db_repository; + db_repository= NULL; /* safety */ + DBUG_VOID_RETURN; } -/* +/** Inits Events mutexes SYNOPSIS @@ -702,8 +987,6 @@ void Events::init_mutexes() { pthread_mutex_init(&LOCK_event_metadata, MY_MUTEX_INIT_FAST); - event_queue->init_mutexes(); - scheduler->init_mutexes(); } @@ -717,8 +1000,6 @@ Events::init_mutexes() void Events::destroy_mutexes() { - event_queue->deinit_mutexes(); - scheduler->deinit_mutexes(); pthread_mutex_destroy(&LOCK_event_metadata); } @@ -741,282 +1022,163 @@ Events::dump_internal_status() puts("LLA = Last Locked At LUA = Last Unlocked At"); puts("WOC = Waiting On Condition DL = Data Locked"); - scheduler->dump_internal_status(); - event_queue->dump_internal_status(); - - DBUG_VOID_RETURN; -} - - -/* - Starts execution of events by the scheduler - - SYNOPSIS - Events::start_execution_of_events() - - RETURN VALUE - FALSE OK - TRUE Error -*/ - -bool -Events::start_execution_of_events() -{ - DBUG_ENTER("Events::start_execution_of_events"); - if (unlikely(check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); - DBUG_RETURN(TRUE); - } - DBUG_RETURN(scheduler->start()); -} - - -/* - Stops execution of events by the scheduler. - Already running events will not be stopped. If the user needs - them stopped manual intervention is needed. - - SYNOPSIS - Events::stop_execution_of_events() - - RETURN VALUE - FALSE OK - TRUE Error -*/ - -bool -Events::stop_execution_of_events() -{ - DBUG_ENTER("Events::stop_execution_of_events"); - if (unlikely(check_system_tables_error)) + pthread_mutex_lock(&LOCK_event_metadata); + if (opt_event_scheduler == EVENTS_DISABLED) + puts("The Event Scheduler is disabled"); + else { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); - DBUG_RETURN(TRUE); + scheduler->dump_internal_status(); + event_queue->dump_internal_status(); } - DBUG_RETURN(scheduler->stop()); -} - -/* - Checks whether the scheduler is running or not. 
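
Events::init() above builds its parts in a fixed order (repository first, then queue and scheduler, then loading from disk and an optional start) and funnels every failure into one cleanup path that deletes whatever was already created. A skeleton of that shape, with hypothetical stubs in place of the real repository, queue and scheduler classes:

    #include <cstdio>
    #include <new>

    struct Repository { };
    struct Queue      { bool init() { return false; } };        // false == success
    struct Scheduler  {
      explicit Scheduler(Queue *q) : queue(q) {}
      bool start() { return false; }
      Queue *queue;
    };

    static bool module_init(bool scheduler_on)
    {
      Repository *repo = NULL; Queue *queue = NULL; Scheduler *sched = NULL;
      bool failed = true;

      if (!(repo = new (std::nothrow) Repository))      // needed even for plain event DDL
        goto end;
      if (!(queue = new (std::nothrow) Queue) ||
          !(sched = new (std::nothrow) Scheduler(queue)))
        goto end;
      if (queue->init() ||                              // loading from disk would happen here too
          (scheduler_on && sched->start()))
        goto end;
      failed = false;                                   // every stage succeeded

    end:
      if (failed) { delete sched; delete queue; delete repo; }  // one cleanup path
      return failed;                                    // true == request abort, as in the patch
    }

    int main() { return module_init(true) ? 1 : 0; }
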
- - SYNOPSIS - Events::is_started() - - RETURN VALUE - TRUE Yes - FALSE No -*/ - -bool -Events::is_execution_of_events_started() -{ - DBUG_ENTER("Events::is_execution_of_events_started"); - if (unlikely(check_system_tables_error)) - { - my_error(ER_EVENTS_DB_ERROR, MYF(0)); - DBUG_RETURN(FALSE); - } - DBUG_RETURN(scheduler->is_running()); + pthread_mutex_unlock(&LOCK_event_metadata); + DBUG_VOID_RETURN; } +/** + Starts or stops the event scheduler thread. -/* - Opens mysql.db and mysql.user and checks whether: - 1. mysql.db has column Event_priv at column 20 (0 based); - 2. mysql.user has column Event_priv at column 29 (0 based); - - SYNOPSIS - Events::check_system_tables() - thd Thread - - RETURN VALUE - FALSE OK - TRUE Error + @retval FALSE success + @retval TRUE error */ bool -Events::check_system_tables(THD *thd) +Events::switch_event_scheduler_state(enum_opt_event_scheduler new_state) { - TABLE_LIST tables; - Open_tables_state backup; bool ret= FALSE; - DBUG_ENTER("Events::check_system_tables"); - DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); - - thd->reset_n_backup_open_tables_state(&backup); + DBUG_ENTER("Events::switch_event_scheduler_state"); - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*) "db"; - tables.lock_type= TL_READ; + DBUG_ASSERT(new_state == Events::EVENTS_ON || + new_state == Events::EVENTS_OFF); - if ((ret= simple_open_n_lock_tables(thd, &tables))) - { - sql_print_error("SCHEDULER: Cannot open mysql.db"); - ret= TRUE; - } - ret= table_check_intact(tables.table, MYSQL_DB_FIELD_COUNT, - mysql_db_table_fields, &mysql_db_table_last_check, - ER_CANNOT_LOAD_FROM_TABLE); - close_thread_tables(thd); + /* + If the scheduler was disabled because there are no/bad + system tables, produce a more meaningful error message + than ER_OPTION_PREVENTS_STATEMENT + */ + if (check_if_system_tables_error()) + DBUG_RETURN(TRUE); - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*) "user"; - tables.lock_type= TL_READ; + pthread_mutex_lock(&LOCK_event_metadata); - if (simple_open_n_lock_tables(thd, &tables)) + if (opt_event_scheduler == EVENTS_DISABLED) { - sql_print_error("SCHEDULER: Cannot open mysql.user"); + my_error(ER_OPTION_PREVENTS_STATEMENT, + MYF(0), "--event-scheduler=DISABLED or --skip-grant-tables"); ret= TRUE; + goto end; } + + if (new_state == EVENTS_ON) + ret= scheduler->start(); else + ret= scheduler->stop(); + + if (ret) { - if (tables.table->s->fields < 29 || - strncmp(tables.table->field[29]->field_name, - STRING_WITH_LEN("Event_priv"))) - { - sql_print_error("mysql.user has no `Event_priv` column at position %d", - 29); - ret= TRUE; - } - close_thread_tables(thd); + my_error(ER_EVENT_SET_VAR_ERROR, MYF(0)); + goto end; } - thd->restore_backup_open_tables_state(&backup); + opt_event_scheduler= new_state; +end: + pthread_mutex_unlock(&LOCK_event_metadata); DBUG_RETURN(ret); } -/* - Loads all ENABLED events from mysql.event into the prioritized - queue. Called during scheduler main thread initialization. Compiles - the events. Creates Event_queue_element instances for every ENABLED event - from mysql.event. +/** + Loads all ENABLED events from mysql.event into a prioritized + queue. - SYNOPSIS - Events::load_events_from_db() - thd Thread context. Used for memory allocation in some cases. + This function is called during the server start up. 
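
SET GLOBAL event_scheduler now funnels through one function that holds the metadata lock, refuses the change when the scheduler was disabled at start up (with a clearer error than the generic one), and commits the cached state only if start()/stop() succeeded. A compact sketch of that control flow; the helpers and error strings are stand-ins.

    #include <cstdio>
    #include <mutex>

    enum State { EVENTS_OFF, EVENTS_ON, EVENTS_DISABLED };

    static std::mutex LOCK_event_metadata;
    static State opt_state = EVENTS_OFF;

    static bool scheduler_start() { return false; }   // false == success
    static bool scheduler_stop()  { return false; }

    static bool switch_state(State new_state)         // true == error, as in the patch
    {
      std::lock_guard<std::mutex> lk(LOCK_event_metadata);
      if (opt_state == EVENTS_DISABLED) {
        std::fprintf(stderr, "ER_OPTION_PREVENTS_STATEMENT: "
                     "--event-scheduler=DISABLED or --skip-grant-tables\n");
        return true;
      }
      bool err = (new_state == EVENTS_ON) ? scheduler_start() : scheduler_stop();
      if (err) {
        std::fprintf(stderr, "ER_EVENT_SET_VAR_ERROR\n");
        return true;
      }
      opt_state = new_state;                          // only committed on success
      return false;
    }

    int main() { return switch_state(EVENTS_ON) ? 1 : 0; }
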
It reads + every event, computes the next execution time, and if the event + needs execution, adds it to a prioritized queue. Otherwise, if + ON COMPLETION DROP is specified, the event is automatically + removed from the table. - RETURN VALUE - 0 OK - !0 Error (EVEX_OPEN_TABLE_FAILED, EVEX_MICROSECOND_UNSUP, - EVEX_COMPILE_ERROR) - in all these cases mysql.event was - tampered. + @param[in,out] thd Thread context. Used for memory allocation in some cases. - NOTES - Reports the error to the console + @retval FALSE success + @retval TRUE error, the load is aborted + + @note Reports the error to the console */ -int +bool Events::load_events_from_db(THD *thd) { TABLE *table; READ_RECORD read_record_info; - int ret= -1; + bool ret= TRUE; uint count= 0; - bool clean_the_queue= TRUE; DBUG_ENTER("Events::load_events_from_db"); DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd)); - if ((ret= db_repository->open_event_table(thd, TL_READ, &table))) + if (db_repository->open_event_table(thd, TL_WRITE, &table)) { - sql_print_error("SCHEDULER: Table mysql.event is damaged. Can not open"); - DBUG_RETURN(EVEX_OPEN_TABLE_FAILED); + sql_print_error("Event Scheduler: Failed to open table mysql.event"); + DBUG_RETURN(TRUE); } - init_read_record(&read_record_info, thd, table ,NULL,1,0); + init_read_record(&read_record_info, thd, table, NULL, 0, 1); while (!(read_record_info.read_record(&read_record_info))) { Event_queue_element *et; + bool created; + bool drop_on_completion; + if (!(et= new Event_queue_element)) - { - DBUG_PRINT("info", ("Out of memory")); - break; - } + goto end; + DBUG_PRINT("info", ("Loading event from row.")); - if ((ret= et->load_from_row(table))) + if (et->load_from_row(thd, table)) { - sql_print_error("SCHEDULER: Error while loading from mysql.event. " - "Table probably corrupted"); - break; - } - if (et->status != Event_queue_element::ENABLED) - { - DBUG_PRINT("info",("%s is disabled",et->name.str)); + sql_print_error("Event Scheduler: " + "Error while loading events from mysql.event. " + "The table probably contains bad data or is corrupted"); delete et; - continue; + goto end; } + drop_on_completion= (et->on_completion == + Event_queue_element::ON_COMPLETION_DROP); - /* let's find when to be executed */ - if (et->compute_next_execution_time()) + + if (event_queue->create_event(thd, et, &created)) { - sql_print_error("SCHEDULER: Error while computing execution time of %s.%s." - " Skipping", et->dbname.str, et->name.str); - continue; + /* Out of memory */ + delete et; + goto end; } - + if (created) + count++; + else if (drop_on_completion) { - Event_job_data temp_job_data; - DBUG_PRINT("info", ("Event %s loaded from row. ", et->name.str)); - - temp_job_data.load_from_row(table); - /* - We load only on scheduler root just to check whether the body - compiles. + If not created, a stale event - drop if immediately if + ON COMPLETION NOT PRESERVE */ - switch (ret= temp_job_data.compile(thd, thd->mem_root)) { - case EVEX_MICROSECOND_UNSUP: - sql_print_error("SCHEDULER: mysql.event is tampered. MICROSECOND is not " - "supported but found in mysql.event"); - break; - case EVEX_COMPILE_ERROR: - sql_print_error("SCHEDULER: Error while compiling %s.%s. 
Aborting load", - et->dbname.str, et->name.str); - break; - default: - break; + int rc= table->file->ha_delete_row(table->record[0]); + if (rc) + { + table->file->print_error(rc, MYF(0)); + goto end; } - thd->end_statement(); - thd->cleanup_after_query(); } - if (ret) - { - delete et; - goto end; - } - - DBUG_PRINT("load_events_from_db", ("Adding 0x%lx to the exec list.", - (long) et)); - event_queue->create_event(thd, et); - count++; } - clean_the_queue= FALSE; + sql_print_information("Event Scheduler: Loaded %d event%s", + count, (count == 1) ? "" : "s"); + ret= FALSE; + end: end_read_record(&read_record_info); - if (clean_the_queue) - { - event_queue->empty_queue(); - ret= -1; - } - else - { - ret= 0; - sql_print_information("SCHEDULER: Loaded %d event%s", count, - (count == 1)?"":"s"); - } - close_thread_tables(thd); - DBUG_PRINT("info", ("Status code %d. Loaded %d event(s)", ret, count)); DBUG_RETURN(ret); } diff --git a/sql/events.h b/sql/events.h index 35ee3c569d0..1b99b072fd7 100644 --- a/sql/events.h +++ b/sql/events.h @@ -15,11 +15,14 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -class sp_name; +/* + @file + A public interface of Events Scheduler module. +*/ + class Event_parse_data; class Event_db_repository; class Event_queue; -class Event_queue_element; class Event_scheduler; /* Return codes */ @@ -38,6 +41,26 @@ enum enum_events_error_code int sortcmp_lex_string(LEX_STRING s, LEX_STRING t, CHARSET_INFO *cs); +/** + @class Events -- a facade to the functionality of the Event Scheduler. + + Every public operation against the scheduler has to be executed via the + interface provided by a static method of this class. No instance of this + class is ever created and it has no non-static data members. + + The life cycle of the Events module is the following: + + At server start up: + set_opt_event_scheduler() -> init_mutexes() -> init() + When the server is running: + create_event(), drop_event(), start_or_stop_event_scheduler(), etc + At shutdown: + deinit(), destroy_mutexes(). + + The peculiar initialization and shutdown cycle is an adaptation to the + outside server startup/shutdown framework and mimics the rest of MySQL + subsystems (ACL, time zone tables, etc). 
+*/ class Events { @@ -50,47 +73,48 @@ public: EVENTS_DISABLED= 4 }; - static enum_opt_event_scheduler opt_event_scheduler; - static TYPELIB opt_typelib; - static TYPELIB var_typelib; + /* Possible values of @@event_scheduler variable */ + static const TYPELIB var_typelib; - bool - init(); + static bool + set_opt_event_scheduler(char *argument); - void - deinit(); + static const char * + get_opt_event_scheduler_str(); - void - init_mutexes(); + /* A hack needed for Event_queue_element */ + static Event_db_repository * + get_db_repository() { return db_repository; } - void - destroy_mutexes(); + static bool + init(my_bool opt_noacl); - bool - start_execution_of_events(); + static void + deinit(); - bool - stop_execution_of_events(); + static void + init_mutexes(); - bool - is_execution_of_events_started(); + static void + destroy_mutexes(); - static Events * - get_instance(); + static bool + switch_event_scheduler_state(enum enum_opt_event_scheduler new_state); - bool + static bool create_event(THD *thd, Event_parse_data *parse_data, bool if_exists); - bool - update_event(THD *thd, Event_parse_data *parse_data, sp_name *rename_to); + static bool + update_event(THD *thd, Event_parse_data *parse_data, + LEX_STRING *new_dbname, LEX_STRING *new_name); - bool + static bool drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists); - void + static void drop_schema_events(THD *thd, char *db); - bool + static bool show_create_event(THD *thd, LEX_STRING dbname, LEX_STRING name); /* Needed for both SHOW CREATE EVENT and INFORMATION_SCHEMA */ @@ -101,31 +125,28 @@ public: static int fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */); - void + static void dump_internal_status(); private: - bool - check_system_tables(THD *thd); + static bool check_if_system_tables_error(); - int + static bool load_events_from_db(THD *thd); - /* Singleton DP is used */ - Events(); - ~Events(){} - - /* Singleton instance */ - static Events singleton; - - Event_queue *event_queue; - Event_scheduler *scheduler; - Event_db_repository *db_repository; - - pthread_mutex_t LOCK_event_metadata; - - bool check_system_tables_error; +private: + /* Command line option names */ + static const TYPELIB opt_typelib; + static pthread_mutex_t LOCK_event_metadata; + static Event_queue *event_queue; + static Event_scheduler *scheduler; + static Event_db_repository *db_repository; + /* Current state of Event Scheduler */ + static enum enum_opt_event_scheduler opt_event_scheduler; + /* Set to TRUE if an error at start up */ + static bool check_system_tables_error; +private: /* Prevent use of these */ Events(const Events &); void operator=(Events &); diff --git a/sql/field.cc b/sql/field.cc index 488a5d591a3..82f8283ba56 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -968,6 +968,31 @@ static Item_result field_types_result_type [FIELDTYPE_NUM]= /* + Test if the given string contains important data: + not spaces for character string, + or any data for binary string. 
+ + SYNOPSIS + test_if_important_data() + cs Character set + str String to test + strend String end + + RETURN + FALSE - If string does not have important data + TRUE - If string has some important data +*/ + +static bool +test_if_important_data(CHARSET_INFO *cs, const char *str, const char *strend) +{ + if (cs != &my_charset_bin) + str+= cs->cset->scan(cs, str, strend, MY_SEQ_SPACES); + return (str < strend); +} + + +/* Detect Item_result by given field type of UNION merge result SYNOPSIS @@ -1015,6 +1040,7 @@ bool Field::type_can_have_key_part(enum enum_field_types type) case MYSQL_TYPE_BLOB: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: + case MYSQL_TYPE_GEOMETRY: return TRUE; default: return FALSE; @@ -1054,64 +1080,113 @@ void Field_num::prepend_zeros(String *value) } /* - Test if given number is a int (or a fixed format float with .000) + Test if given number is a int. SYNOPSIS - test_if_int() + Field_num::check_int + cs Character set str String to test end Pointer to char after last used digit - cs Character set - - NOTES - This is called after one has called my_strntol() or similar function. - This is only used to give warnings in ALTER TABLE or LOAD DATA... + length String length + error Error returned by strntoull10rnd() - TODO - Make this multi-byte-character safe + NOTE + This is called after one has called strntoull10rnd() function. RETURN - 0 OK - 1 error. A warning is pushed if field_name != 0 + 0 ok + 1 error: empty string or wrong integer. + 2 error: garbage at the end of string. */ -bool Field::check_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs) +int Field_num::check_int(CHARSET_INFO *cs, const char *str, int length, + const char *int_end, int error) { - const char *end; - if (str == int_end) + /* Test if we get an empty string or wrong integer */ + if (str == int_end || error == MY_ERRNO_EDOM) { char buff[128]; - String tmp(buff,(uint32) sizeof(buff), system_charset_info); + String tmp(buff, (uint32) sizeof(buff), system_charset_info); tmp.copy(str, length, system_charset_info); push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "integer", tmp.c_ptr(), field_name, (ulong) table->in_use->row_count); - return 1; // Empty string + return 1; } - end= str+length; - if ((str= int_end) == end) - return 0; // OK; All digits was used + /* Test if we have garbage at the end of the given string. */ + if (test_if_important_data(cs, int_end, str + length)) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + return 2; + } + return 0; +} - /* Allow end .0000 */ - if (*str == '.') + +/* + Conver a string to an integer then check bounds. + + SYNOPSIS + Field_num::get_int + cs Character set + from String to convert + len Length of the string + rnd OUT longlong value + unsigned_max max unsigned value + signed_min min signed value + signed_max max signed value + + DESCRIPTION + The function calls strntoull10rnd() to get an integer value then + check bounds and errors returned. In case of any error a warning + is raised. 
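
The new check_int()/get_int() pair above splits string-to-integer stores into two concerns: classify the parse (empty or invalid input, versus non-space garbage after the digits, versus harmless trailing spaces) and clamp the value into the column's range with a warning. This standalone sketch reproduces that behaviour with strtoll; it ignores character sets and the unsigned path for brevity, and the names are illustrative only.

    #include <cctype>
    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>

    enum StoreResult { STORE_OK, STORE_BAD_VALUE, STORE_TRUNCATED, STORE_OUT_OF_RANGE };

    static StoreResult store_signed(const char *from, long long range_min,
                                    long long range_max, long long *out)
    {
      char *end;
      errno = 0;
      long long v = std::strtoll(from, &end, 10);

      if (end == from)                              // nothing parsed: "", "abc"
        return STORE_BAD_VALUE;
      if (errno == ERANGE || v < range_min || v > range_max) {
        *out = v < range_min ? range_min : range_max;   // clamp, like get_int()
        return STORE_OUT_OF_RANGE;
      }
      *out = v;
      for (; *end; end++)                           // trailing spaces are fine,
        if (!std::isspace((unsigned char) *end))    // anything else is "important data"
          return STORE_TRUNCATED;
      return STORE_OK;
    }

    int main() {
      long long v;
      std::printf("'300' into TINYINT -> %d (v=%lld)\n", store_signed("300", -128, 127, &v), v);
      std::printf("'12 ' into TINYINT -> %d (v=%lld)\n", store_signed("12 ", -128, 127, &v), v);
      std::printf("'12x' into TINYINT -> %d (v=%lld)\n", store_signed("12x", -128, 127, &v), v);
      return 0;
    }
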
+ + RETURN + 0 ok + 1 error +*/ + +bool Field_num::get_int(CHARSET_INFO *cs, const char *from, uint len, + longlong *rnd, ulonglong unsigned_max, + longlong signed_min, longlong signed_max) +{ + char *end; + int error; + + *rnd= (longlong) cs->cset->strntoull10rnd(cs, from, len, unsigned_flag, &end, + &error); + if (unsigned_flag) { - for (str++ ; str != end && *str == '0'; str++) - ; + + if (((ulonglong) *rnd > unsigned_max) && (*rnd= (longlong) unsigned_max) || + error == MY_ERRNO_ERANGE) + { + goto out_of_range; + } } - /* Allow end space */ - for ( ; str != end ; str++) + else { - if (!my_isspace(cs,*str)) + if (*rnd < signed_min) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); - return 1; + *rnd= signed_min; + goto out_of_range; + } + else if (*rnd > signed_max) + { + *rnd= signed_max; + goto out_of_range; } } + if (table->in_use->count_cuted_fields && check_int(cs, from, len, end, error)) + return 1; return 0; -} +out_of_range: + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; +} /* Process decimal library return codes and issue warnings for overflow and @@ -1207,13 +1282,13 @@ String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val) { ASSERT_COLUMN_MARKED_FOR_READ; CHARSET_INFO *cs= &my_charset_bin; - uint length= 21; + uint length; longlong value= val_int(); - if (val_buffer->alloc(length)) + if (val_buffer->alloc(MY_INT64_NUM_DECIMAL_DIGITS)) return 0; length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(), - length, + MY_INT64_NUM_DECIMAL_DIGITS, unsigned_val ? 10 : -10, value); val_buffer->length(length); @@ -1499,7 +1574,7 @@ uint Field::fill_cache_field(CACHE_FIELD *copy) } -bool Field::get_date(TIME *ltime,uint fuzzydate) +bool Field::get_date(MYSQL_TIME *ltime,uint fuzzydate) { char buff[40]; String tmp(buff,sizeof(buff),&my_charset_bin),*res; @@ -1510,7 +1585,7 @@ bool Field::get_date(TIME *ltime,uint fuzzydate) return 0; } -bool Field::get_time(TIME *ltime) +bool Field::get_time(MYSQL_TIME *ltime) { char buff[40]; String tmp(buff,sizeof(buff),&my_charset_bin),*res; @@ -1527,7 +1602,7 @@ bool Field::get_time(TIME *ltime) Needs to be changed if/when we want to support different time formats */ -int Field::store_time(TIME *ltime, timestamp_type type_arg) +int Field::store_time(MYSQL_TIME *ltime, timestamp_type type_arg) { ASSERT_COLUMN_MARKED_FOR_WRITE; char buff[MAX_DATE_STRING_REP_LENGTH]; @@ -2475,7 +2550,7 @@ int Field_new_decimal::store_decimal(const my_decimal *decimal_value) } -int Field_new_decimal::store_time(TIME *ltime, timestamp_type t_type) +int Field_new_decimal::store_time(MYSQL_TIME *ltime, timestamp_type t_type) { my_decimal decimal_value; return store_value(date2my_decimal(ltime, &decimal_value)); @@ -2568,45 +2643,11 @@ uint Field_new_decimal::is_equal(create_field *new_field) int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - char *end; int error; - - if (unsigned_flag) - { - ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); - if (error == MY_ERRNO_ERANGE || tmp > 255) - { - set_if_smaller(tmp, 255); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; - ptr[0]= (char) tmp; - } - else - { - longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); - if (tmp < -128) - { - tmp= -128; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, 
ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp >= 128) - { - tmp= 127; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; - ptr[0]= (char) tmp; - } + longlong rnd; + + error= get_int(cs, from, len, &rnd, 255, -128, 127); + ptr[0]= unsigned_flag ? (char) (ulonglong) rnd : (char) rnd; return error; } @@ -2777,59 +2818,20 @@ void Field_tiny::sql_type(String &res) const int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - char *end; + int store_tmp; int error; - - if (unsigned_flag) - { - ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); - if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX16) - { - set_if_smaller(tmp, UINT_MAX16); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; + longlong rnd; + + error= get_int(cs, from, len, &rnd, UINT_MAX16, INT_MIN16, INT_MAX16); + store_tmp= unsigned_flag ? (int) (ulonglong) rnd : (int) rnd; #ifdef WORDS_BIGENDIAN - if (table->s->db_low_byte_first) - { - int2store(ptr,tmp); - } - else -#endif - shortstore(ptr,(short) tmp); + if (table->s->db_low_byte_first) + { + int2store(ptr, store_tmp); } else - { - longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); - if (tmp < INT_MIN16) - { - tmp= INT_MIN16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp > INT_MAX16) - { - tmp=INT_MAX16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; -#ifdef WORDS_BIGENDIAN - if (table->s->db_low_byte_first) - { - int2store(ptr,tmp); - } - else #endif - shortstore(ptr,(short) tmp); - } + shortstore(ptr, (short) store_tmp); return error; } @@ -3063,45 +3065,13 @@ void Field_short::sql_type(String &res) const int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - char *end; + int store_tmp; int error; - - if (unsigned_flag) - { - ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); - if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX24) - { - set_if_smaller(tmp, UINT_MAX24); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; - int3store(ptr,tmp); - } - else - { - longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); - if (tmp < INT_MIN24) - { - tmp= INT_MIN24; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp > INT_MAX24) - { - tmp=INT_MAX24; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; - int3store(ptr,tmp); - } + longlong rnd; + + error= get_int(cs, from, len, &rnd, UINT_MAX24, INT_MIN24, INT_MAX24); + store_tmp= unsigned_flag ? 
(int) (ulonglong) rnd : (int) rnd; + int3store(ptr, store_tmp); return error; } @@ -3287,45 +3257,10 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs) ASSERT_COLUMN_MARKED_FOR_WRITE; long store_tmp; int error; - char *end; - - if (unsigned_flag) - { - ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); - if (error == MY_ERRNO_ERANGE || tmp > (ulonglong) UINT_MAX32) - { - set_if_smaller(tmp, (ulonglong) UINT_MAX32); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; - store_tmp= (long) tmp; - } - else - { - longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); - if (tmp < INT_MIN32) - { - tmp= INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp > INT_MAX32) - { - tmp=INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) - error= 1; - else - error= 0; - store_tmp= (long) tmp; - } - + longlong rnd; + + error= get_int(cs, from, len, &rnd, UINT_MAX32, INT_MIN32, INT_MAX32); + store_tmp= unsigned_flag ? (long) (ulonglong) rnd : (long) rnd; #ifdef WORDS_BIGENDIAN if (table->s->db_low_byte_first) { @@ -3577,7 +3512,8 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + else if (table->in_use->count_cuted_fields && + check_int(cs, from, len, end, error)) error= 1; else error= 0; @@ -4521,7 +4457,7 @@ timestamp_auto_set_type Field_timestamp::get_auto_set_type() const int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME l_time; + MYSQL_TIME l_time; my_time_t tmp= 0; int error; bool have_smth_to_conv; @@ -4592,7 +4528,7 @@ int Field_timestamp::store(double nr) int Field_timestamp::store(longlong nr, bool unsigned_val) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME l_time; + MYSQL_TIME l_time; my_time_t timestamp= 0; int error; my_bool in_dst_time_gap; @@ -4651,7 +4587,7 @@ longlong Field_timestamp::val_int(void) { ASSERT_COLUMN_MARKED_FOR_READ; uint32 temp; - TIME time_tmp; + MYSQL_TIME time_tmp; THD *thd= table ? table->in_use : current_thd; #ifdef WORDS_BIGENDIAN @@ -4677,7 +4613,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) { ASSERT_COLUMN_MARKED_FOR_READ; uint32 temp, temp2; - TIME time_tmp; + MYSQL_TIME time_tmp; THD *thd= table ? table->in_use : current_thd; char *to; @@ -4746,7 +4682,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) } -bool Field_timestamp::get_date(TIME *ltime, uint fuzzydate) +bool Field_timestamp::get_date(MYSQL_TIME *ltime, uint fuzzydate) { long temp; THD *thd= table ? 
table->in_use : current_thd; @@ -4770,7 +4706,7 @@ bool Field_timestamp::get_date(TIME *ltime, uint fuzzydate) return 0; } -bool Field_timestamp::get_time(TIME *ltime) +bool Field_timestamp::get_time(MYSQL_TIME *ltime) { return Field_timestamp::get_date(ltime,0); } @@ -4778,7 +4714,7 @@ bool Field_timestamp::get_time(TIME *ltime) bool Field_timestamp::send_binary(Protocol *protocol) { - TIME tm; + MYSQL_TIME tm; Field_timestamp::get_date(&tm, 0); return protocol->store(&tm); } @@ -4854,7 +4790,7 @@ void Field_timestamp::set_time() int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) { - TIME ltime; + MYSQL_TIME ltime; long tmp; int error= 0; int warning; @@ -4869,9 +4805,12 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) else { if (warning & MYSQL_TIME_WARN_TRUNCATED) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + { + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_TIME, 1); + error= 1; + } if (warning & MYSQL_TIME_WARN_OUT_OF_RANGE) { set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, @@ -4882,8 +4821,6 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) if (ltime.month) ltime.day=0; tmp=(ltime.day*24L+ltime.hour)*10000L+(ltime.minute*100+ltime.second); - if (error > 1) - error= 2; } if (ltime.neg) @@ -4893,7 +4830,7 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) } -int Field_time::store_time(TIME *ltime, timestamp_type time_type) +int Field_time::store_time(MYSQL_TIME *ltime, timestamp_type time_type) { long tmp= ((ltime->month ? 0 : ltime->day * 24L) + ltime->hour) * 10000L + (ltime->minute * 100 + ltime->second); @@ -5002,7 +4939,7 @@ String *Field_time::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { ASSERT_COLUMN_MARKED_FOR_READ; - TIME ltime; + MYSQL_TIME ltime; val_buffer->alloc(19); long tmp=(long) sint3korr(ptr); ltime.neg= 0; @@ -5026,7 +4963,7 @@ String *Field_time::val_str(String *val_buffer, DATE_FORMAT(time, "%l.%i %p") */ -bool Field_time::get_date(TIME *ltime, uint fuzzydate) +bool Field_time::get_date(MYSQL_TIME *ltime, uint fuzzydate) { long tmp; THD *thd= table ? 
table->in_use : current_thd; @@ -5054,7 +4991,7 @@ bool Field_time::get_date(TIME *ltime, uint fuzzydate) } -bool Field_time::get_time(TIME *ltime) +bool Field_time::get_time(MYSQL_TIME *ltime) { long tmp=(long) sint3korr(ptr); ltime->neg=0; @@ -5076,7 +5013,7 @@ bool Field_time::get_time(TIME *ltime) bool Field_time::send_binary(Protocol *protocol) { - TIME tm; + MYSQL_TIME tm; Field_time::get_time(&tm); tm.day= tm.hour/24; // Move hours to days tm.hour-= tm.day*24; @@ -5115,16 +5052,25 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) ASSERT_COLUMN_MARKED_FOR_WRITE; char *end; int error; - long nr= my_strntol(cs, from, len, 10, &end, &error); + longlong nr= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); - if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155 || error) + if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155 || + error == MY_ERRNO_ERANGE) { *ptr=0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } - if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + if (table->in_use->count_cuted_fields && + (error= check_int(cs, from, len, end, error))) + { + if (error == 1) /* empty or incorrect string */ + { + *ptr= 0; + return 1; + } error= 1; + } if (nr != 0 || len != 4) { @@ -5225,7 +5171,7 @@ void Field_year::sql_type(String &res) const int Field_date::store(const char *from, uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME l_time; + MYSQL_TIME l_time; uint32 tmp; int error; THD *thd= table ? table->in_use : current_thd; @@ -5282,7 +5228,7 @@ int Field_date::store(double nr) int Field_date::store(longlong nr, bool unsigned_val) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME not_used; + MYSQL_TIME not_used; int error; longlong initial_nr= nr; THD *thd= table ? table->in_use : current_thd; @@ -5323,7 +5269,7 @@ int Field_date::store(longlong nr, bool unsigned_val) bool Field_date::send_binary(Protocol *protocol) { longlong tmp= Field_date::val_int(); - TIME tm; + MYSQL_TIME tm; tm.year= (uint32) tmp/10000L % 10000; tm.month= (uint32) tmp/100 % 100; tm.day= (uint32) tmp % 100; @@ -5363,7 +5309,7 @@ String *Field_date::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { ASSERT_COLUMN_MARKED_FOR_READ; - TIME ltime; + MYSQL_TIME ltime; val_buffer->alloc(field_length); int32 tmp; #ifdef WORDS_BIGENDIAN @@ -5432,10 +5378,27 @@ void Field_date::sql_type(String &res) const ** In number context: YYYYMMDD ****************************************************************************/ +/* + Store string into a date field + + SYNOPSIS + Field_newdate::store() + from Date string + len Length of date field + cs Character set (not used) + + RETURN + 0 ok + 1 Value was cut during conversion + 2 Wrong date string + 3 Datetime value that was cut (warning level NOTE) +*/ + int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME l_time; + long tmp; + MYSQL_TIME l_time; int error; THD *thd= table ? 
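// The Field_year::store range test above, nr < 0 || nr >= 100 && nr <= 1900
// || nr > 2155, relies on && binding tighter than ||: accepted values are
// 0..99 (two-digit years) and 1901..2155.  A tiny sketch with explicit
// parentheses; it mirrors only the range test, not the warning/NOTE handling.
#include <cstdio>

static bool year_in_range(long long nr)
{
  // reject nr < 0, 100..1900, or > 2155
  return !((nr < 0) || (nr >= 100 && nr <= 1900) || (nr > 2155));
}

int main()
{
  const long long probes[]= { -1, 0, 69, 99, 100, 1900, 1901, 2155, 2156 };
  for (long long p : probes)
    std::printf("%lld -> %s\n", p, year_in_range(p) ? "ok" : "out of range");
  return 0;
}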
table->in_use : current_thd; enum enum_mysql_timestamp_type ret; @@ -5446,20 +5409,23 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) MODE_INVALID_DATES))), &error)) <= MYSQL_TIMESTAMP_ERROR) { - int3store(ptr,0L); + tmp= 0; error= 2; } else { - int3store(ptr, l_time.day + l_time.month*32 + l_time.year*16*32); - if(!error && (ret != MYSQL_TIMESTAMP_DATE)) - return 2; + tmp= l_time.day + l_time.month*32 + l_time.year*16*32; + if (!error && (ret != MYSQL_TIMESTAMP_DATE)) + error= 3; // Datetime was cut (note) } if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + set_datetime_warning(error == 3 ? MYSQL_ERROR::WARN_LEVEL_NOTE : + MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATE, 1); + int3store(ptr, tmp); return error; } @@ -5480,7 +5446,7 @@ int Field_newdate::store(double nr) int Field_newdate::store(longlong nr, bool unsigned_val) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME l_time; + MYSQL_TIME l_time; longlong tmp; int error; THD *thd= table ? table->in_use : current_thd; @@ -5507,7 +5473,7 @@ int Field_newdate::store(longlong nr, bool unsigned_val) } -int Field_newdate::store_time(TIME *ltime,timestamp_type time_type) +int Field_newdate::store_time(MYSQL_TIME *ltime,timestamp_type time_type) { ASSERT_COLUMN_MARKED_FOR_WRITE; long tmp; @@ -5542,7 +5508,7 @@ int Field_newdate::store_time(TIME *ltime,timestamp_type time_type) bool Field_newdate::send_binary(Protocol *protocol) { - TIME tm; + MYSQL_TIME tm; Field_newdate::get_date(&tm,0); return protocol->store_date(&tm); } @@ -5593,7 +5559,7 @@ String *Field_newdate::val_str(String *val_buffer, } -bool Field_newdate::get_date(TIME *ltime,uint fuzzydate) +bool Field_newdate::get_date(MYSQL_TIME *ltime,uint fuzzydate) { uint32 tmp=(uint32) uint3korr(ptr); ltime->day= tmp & 31; @@ -5606,7 +5572,7 @@ bool Field_newdate::get_date(TIME *ltime,uint fuzzydate) } -bool Field_newdate::get_time(TIME *ltime) +bool Field_newdate::get_time(MYSQL_TIME *ltime) { return Field_newdate::get_date(ltime,0); } @@ -5645,7 +5611,7 @@ void Field_newdate::sql_type(String &res) const int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME time_tmp; + MYSQL_TIME time_tmp; int error; ulonglong tmp= 0; enum enum_mysql_timestamp_type func_res; @@ -5698,7 +5664,7 @@ int Field_datetime::store(double nr) int Field_datetime::store(longlong nr, bool unsigned_val) { ASSERT_COLUMN_MARKED_FOR_WRITE; - TIME not_used; + MYSQL_TIME not_used; int error; longlong initial_nr= nr; THD *thd= table ? 
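// Field_newdate::store above packs the date into three bytes as
// day + month*32 + year*16*32, and get_date recovers the day with tmp & 31.
// A round-trip sketch of that encoding; the month/year extraction shown here
// is the corresponding inverse (the matching lines fall outside the hunk).
#include <cstdint>
#include <cstdio>

static uint32_t pack_newdate(unsigned year, unsigned month, unsigned day)
{
  return day + month * 32 + year * 16 * 32;      // == day | (month << 5) | (year << 9)
}

static void unpack_newdate(uint32_t v, unsigned *year, unsigned *month, unsigned *day)
{
  *day=   v & 31;                                // matches "ltime->day= tmp & 31" above
  *month= (v >> 5) & 15;
  *year=  v >> 9;
}

int main()
{
  uint32_t v= pack_newdate(2007, 4, 24);
  unsigned y, m, d;
  unpack_newdate(v, &y, &m, &d);
  std::printf("packed=%u -> %04u-%02u-%02u\n", v, y, m, d);   // 2007-04-24
  return 0;
}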
table->in_use : current_thd; @@ -5733,7 +5699,7 @@ int Field_datetime::store(longlong nr, bool unsigned_val) } -int Field_datetime::store_time(TIME *ltime,timestamp_type time_type) +int Field_datetime::store_time(MYSQL_TIME *ltime,timestamp_type time_type) { ASSERT_COLUMN_MARKED_FOR_WRITE; longlong tmp; @@ -5779,7 +5745,7 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type time_type) bool Field_datetime::send_binary(Protocol *protocol) { - TIME tm; + MYSQL_TIME tm; Field_datetime::get_date(&tm, TIME_FUZZY_DATE); return protocol->store(&tm); } @@ -5853,7 +5819,7 @@ String *Field_datetime::val_str(String *val_buffer, return val_buffer; } -bool Field_datetime::get_date(TIME *ltime, uint fuzzydate) +bool Field_datetime::get_date(MYSQL_TIME *ltime, uint fuzzydate) { longlong tmp=Field_datetime::val_int(); uint32 part1,part2; @@ -5872,7 +5838,7 @@ bool Field_datetime::get_date(TIME *ltime, uint fuzzydate) return (!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? 1 : 0; } -bool Field_datetime::get_time(TIME *ltime) +bool Field_datetime::get_time(MYSQL_TIME *ltime) { return Field_datetime::get_date(ltime,0); } @@ -6033,31 +5999,6 @@ report_data_too_long(Field_str *field) } -/* - Test if the given string contains important data: - not spaces for character string, - or any data for binary string. - - SYNOPSIS - test_if_important_data() - cs Character set - str String to test - strend String end - - RETURN - FALSE - If string does not have important data - TRUE - If string has some important data -*/ - -static bool -test_if_important_data(CHARSET_INFO *cs, const char *str, const char *strend) -{ - if (cs != &my_charset_bin) - str+= cs->cset->scan(cs, str, strend, MY_SEQ_SPACES); - return (str < strend); -} - - /* Copy a string and fill with space */ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) @@ -8505,9 +8446,28 @@ char *Field_bit::pack(char *to, const char *from, uint max_length) { DBUG_ASSERT(max_length); uint length; - if (bit_len) + if (bit_len > 0) { - uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + /* + We have the following: + + ptr Points into a field in record R1 + from Points to a field in a record R2 + bit_ptr Points to the byte (in the null bytes) that holds the + odd bits of R1 + from_bitp Points to the byte that holds the odd bits of R2 + + We have the following: + + ptr - bit_ptr = from - from_bitp + + We want to isolate 'from_bitp', so this gives: + + ptr - bit_ptr - from = - from_bitp + - ptr + bit_ptr + from = from_bitp + bit_ptr + from - ptr = from_bitp + */ + uchar bits= get_rec_bits(bit_ptr + (from - ptr), bit_ofs, bit_len); *to++= bits; } length= min(bytes_in_rec, max_length - (bit_len > 0)); @@ -8518,9 +8478,16 @@ char *Field_bit::pack(char *to, const char *from, uint max_length) const char *Field_bit::unpack(char *to, const char *from) { - if (bit_len) + if (bit_len > 0) { - set_rec_bits(*from, bit_ptr, bit_ofs, bit_len); + /* + set_rec_bits is a macro, don't put the post-increment in the + argument since that might cause strange side-effects. + + For the choice of the second argument, see the explanation for + Field_bit::pack(). 
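// The comment in Field_bit::pack above derives from_bitp = bit_ptr + (from - ptr):
// both records share one layout, so any intra-record pointer of R1 is translated
// into R2 by adding the distance between the two record buffers.  A small demo
// of that identity with plain byte buffers (offsets are illustrative stand-ins).
#include <cassert>
#include <cstdio>

int main()
{
  unsigned char record1[16]= {0};
  unsigned char record2[16]= {0};

  unsigned char *ptr=     record1 + 5;   // field in R1
  unsigned char *bit_ptr= record1 + 1;   // byte holding the odd bits of R1
  unsigned char *from=    record2 + 5;   // same field in R2

  // ptr - bit_ptr == from - from_bitp  =>  from_bitp == bit_ptr + (from - ptr)
  unsigned char *from_bitp= bit_ptr + (from - ptr);
  assert(from_bitp == record2 + 1);
  std::printf("odd-bits byte of R2 sits at offset %td\n", from_bitp - record2);  // 1
  return 0;
}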
+ */ + set_rec_bits(*from, bit_ptr + (to - ptr), bit_ofs, bit_len); from++; } memcpy(to, from, bytes_in_rec); @@ -8954,11 +8921,6 @@ bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, break; case MYSQL_TYPE_SET: { - if (fld_interval_list->elements > sizeof(longlong)*8) - { - my_error(ER_TOO_BIG_SET, MYF(0), fld_name); /* purecov: inspected */ - DBUG_RETURN(TRUE); - } pack_length= get_set_pack_length(fld_interval_list->elements); List_iterator<String> it(*fld_interval_list); @@ -9413,10 +9375,13 @@ uint32 Field_blob::max_display_length() NOTE This function won't produce warning and increase cut fields counter - if count_cuted_fields == FIELD_CHECK_IGNORE for current thread. + if count_cuted_fields == CHECK_FIELD_IGNORE for current thread. + + if count_cuted_fields == CHECK_FIELD_IGNORE then we ignore notes. + This allows us to avoid notes in optimisation, like convert_constant_item(). RETURN VALUE - 1 if count_cuted_fields == FIELD_CHECK_IGNORE + 1 if count_cuted_fields == CHECK_FIELD_IGNORE and error level is not NOTE 0 otherwise */ @@ -9436,7 +9401,7 @@ Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code, thd->row_count); return 0; } - return 1; + return level >= MYSQL_ERROR::WARN_LEVEL_WARN; } @@ -9464,9 +9429,10 @@ Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, uint code, timestamp_type ts_type, int cuted_increment) { THD *thd= table ? table->in_use : current_thd; - if (thd->really_abort_on_warning() || + if ((thd->really_abort_on_warning() && + level >= MYSQL_ERROR::WARN_LEVEL_WARN) || set_warning(level, code, cuted_increment)) - make_truncated_value_warning(thd, str, str_length, ts_type, + make_truncated_value_warning(thd, level, str, str_length, ts_type, field_name); } @@ -9499,7 +9465,7 @@ Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, uint code, { char str_nr[22]; char *str_end= longlong10_to_str(nr, str_nr, -10); - make_truncated_value_warning(thd, str_nr, (uint) (str_end - str_nr), + make_truncated_value_warning(thd, level, str_nr, (uint) (str_end - str_nr), ts_type, field_name); } } @@ -9532,7 +9498,7 @@ Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, uint code, /* DBL_DIG is enough to print '-[digits].E+###' */ char str_nr[DBL_DIG + 8]; uint str_len= my_sprintf(str_nr, (str_nr, "%g", nr)); - make_truncated_value_warning(thd, str_nr, str_len, ts_type, + make_truncated_value_warning(thd, level, str_nr, str_len, ts_type, field_name); } } diff --git a/sql/field.h b/sql/field.h index f27ed8b9394..441ff9079c1 100644 --- a/sql/field.h +++ b/sql/field.h @@ -98,7 +98,7 @@ public: virtual int store(double nr)=0; virtual int store(longlong nr, bool unsigned_val)=0; virtual int store_decimal(const my_decimal *d)=0; - virtual int store_time(TIME *ltime, timestamp_type t_type); + virtual int store_time(MYSQL_TIME *ltime, timestamp_type t_type); virtual double val_real(void)=0; virtual longlong val_int(void)=0; virtual my_decimal *val_decimal(my_decimal *); @@ -193,9 +193,9 @@ public: */ virtual void sql_type(String &str) const =0; virtual uint size_of() const =0; // For new field - inline bool is_null(uint row_offset=0) + inline bool is_null(my_ptrdiff_t row_offset= 0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : table->null_row; } - inline bool is_real_null(uint row_offset=0) + inline bool is_real_null(my_ptrdiff_t row_offset= 0) { return null_ptr ? (null_ptr[row_offset] & null_bit ? 
1 : 0) : 0; } inline bool is_null_in_record(const uchar *record) { @@ -210,9 +210,9 @@ public: return 0; return test(null_ptr[offset] & null_bit); } - inline void set_null(int row_offset=0) + inline void set_null(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]|= null_bit; } - inline void set_notnull(int row_offset=0) + inline void set_notnull(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } @@ -347,8 +347,8 @@ public: } void copy_from_tmp(int offset); uint fill_cache_field(struct st_cache_field *copy); - virtual bool get_date(TIME *ltime,uint fuzzydate); - virtual bool get_time(TIME *ltime); + virtual bool get_date(MYSQL_TIME *ltime,uint fuzzydate); + virtual bool get_time(MYSQL_TIME *ltime); virtual CHARSET_INFO *charset(void) const { return &my_charset_bin; } virtual CHARSET_INFO *sort_charset(void) const { return charset(); } virtual bool has_charset(void) const { return FALSE; } @@ -358,8 +358,6 @@ public: virtual void set_derivation(enum Derivation derivation_arg) { } bool set_warning(MYSQL_ERROR::enum_warning_level, unsigned int code, int cuted_increment); - bool check_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs); void set_datetime_warning(MYSQL_ERROR::enum_warning_level, uint code, const char *str, uint str_len, timestamp_type ts_type, int cuted_increment); @@ -445,6 +443,11 @@ public: int store_decimal(const my_decimal *); my_decimal *val_decimal(my_decimal *); uint is_equal(create_field *new_field); + int check_int(CHARSET_INFO *cs, const char *str, int length, + const char *int_end, int error); + bool get_int(CHARSET_INFO *cs, const char *from, uint len, + longlong *rnd, ulonglong unsigned_max, + longlong signed_min, longlong signed_max); }; @@ -504,6 +507,7 @@ public: {} int store_decimal(const my_decimal *); my_decimal *val_decimal(my_decimal *); + uint32 max_display_length() { return field_length; } }; @@ -532,7 +536,6 @@ public: void overflow(bool negative); bool zero_pack() const { return 0; } void sql_type(String &str) const; - uint32 max_display_length() { return field_length; } }; @@ -564,7 +567,7 @@ public: int store(const char *to, uint length, CHARSET_INFO *charset); int store(double nr); int store(longlong nr, bool unsigned_val); - int store_time(TIME *ltime, timestamp_type t_type); + int store_time(MYSQL_TIME *ltime, timestamp_type t_type); int store_decimal(const my_decimal *); double val_real(void); longlong val_int(void); @@ -783,7 +786,6 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(float); } void sql_type(String &str) const; - uint32 max_display_length() { return 24; } }; @@ -825,7 +827,6 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(double); } void sql_type(String &str) const; - uint32 max_display_length() { return 53; } uint size_of() const { return sizeof(*this); } }; @@ -909,8 +910,8 @@ public: longget(tmp,ptr); return tmp; } - bool get_date(TIME *ltime,uint fuzzydate); - bool get_time(TIME *ltime); + bool get_date(MYSQL_TIME *ltime,uint fuzzydate); + bool get_time(MYSQL_TIME *ltime); timestamp_auto_set_type get_auto_set_type() const; }; @@ -983,7 +984,7 @@ public: int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr, bool unsigned_val); - int store_time(TIME *ltime, timestamp_type type); + 
int store_time(MYSQL_TIME *ltime, timestamp_type type); int reset(void) { ptr[0]=ptr[1]=ptr[2]=0; return 0; } double val_real(void); longlong val_int(void); @@ -995,8 +996,8 @@ public: void sql_type(String &str) const; bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 1; } - bool get_date(TIME *ltime,uint fuzzydate); - bool get_time(TIME *ltime); + bool get_date(MYSQL_TIME *ltime,uint fuzzydate); + bool get_time(MYSQL_TIME *ltime); }; @@ -1015,7 +1016,7 @@ public: enum_field_types type() const { return MYSQL_TYPE_TIME;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; } enum Item_result cmp_type () const { return INT_RESULT; } - int store_time(TIME *ltime, timestamp_type type); + int store_time(MYSQL_TIME *ltime, timestamp_type type); int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr, bool unsigned_val); @@ -1023,9 +1024,9 @@ public: double val_real(void); longlong val_int(void); String *val_str(String*,String *); - bool get_date(TIME *ltime, uint fuzzydate); + bool get_date(MYSQL_TIME *ltime, uint fuzzydate); bool send_binary(Protocol *protocol); - bool get_time(TIME *ltime); + bool get_time(MYSQL_TIME *ltime); int cmp(const char *,const char*); void sort_string(char *buff,uint length); uint32 pack_length() const { return 3; } @@ -1056,7 +1057,7 @@ public: int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr, bool unsigned_val); - int store_time(TIME *ltime, timestamp_type type); + int store_time(MYSQL_TIME *ltime, timestamp_type type); int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; @@ -1072,8 +1073,8 @@ public: void sql_type(String &str) const; bool can_be_compared_as_longlong() const { return TRUE; } bool zero_pack() const { return 1; } - bool get_date(TIME *ltime,uint fuzzydate); - bool get_time(TIME *ltime); + bool get_date(MYSQL_TIME *ltime,uint fuzzydate); + bool get_time(MYSQL_TIME *ltime); }; @@ -1355,7 +1356,7 @@ public: int store_decimal(const my_decimal *); void get_key_image(char *buff,uint length,imagetype type); uint size_of() const { return sizeof(*this); } - int reset(void) { return !maybe_null(); } + int reset(void) { return !maybe_null() || Field_blob::reset(); } }; #endif /*HAVE_SPATIAL*/ diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 805aba01754..9771cbc12b1 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -173,7 +173,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) if (field == field->table->next_number_field) { field->table->auto_increment_field_not_null= FALSE; - return 0; // field is set in handler.cc + return 0; // field is set in fill_record() } if (field->table->in_use->count_cuted_fields == CHECK_FIELD_WARN) { @@ -336,6 +336,13 @@ static void do_field_real(Copy_field *copy) } +static void do_field_decimal(Copy_field *copy) +{ + my_decimal value; + copy->to_field->store_decimal(copy->from_field->val_decimal(&value)); +} + + /* string copy for single byte characters set when to string is shorter than from string @@ -426,6 +433,26 @@ static void do_varstring1(Copy_field *copy) } +static void do_varstring1_mb(Copy_field *copy) +{ + int well_formed_error; + CHARSET_INFO *cs= copy->from_field->charset(); + uint from_length= (uint) *(uchar*) copy->from_ptr; + const char *from_ptr= copy->from_ptr + 1; + uint to_char_length= (copy->to_length - 1) / cs->mbmaxlen; + uint length= cs->cset->well_formed_len(cs, from_ptr, from_ptr + 
from_length, + to_char_length, &well_formed_error); + if (length < from_length) + { + if (current_thd->count_cuted_fields) + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, 1); + } + *(uchar*) copy->to_ptr= (uchar) length; + memcpy(copy->to_ptr + 1, from_ptr, length); +} + + static void do_varstring2(Copy_field *copy) { uint length=uint2korr(copy->from_ptr); @@ -451,6 +478,12 @@ static void do_varstring2_mb(Copy_field *copy) const char *from_beg= copy->from_ptr + HA_KEY_BLOB_LENGTH; uint length= cs->cset->well_formed_len(cs, from_beg, from_beg + from_length, char_length, &well_formed_error); + if (length < from_length) + { + if (current_thd->count_cuted_fields) + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, 1); + } int2store(copy->to_ptr, length); memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, from_beg, length); } @@ -580,6 +613,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) if (to->real_type() == MYSQL_TYPE_BIT || from->real_type() == MYSQL_TYPE_BIT) return do_field_int; + if (to->result_type() == DECIMAL_RESULT) + return do_field_decimal; // Check if identical fields if (from->result_type() == STRING_RESULT) { @@ -624,8 +659,10 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) return do_field_string; if (to_length != from_length) return (((Field_varstring*) to)->length_bytes == 1 ? - do_varstring1 : (from->charset()->mbmaxlen == 1 ? - do_varstring2 : do_varstring2_mb)); + (from->charset()->mbmaxlen == 1 ? do_varstring1 : + do_varstring1_mb) : + (from->charset()->mbmaxlen == 1 ? do_varstring2 : + do_varstring2_mb)); } else if (to_length < from_length) return (from->charset()->mbmaxlen == 1 ? diff --git a/sql/filesort.cc b/sql/filesort.cc index 2f9a96472ca..a80e4a0fa54 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1374,7 +1374,10 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, } else { - switch ((sortorder->result_type=sortorder->item->result_type())) { + sortorder->result_type= sortorder->item->result_type(); + if (sortorder->item->result_as_longlong()) + sortorder->result_type= INT_RESULT; + switch (sortorder->result_type) { case STRING_RESULT: sortorder->length=sortorder->item->max_length; set_if_smaller(sortorder->length, thd->variables.max_sort_length); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 872d97a6fa8..7f5c1cfd494 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -24,6 +24,7 @@ #endif #include "mysql_priv.h" +#include "rpl_mi.h" #include <my_dir.h> #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE @@ -133,7 +134,7 @@ static uint ndbcluster_alter_table_flags(uint flags) } static int ndbcluster_inited= 0; -static int ndbcluster_terminating= 0; +int ndbcluster_terminating= 0; static Ndb* g_ndb= NULL; Ndb_cluster_connection* g_ndb_cluster_connection= NULL; @@ -349,7 +350,7 @@ Thd_ndb::Thd_ndb() count= 0; all= NULL; stmt= NULL; - error= 0; + m_error= FALSE; query_state&= NDB_QUERY_NORMAL; options= 0; (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0, @@ -384,7 +385,7 @@ void Thd_ndb::init_open_tables() { count= 0; - error= 0; + m_error= FALSE; my_hash_reset(&open_tables); } @@ -460,7 +461,7 @@ ha_rows ha_ndbcluster::records() } THD *thd= current_thd; - if (get_thd_ndb(thd)->error) + if (get_thd_ndb(thd)->m_error) local_info->no_uncommitted_rows_count= 0; DBUG_RETURN(retval + local_info->no_uncommitted_rows_count); @@ -480,7 +481,10 @@ int ha_ndbcluster::records_update() { Ndb *ndb= get_ndb(); struct Ndb_statistics stat; - 
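// do_varstring1_mb above truncates with cs->cset->well_formed_len() so a
// multi-byte character is never split and the target's character limit is
// respected before raising WARN_DATA_TRUNCATED.  A standalone sketch of the
// same idea for UTF-8 only; the helper below is a simplified stand-in, not
// the server's well_formed_len.
#include <cstdio>
#include <cstring>

// Largest byte length covering at most max_chars whole UTF-8 characters.
static size_t utf8_well_formed_len(const char *s, size_t len, size_t max_chars)
{
  size_t bytes= 0, chars= 0;
  while (bytes < len && chars < max_chars)
  {
    unsigned char c= (unsigned char) s[bytes];
    size_t seq= (c < 0x80) ? 1 : ((c >> 5) == 0x6) ? 2 : ((c >> 4) == 0xE) ? 3 : 4;
    if (bytes + seq > len)
      break;                                   // incomplete trailing sequence: stop before it
    bytes+= seq;
    chars++;
  }
  return bytes;
}

int main()
{
  const char *from= "\xC3\xA5\xC3\xA4\xC3\xB6";   // "åäö": 3 characters, 6 bytes
  const size_t to_length= 7;                      // 1 length byte + 6 data bytes
  const size_t mbmaxlen= 3;                       // assumed max bytes per character
  char to[to_length];

  size_t from_len= std::strlen(from);
  size_t to_char_length= (to_length - 1) / mbmaxlen;      // characters guaranteed to fit
  size_t len= utf8_well_formed_len(from, from_len, to_char_length);

  if (len < from_len)
    std::printf("would raise WARN_DATA_TRUNCATED\n");     // as the copy routine does
  to[0]= (char) len;                                      // 1-byte length prefix
  std::memcpy(to + 1, from, len);
  std::printf("copied %zu of %zu bytes\n", len, from_len);  // 4 of 6
  return 0;
}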
ndb->setDatabaseName(m_dbname); + if (ndb->setDatabaseName(m_dbname)) + { + return my_errno= HA_ERR_OUT_OF_MEM; + } result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat); if (result == 0) { @@ -491,7 +495,7 @@ int ha_ndbcluster::records_update() } { THD *thd= current_thd; - if (get_thd_ndb(thd)->error) + if (get_thd_ndb(thd)->m_error) local_info->no_uncommitted_rows_count= 0; } if (result == 0) @@ -504,7 +508,7 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure() if (m_ha_not_exact_count) return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); - get_thd_ndb(current_thd)->error= 1; + get_thd_ndb(current_thd)->m_error= TRUE; DBUG_VOID_RETURN; } @@ -528,7 +532,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset"); Thd_ndb *thd_ndb= get_thd_ndb(thd); thd_ndb->count++; - thd_ndb->error= 0; + thd_ndb->m_error= FALSE; DBUG_VOID_RETURN; } @@ -733,10 +737,9 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_PRINT("info", ("bit field")); DBUG_DUMP("value", (char*)&bits, pack_len); #ifdef WORDS_BIGENDIAN - if (pack_len < 5) - { - DBUG_RETURN(ndb_op->setValue(fieldnr, ((char*)&bits)+4) != 0); - } + /* store lsw first */ + bits = ((bits >> 32) & 0x00000000FFFFFFFF) + | ((bits << 32) & 0xFFFFFFFF00000000); #endif DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits) != 0); } @@ -875,7 +878,11 @@ int get_ndb_blobs_value(TABLE* table, NdbValue* value_array, DBUG_PRINT("info", ("allocate blobs buffer size %u", offset)); buffer= my_malloc(offset, MYF(MY_WME)); if (buffer == NULL) + { + sql_print_error("ha_ndbcluster::get_ndb_blobs_value: " + "my_malloc(%u) failed", offset); DBUG_RETURN(-1); + } buffer_size= offset; } } @@ -1008,7 +1015,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_ASSERT(m_table == NULL); DBUG_ASSERT(m_table_info == NULL); - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; /* @@ -1067,6 +1074,12 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, if (data.unique_index_attrid_map) my_free((char*)data.unique_index_attrid_map, MYF(0)); data.unique_index_attrid_map= (uchar*)my_malloc(sz,MYF(MY_WME)); + if (data.unique_index_attrid_map == 0) + { + sql_print_error("fix_unique_index_attr_order: my_malloc(%u) failure", + (unsigned int)sz); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; @@ -1104,7 +1117,7 @@ int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab) KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; DBUG_ENTER("ha_ndbcluster::create_indexes"); - + for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) { index_name= *key_name; @@ -2713,6 +2726,13 @@ int ha_ndbcluster::write_row(byte *record) op->setValue(no_fields, part_func_value); } + if (unlikely(m_slow_path)) + { + if (!(thd->options & OPTION_BIN_LOG)) + op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); + else if (thd->slave_thread) + op->setAnyValue(thd->server_id); + } m_rows_changed++; /* @@ -2993,8 +3013,21 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) no_fields++; op->setValue(no_fields, part_func_value); } - // Execute update operation - if (!cursor && execute_no_commit(this,trans,FALSE) != 0) { + + if (unlikely(m_slow_path)) + { + if (!(thd->options & OPTION_BIN_LOG)) + op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); + else if (thd->slave_thread) + op->setAnyValue(thd->server_id); + } + /* + 
Execute update operation if we are not doing a scan for update + and there exist UPDATE AFTER triggers + */ + + if ((!cursor || m_update_cannot_batch) && + execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3048,7 +3081,16 @@ int ha_ndbcluster::delete_row(const byte *record) no_uncommitted_rows_update(-1); - if (!m_primary_key_update) + if (unlikely(m_slow_path)) + { + if (!(thd->options & OPTION_BIN_LOG)) + ((NdbOperation *)trans->getLastDefinedOperation())-> + setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); + else if (thd->slave_thread) + ((NdbOperation *)trans->getLastDefinedOperation())-> + setAnyValue(thd->server_id); + } + if (!(m_primary_key_update || m_delete_cannot_batch)) // If deleting from cursor, NoCommit will be handled in next_result DBUG_RETURN(0); } @@ -3077,6 +3119,14 @@ int ha_ndbcluster::delete_row(const byte *record) if ((error= set_primary_key_from_record(op, record))) DBUG_RETURN(error); } + + if (unlikely(m_slow_path)) + { + if (!(thd->options & OPTION_BIN_LOG)) + op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); + else if (thd->slave_thread) + op->setAnyValue(thd->server_id); + } } // Execute delete operation @@ -3109,11 +3159,22 @@ void ndb_unpack_record(TABLE *table, NdbValue *value, my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); DBUG_ENTER("ndb_unpack_record"); - // Set null flag(s) - bzero(buf, table->s->null_bytes); + /* + Set the filler bits of the null byte, since they are + not touched in the code below. + + The filler bits are the MSBs in the last null byte + */ + if (table->s->null_bytes > 0) + buf[table->s->null_bytes - 1]|= 256U - (1U << + table->s->last_null_bit_pos); + /* + Set null flag(s) + */ for ( ; field; p_field++, value++, field= *p_field) { + field->set_notnull(row_offset); if ((*value).ptr) { if (!(field->flags & BLOB_FLAG)) @@ -3123,7 +3184,7 @@ void ndb_unpack_record(TABLE *table, NdbValue *value, { if (is_null > 0) { - DBUG_PRINT("info",("[%u] NULL", + DBUG_PRINT("info",("[%u] NULL", (*value).rec->getColumn()->getColumnNo())); field->set_null(row_offset); } @@ -3155,10 +3216,21 @@ void ndb_unpack_record(TABLE *table, NdbValue *value, else { DBUG_PRINT("info", ("bit field H'%.8X%.8X", - *(Uint32*) (*value).rec->aRef(), - *((Uint32*) (*value).rec->aRef()+1))); - field_bit->Field_bit::store((longlong) (*value).rec->u_64_value(), + *(Uint32 *)(*value).rec->aRef(), + *((Uint32 *)(*value).rec->aRef()+1))); +#ifdef WORDS_BIGENDIAN + /* lsw is stored first */ + Uint32 *buf= (Uint32 *)(*value).rec->aRef(); + field_bit->Field_bit::store((((longlong)*buf) + & 0x000000000FFFFFFFF) + | + ((((longlong)*(buf+1)) << 32) + & 0xFFFFFFFF00000000), TRUE); +#else + field_bit->Field_bit::store((longlong) + (*value).rec->u_64_value(), TRUE); +#endif } /* Move back internal field pointer to point to original @@ -3370,19 +3442,6 @@ int ha_ndbcluster::index_read(byte *buf, } -int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, - const byte *key, uint key_len, - enum ha_rkey_function find_flag) -{ - statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); - DBUG_ENTER("ha_ndbcluster::index_read_idx"); - DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); - close_scan(); - index_init(index_no, 0); - DBUG_RETURN(index_read(buf, key, key_len, find_flag)); -} - - int ha_ndbcluster::index_next(byte *buf) { DBUG_ENTER("ha_ndbcluster::index_next"); @@ -3549,10 +3608,10 @@ int ha_ndbcluster::close_scan() m_multi_cursor= 0; if (!m_active_cursor 
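// ndb_unpack_record above pre-sets the unused filler bits of the last null
// byte with 256U - (1U << last_null_bit_pos): that value has exactly the bits
// from last_null_bit_pos up to 7 set, so the real null flags below it are
// left untouched.  A two-line demonstration of the expression.
#include <cstdio>

int main()
{
  // e.g. with 3 null bits in use, last_null_bit_pos == 3:
  // 256 - (1 << 3) == 248 == 0xF8 == 0b11111000
  for (unsigned last_null_bit_pos= 1; last_null_bit_pos <= 7; last_null_bit_pos++)
  {
    unsigned filler= 256U - (1U << last_null_bit_pos);
    std::printf("last_null_bit_pos=%u filler=0x%02X\n", last_null_bit_pos, filler);
  }
  return 0;
}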
&& !m_multi_cursor) - DBUG_RETURN(1); + DBUG_RETURN(0); NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor; - + if (m_lock_tuple) { /* @@ -3783,7 +3842,10 @@ int ha_ndbcluster::info(uint flag) Ndb *ndb= get_ndb(); ndb->setDatabaseName(m_dbname); struct Ndb_statistics stat; - ndb->setDatabaseName(m_dbname); + if (ndb->setDatabaseName(m_dbname)) + { + DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM); + } if (current_thd->variables.ndb_use_exact_count && (result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat)) == 0) @@ -3876,7 +3938,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_WRITE_CAN_REPLACE: DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); - if (!m_has_unique_index) + if (!m_has_unique_index || + current_thd->slave_thread) /* always set if slave, quick fix for bug 27378 */ { DBUG_PRINT("info", ("Turning ON use of write instead of insert")); m_use_write= TRUE; @@ -3887,6 +3950,14 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); m_use_write= FALSE; break; + case HA_EXTRA_DELETE_CANNOT_BATCH: + DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH")); + m_delete_cannot_batch= TRUE; + break; + case HA_EXTRA_UPDATE_CANNOT_BATCH: + DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH")); + m_update_cannot_batch= TRUE; + break; default: break; } @@ -3906,6 +3977,14 @@ int ha_ndbcluster::reset() */ if (m_part_info) bitmap_set_all(&m_part_info->used_partitions); + + /* reset flags set by extra calls */ + m_ignore_dup_key= FALSE; + m_use_write= FALSE; + m_ignore_no_key= FALSE; + m_delete_cannot_batch= FALSE; + m_update_cannot_batch= FALSE; + DBUG_RETURN(0); } @@ -4119,6 +4198,60 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, - refresh list of the indexes for the table if needed (if altered) */ +#ifdef HAVE_NDB_BINLOG +extern MASTER_INFO *active_mi; +static int ndbcluster_update_apply_status(THD *thd, int do_update) +{ + return 0; + + Thd_ndb *thd_ndb= get_thd_ndb(thd); + Ndb *ndb= thd_ndb->ndb; + NDBDICT *dict= ndb->getDictionary(); + const NDBTAB *ndbtab; + NdbTransaction *trans= thd_ndb->all ? 
thd_ndb->all : thd_ndb->stmt; + ndb->setDatabaseName(NDB_REP_DB); + Ndb_table_guard ndbtab_g(dict, NDB_APPLY_TABLE); + if (!(ndbtab= ndbtab_g.get_table())) + { + return -1; + } + NdbOperation *op= 0; + int r= 0; + r|= (op= trans->getNdbOperation(ndbtab)) == 0; + DBUG_ASSERT(r == 0); + if (do_update) + r|= op->updateTuple(); + else + r|= op->writeTuple(); + DBUG_ASSERT(r == 0); + // server_id + r|= op->equal(0u, (Uint32)thd->server_id); + DBUG_ASSERT(r == 0); + if (!do_update) + { + // epoch + r|= op->setValue(1u, (Uint64)0); + DBUG_ASSERT(r == 0); + } + // log_name + char tmp_buf[FN_REFLEN]; + ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf, + active_mi->rli.group_master_log_name, + strlen(active_mi->rli.group_master_log_name)); + r|= op->setValue(2u, tmp_buf); + DBUG_ASSERT(r == 0); + // start_pos + r|= op->setValue(3u, (Uint64)active_mi->rli.group_master_log_pos); + DBUG_ASSERT(r == 0); + // end_pos + r|= op->setValue(4u, (Uint64)active_mi->rli.group_master_log_pos + + ((Uint64)active_mi->rli.future_event_relay_log_pos - + (Uint64)active_mi->rli.group_relay_log_pos)); + DBUG_ASSERT(r == 0); + return 0; +} +#endif /* HAVE_NDB_BINLOG */ + int ha_ndbcluster::external_lock(THD *thd, int lock_type) { int error=0; @@ -4147,8 +4280,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) { m_transaction_on= FALSE; /* Would be simpler if has_transactions() didn't always say "yes" */ - thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; - thd->no_trans_update= TRUE; + thd->no_trans_update.all= thd->no_trans_update.stmt= TRUE; } else if (!thd->transaction.on) m_transaction_on= FALSE; @@ -4169,6 +4301,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) thd_ndb->init_open_tables(); thd_ndb->stmt= trans; thd_ndb->query_state&= NDB_QUERY_NORMAL; + thd_ndb->trans_options= 0; + thd_ndb->m_slow_path= FALSE; + if (thd->slave_thread || + !(thd->options & OPTION_BIN_LOG)) + thd_ndb->m_slow_path= TRUE; trans_register_ha(thd, FALSE, ndbcluster_hton); } else @@ -4185,6 +4322,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) thd_ndb->init_open_tables(); thd_ndb->all= trans; thd_ndb->query_state&= NDB_QUERY_NORMAL; + thd_ndb->trans_options= 0; + thd_ndb->m_slow_path= FALSE; + if (thd->slave_thread || + !(thd->options & OPTION_BIN_LOG)) + thd_ndb->m_slow_path= TRUE; trans_register_ha(thd, TRUE, ndbcluster_hton); /* @@ -4225,7 +4367,14 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) // Start of transaction m_rows_changed= 0; m_ops_pending= 0; - + m_slow_path= thd_ndb->m_slow_path; +#ifdef HAVE_NDB_BINLOG + if (unlikely(m_slow_path)) + { + if (m_share == ndb_apply_status_share && thd->slave_thread) + thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS; + } +#endif // TODO remove double pointers... m_thd_ndb_share= thd_ndb->get_open_table(thd, m_table); m_table_info= &m_thd_ndb_share->stat; @@ -4366,7 +4515,18 @@ static int ndbcluster_commit(handlerton *hton, THD *thd, bool all) DBUG_PRINT("transaction",("%s", trans == thd_ndb->stmt ? 
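// The write_row/update_row/delete_row hunks above tag each NDB operation via
// setAnyValue() when m_slow_path is set: the reserved "no logging" marker when
// binary logging is off for the statement, the originating server id when the
// change comes from the slave thread, and the default 0 otherwise.  A sketch
// of that decision; NOLOGGING_MARK is an assumed placeholder, since the real
// NDB_ANYVALUE_FOR_NOLOGGING constant is defined outside this diff.
#include <cstdint>
#include <cstdio>

static const uint32_t NOLOGGING_MARK= 0xFFFFFFFFu;   // assumption, see lead-in

static uint32_t any_value_for_op(bool binlog_on, bool slave_thread,
                                 uint32_t slave_server_id)
{
  if (!binlog_on)
    return NOLOGGING_MARK;        // downstream binlog injector must skip this row
  if (slave_thread)
    return slave_server_id;       // keep the id of the server the change came from
  return 0;                       // default: "this server"
}

int main()
{
  std::printf("0x%X\n", any_value_for_op(false, false, 0));   // no-logging marker
  std::printf("%u\n",   any_value_for_op(true,  true,  42));  // 42
  std::printf("%u\n",   any_value_for_op(true,  false, 0));   // 0
  return 0;
}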
"stmt" : "all")); - DBUG_ASSERT(ndb && trans); + DBUG_ASSERT(ndb); + if (trans == NULL) + DBUG_RETURN(0); + +#ifdef HAVE_NDB_BINLOG + if (unlikely(thd_ndb->m_slow_path)) + { + if (thd->slave_thread) + ndbcluster_update_apply_status + (thd, thd_ndb->trans_options & TNTO_INJECTED_APPLY_STATUS); + } +#endif /* HAVE_NDB_BINLOG */ if (execute_commit(thd,trans) != 0) { @@ -4456,7 +4616,10 @@ static int create_ndb_column(NDBCOL &col, HA_CREATE_INFO *info) { // Set name - col.setName(field->field_name); + if (col.setName(field->field_name)) + { + return (my_errno= errno); + } // Get char set CHARSET_INFO *cs= field->charset(); // Set type and sizes @@ -4745,10 +4908,11 @@ int ha_ndbcluster::create(const char *name, NDBTAB tab; NDBCOL col; uint pack_length, length, i, pk_length= 0; - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE); bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE); char tablespace[FN_LEN]; + NdbDictionary::Table::SingleUserMode single_user_mode= NdbDictionary::Table::SingleUserModeLocked; DBUG_ENTER("ha_ndbcluster::create"); DBUG_PRINT("enter", ("name: %s", name)); @@ -4800,19 +4964,26 @@ int ha_ndbcluster::create(const char *name, schema distribution table is setup ( unless it is a creation of the schema dist table itself ) */ - if (!ndb_schema_share && - !(strcmp(m_dbname, NDB_REP_DB) == 0 && - strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) + if (!ndb_schema_share) { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_RETURN(HA_ERR_NO_CONNECTION); + if (!(strcmp(m_dbname, NDB_REP_DB) == 0 && + strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) + { + DBUG_PRINT("info", ("Schema distribution table not setup")); + DBUG_RETURN(HA_ERR_NO_CONNECTION); + } + single_user_mode = NdbDictionary::Table::SingleUserModeReadWrite; } #endif /* HAVE_NDB_BINLOG */ DBUG_PRINT("table", ("name: %s", m_tabname)); - tab.setName(m_tabname); + if (tab.setName(m_tabname)) + { + DBUG_RETURN(my_errno= errno); + } tab.setLogging(!(create_info->options & HA_LEX_CREATE_TMP_TABLE)); - + tab.setSingleUserMode(single_user_mode); + // Save frm data for this table if (readfrm(name, &data, &length)) DBUG_RETURN(1); @@ -4826,21 +4997,48 @@ int ha_ndbcluster::create(const char *name, my_free((char*)data, MYF(0)); my_free((char*)pack_data, MYF(0)); + if (create_info->storage_media == HA_SM_DISK) + { + if (create_info->tablespace) + tab.setTablespaceName(create_info->tablespace); + else + tab.setTablespaceName("DEFAULT-TS"); + } + else if (create_info->tablespace) + { + if (create_info->storage_media == HA_SM_MEMORY) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_ILLEGAL_HA_CREATE_OPTION, + ER(ER_ILLEGAL_HA_CREATE_OPTION), + ndbcluster_hton_name, + "TABLESPACE currently only supported for " + "STORAGE DISK"); + DBUG_RETURN(HA_ERR_UNSUPPORTED); + } + tab.setTablespaceName(create_info->tablespace); + create_info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk + } + for (i= 0; i < form->s->fields; i++) { Field *field= form->field[i]; - DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d", + DBUG_PRINT("info", ("name: %s type: %u pack_length: %d", field->field_name, field->real_type(), field->pack_length())); if ((my_errno= create_ndb_column(col, field, create_info))) DBUG_RETURN(my_errno); - if (create_info->storage_media == HA_SM_DISK) + if (create_info->storage_media == HA_SM_DISK || + create_info->tablespace) 
col.setStorageType(NdbDictionary::Column::StorageTypeDisk); else col.setStorageType(NdbDictionary::Column::StorageTypeMemory); - tab.addColumn(col); + if (tab.addColumn(col)) + { + DBUG_RETURN(my_errno= errno); + } if (col.getPrimaryKey()) pk_length += (field->pack_length() + 3) / 4; } @@ -4855,40 +5053,23 @@ int ha_ndbcluster::create(const char *name, NdbDictionary::Column::StorageTypeMemory); } - if (create_info->storage_media == HA_SM_DISK) - { - if (create_info->tablespace) - tab.setTablespaceName(create_info->tablespace); - else - tab.setTablespaceName("DEFAULT-TS"); - } - else if (create_info->tablespace) - { - if (create_info->storage_media == HA_SM_MEMORY) - { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "TABLESPACE currently only supported for " - "STORAGE DISK"); - DBUG_RETURN(HA_ERR_UNSUPPORTED); - } - tab.setTablespaceName(create_info->tablespace); - create_info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk - } - // No primary key, create shadow key as 64 bit, auto increment if (form->s->primary_key == MAX_KEY) { DBUG_PRINT("info", ("Generating shadow key")); - col.setName("$PK"); + if (col.setName("$PK")) + { + DBUG_RETURN(my_errno= errno); + } col.setType(NdbDictionary::Column::Bigunsigned); col.setLength(1); col.setNullable(FALSE); col.setPrimaryKey(TRUE); col.setAutoIncrement(TRUE); - tab.addColumn(col); + if (tab.addColumn(col)) + { + DBUG_RETURN(my_errno= errno); + } pk_length += 2; } @@ -5077,7 +5258,7 @@ int ha_ndbcluster::create_handler_files(const char *file, { Ndb* ndb; const NDBTAB *tab; - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; int error= 0; @@ -5236,13 +5417,19 @@ int ha_ndbcluster::create_ndb_index(const char *name, // TODO Only temporary ordered indexes supported ndb_index.setLogging(FALSE); } - ndb_index.setTable(m_tabname); + if (ndb_index.setTable(m_tabname)) + { + DBUG_RETURN(my_errno= errno); + } for (; key_part != end; key_part++) { Field *field= key_part->field; DBUG_PRINT("info", ("attr: %s", field->field_name)); - ndb_index.addColumnName(field->field_name); + if (ndb_index.addColumnName(field->field_name)) + { + DBUG_RETURN(my_errno= errno); + } } if (dict->createIndex(ndb_index, *m_table)) @@ -5403,7 +5590,10 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) } // Change current database to that of target table set_dbname(to); - ndb->setDatabaseName(m_dbname); + if (ndb->setDatabaseName(m_dbname)) + { + ERR_RETURN(ndb->getNdbError()); + } NdbDictionary::Table new_tab= *orig_tab; new_tab.setName(new_tabname); @@ -5575,6 +5765,7 @@ retry_temporary_error1: { ndb_table_id= h->m_table->getObjectId(); ndb_table_version= h->m_table->getObjectVersion(); + DBUG_PRINT("info", ("success 1")); } else { @@ -5588,6 +5779,7 @@ retry_temporary_error1: break; } res= ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", ("error(1) %u", res)); } h->release_metadata(thd, ndb); } @@ -5604,6 +5796,8 @@ retry_temporary_error1: { ndb_table_id= ndbtab_g.get_table()->getObjectId(); ndb_table_version= ndbtab_g.get_table()->getObjectVersion(); + DBUG_PRINT("info", ("success 2")); + break; } else { @@ -5623,8 +5817,8 @@ retry_temporary_error1: } } } - else - res= ndb_to_mysql_error(&dict->getNdbError()); + res= ndb_to_mysql_error(&dict->getNdbError()); + DBUG_PRINT("info", ("error(2) %u", res)); break; } } @@ -5829,6 +6023,8 @@ 
ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg): m_bulk_insert_rows((ha_rows) 1024), m_rows_changed((ha_rows) 0), m_bulk_insert_not_flushed(FALSE), + m_delete_cannot_batch(FALSE), + m_update_cannot_batch(FALSE), m_ops_pending(0), m_skip_auto_increment(TRUE), m_blobs_pending(0), @@ -5971,7 +6167,10 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) if (!res) { Ndb *ndb= get_ndb(); - ndb->setDatabaseName(m_dbname); + if (ndb->setDatabaseName(m_dbname)) + { + ERR_RETURN(ndb->getNdbError()); + } struct Ndb_statistics stat; res= ndb_get_table_statistics(NULL, FALSE, ndb, m_table, &stat); stats.mean_rec_length= stat.row_size; @@ -6038,6 +6237,11 @@ Thd_ndb* ha_ndbcluster::seize_thd_ndb() DBUG_ENTER("seize_thd_ndb"); thd_ndb= new Thd_ndb(); + if (thd_ndb == NULL) + { + my_errno= HA_ERR_OUT_OF_MEM; + return NULL; + } if (thd_ndb->ndb->init(max_transactions) != 0) { ERR_PRINT(thd_ndb->ndb->getNdbError()); @@ -6090,7 +6294,10 @@ int ha_ndbcluster::check_ndb_connection(THD* thd) if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - ndb->setDatabaseName(m_dbname); + if (ndb->setDatabaseName(m_dbname)) + { + ERR_RETURN(ndb->getNdbError()); + } DBUG_RETURN(0); } @@ -6120,7 +6327,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, int error= 0; NdbError ndb_error; uint len; - const void* data; + const void* data= NULL; Ndb* ndb; char key[FN_REFLEN]; DBUG_ENTER("ndbcluster_discover"); @@ -6128,7 +6335,10 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - ndb->setDatabaseName(db); + if (ndb->setDatabaseName(db)) + { + ERR_RETURN(ndb->getNdbError()); + } NDBDICT* dict= ndb->getDictionary(); build_table_filename(key, sizeof(key), db, name, "", 0); /* ndb_share reference temporary */ @@ -6156,9 +6366,16 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, { const NdbError err= dict->getNdbError(); if (err.code == 709 || err.code == 723) + { error= -1; + DBUG_PRINT("info", ("ndb_error.code: %u", ndb_error.code)); + } else + { + error= -1; ndb_error= err; + DBUG_PRINT("info", ("ndb_error.code: %u", ndb_error.code)); + } goto err; } DBUG_PRINT("info", ("Found table %s", tab->getName())); @@ -6192,6 +6409,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, DBUG_RETURN(0); err: + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); if (share) { /* ndb_share reference temporary free */ @@ -6221,7 +6439,6 @@ int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd, if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - NDBDICT* dict= ndb->getDictionary(); NdbDictionary::Dictionary::List list; if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) @@ -6291,8 +6508,10 @@ int ndbcluster_drop_database_impl(const char *path) char full_path[FN_REFLEN]; char *tmp= full_path + build_table_filename(full_path, sizeof(full_path), dbname, "", "", 0); - - ndb->setDatabaseName(dbname); + if (ndb->setDatabaseName(dbname)) + { + ERR_RETURN(ndb->getNdbError()); + } List_iterator_fast<char> it(drop_list); while ((tabname=it++)) { @@ -6345,7 +6564,7 @@ int ndb_create_table_from_engine(THD *thd, const char *db, LEX *old_lex= thd->lex, newlex; thd->lex= &newlex; newlex.current_select= NULL; - lex_start(thd, (const uchar*) "", 0); + lex_start(thd, "", 0); int res= ha_create_table_from_engine(thd, db, table_name); thd->lex= old_lex; return res; @@ -6783,6 +7002,7 @@ static int 
ndbcluster_init(void *p) { DBUG_PRINT("error",("Ndb_cluster_connection(%s)", opt_ndbcluster_connectstring)); + my_errno= HA_ERR_OUT_OF_MEM; goto ndbcluster_init_error; } { @@ -6797,6 +7017,7 @@ static int ndbcluster_init(void *p) if ( (g_ndb= new Ndb(g_ndb_cluster_connection, "sys")) == 0 ) { DBUG_PRINT("error", ("failed to create global ndb object")); + my_errno= HA_ERR_OUT_OF_MEM; goto ndbcluster_init_error; } if (g_ndb->init() != 0) @@ -7289,7 +7510,10 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, Ndb *ndb; if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(1); - ndb->setDatabaseName(dbname); + if (ndb->setDatabaseName(dbname)) + { + ERR_RETURN(ndb->getNdbError()); + } uint lock= share->commit_count_lock; pthread_mutex_unlock(&share->mutex); @@ -7613,7 +7837,9 @@ int handle_trailing_share(NDB_SHARE *share) /* Ndb share has not been released as it should */ +#ifdef NOT_YET DBUG_ASSERT(FALSE); +#endif /* This is probably an error. We can however save the situation @@ -8503,7 +8729,10 @@ ha_ndbcluster::update_table_comment( return((char*)comment); } - ndb->setDatabaseName(m_dbname); + if (ndb->setDatabaseName(m_dbname)) + { + return((char*)comment); + } const NDBTAB* tab= m_table; DBUG_ASSERT(tab != NULL); @@ -8512,6 +8741,8 @@ ha_ndbcluster::update_table_comment( const unsigned fmt_len_plus_extra= length + strlen(fmt); if ((str= my_malloc(fmt_len_plus_extra, MYF(0))) == NULL) { + sql_print_error("ha_ndbcluster::update_table_comment: " + "my_malloc(%u) failed", (unsigned int)fmt_len_plus_extra); return (char*)comment; } @@ -8527,8 +8758,9 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) { THD *thd; /* needs to be first for thread_stack */ struct timespec abstime; - List<NDB_SHARE> util_open_tables; Thd_ndb *thd_ndb; + uint share_list_size= 0; + NDB_SHARE **share_list= NULL; my_thread_init(); DBUG_ENTER("ndb_util_thread"); @@ -8537,8 +8769,12 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) pthread_mutex_lock(&LOCK_ndb_util_thread); thd= new THD; /* note that contructor of THD uses DBUG_ */ + if (thd == NULL) + { + my_errno= HA_ERR_OUT_OF_MEM; + DBUG_RETURN(NULL); + } THD_CHECK_SENTRY(thd); - pthread_detach_this_thread(); ndb_util_thread= pthread_self(); @@ -8648,7 +8884,22 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) /* Lock mutex and fill list with pointers to all open tables */ NDB_SHARE *share; pthread_mutex_lock(&ndbcluster_mutex); - for (uint i= 0; i < ndbcluster_open_tables.records; i++) + uint i, open_count, record_count= ndbcluster_open_tables.records; + if (share_list_size < record_count) + { + NDB_SHARE ** new_share_list= new NDB_SHARE * [record_count]; + if (!new_share_list) + { + sql_print_warning("ndb util thread: malloc failure, " + "query cache not maintained properly"); + pthread_mutex_unlock(&ndbcluster_mutex); + goto next; // At least do not crash + } + delete [] share_list; + share_list_size= record_count; + share_list= new_share_list; + } + for (i= 0, open_count= 0; i < record_count; i++) { share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i); #ifdef HAVE_NDB_BINLOG @@ -8666,14 +8917,14 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) i, share->table_name, share->use_count)); /* Store pointer to table */ - util_open_tables.push_back(share); + share_list[open_count++]= share; } pthread_mutex_unlock(&ndbcluster_mutex); - /* Iterate through the open files list */ - List_iterator_fast<NDB_SHARE> it(util_open_tables); - while ((share= 
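// ndb_util_thread_func above replaces List<NDB_SHARE> with a plain pointer
// array that is grown only when ndbcluster_open_tables has more records than
// the previous pass, and a failed allocation just skips the maintenance round
// ("at least do not crash").  A minimal sketch of that grow-only pattern with
// simplified types.
#include <cstdio>
#include <new>

struct Share { const char *name; };

int main()
{
  Share **share_list= NULL;
  unsigned share_list_size= 0;

  for (int pass= 0; pass < 2; pass++)
  {
    unsigned record_count= (pass == 0) ? 2 : 5;        // pretend the open-table hash grew
    if (share_list_size < record_count)
    {
      Share **new_share_list= new (std::nothrow) Share *[record_count];
      if (!new_share_list)
      {
        std::fprintf(stderr, "malloc failure, query cache not maintained properly\n");
        continue;                                       // skip this round instead of crashing
      }
      delete [] share_list;
      share_list= new_share_list;
      share_list_size= record_count;
    }
    std::printf("pass %d: capacity %u\n", pass, share_list_size);
  }
  delete [] share_list;
  return 0;
}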
it++)) + /* Iterate through the open files list */ + for (i= 0; i < open_count; i++) { + share= share_list[i]; #ifdef HAVE_NDB_BINLOG if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0)) <= 1) @@ -8696,11 +8947,13 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) pthread_mutex_lock(&share->mutex); lock= share->commit_count_lock; pthread_mutex_unlock(&share->mutex); - { /* Contact NDB to get commit count for table */ Ndb* ndb= thd_ndb->ndb; - ndb->setDatabaseName(share->db); + if (ndb->setDatabaseName(share->db)) + { + goto loop_next; + } Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name); if (ndbtab_g.get_table() && ndb_get_table_statistics(NULL, FALSE, ndb, @@ -8723,7 +8976,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) stat.commit_count= 0; } } - + loop_next: pthread_mutex_lock(&share->mutex); if (share->commit_count_lock == lock) share->commit_count= stat.commit_count; @@ -8734,10 +8987,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) share->key, share->use_count)); free_share(&share); } - - /* Clear the list of open tables */ - util_open_tables.empty(); - +next: /* Calculate new time to wake up */ int secs= 0; int msecs= ndb_cache_check_time; @@ -8765,6 +9015,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) ndb_util_thread_end: net_end(&thd->net); ndb_util_thread_fail: + if (share_list) + delete [] share_list; thd->cleanup(); delete thd; @@ -8804,6 +9056,11 @@ ha_ndbcluster::cond_push(const COND *cond) { DBUG_ENTER("cond_push"); Ndb_cond_stack *ndb_cond = new Ndb_cond_stack(); + if (ndb_cond == NULL) + { + my_errno= HA_ERR_OUT_OF_MEM; + DBUG_RETURN(NULL); + } DBUG_EXECUTE("where",print_where((COND *)cond, m_tabname);); if (m_cond_stack) ndb_cond->next= m_cond_stack; @@ -10620,10 +10877,23 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, int pk= 0; int ai= 0; + + if (create_info->tablespace) + create_info->storage_media = HA_SM_DISK; + else + create_info->storage_media = HA_SM_MEMORY; + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; const NDBCOL *col= tab->getColumn(i); + if (col->getStorageType() == NDB_STORAGETYPE_MEMORY && create_info->storage_media != HA_SM_MEMORY || + col->getStorageType() == NDB_STORAGETYPE_DISK && create_info->storage_media != HA_SM_DISK) + { + DBUG_PRINT("info", ("Column storage media is changed")); + DBUG_RETURN(COMPATIBLE_DATA_NO); + } + if (field->flags & FIELD_IS_RENAMED) { DBUG_PRINT("info", ("Field has been renamed, copy table")); @@ -10641,6 +10911,36 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, if (field->flags & FIELD_IN_ADD_INDEX) ai=1; } + + char tablespace_name[FN_LEN]; + if (get_tablespace_name(current_thd, tablespace_name, FN_LEN)) + { + if (create_info->tablespace) + { + if (strcmp(create_info->tablespace, tablespace_name)) + { + DBUG_PRINT("info", ("storage media is changed, old tablespace=%s, new tablespace=%s", + tablespace_name, create_info->tablespace)); + DBUG_RETURN(COMPATIBLE_DATA_NO); + } + } + else + { + DBUG_PRINT("info", ("storage media is changed, old is DISK and tablespace=%s, new is MEM", + tablespace_name)); + DBUG_RETURN(COMPATIBLE_DATA_NO); + } + } + else + { + if (create_info->storage_media != HA_SM_MEMORY) + { + DBUG_PRINT("info", ("storage media is changed, old is MEM, new is DISK and tablespace=%s", + create_info->tablespace)); + DBUG_RETURN(COMPATIBLE_DATA_NO); + } + } + if 
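// check_if_incompatible_data above first derives the requested storage from
// the TABLESPACE clause (a tablespace implies DISK, none implies MEMORY) and
// then forces a copying ALTER whenever a column's current storage or the
// old/new tablespace name differs.  A condensed sketch of that decision; the
// enum and parameter names are simplified stand-ins.
#include <cstdio>
#include <cstring>

enum StorageMedia { SM_MEMORY, SM_DISK };

// Returns true when the ALTER can keep the data in place.
static bool storage_compatible(const char *new_tablespace,   // NULL if none given
                               const char *old_tablespace,   // NULL if table is in memory
                               StorageMedia column_storage)   // current storage of a column
{
  StorageMedia requested= new_tablespace ? SM_DISK : SM_MEMORY;
  if (column_storage != requested)
    return false;                                // column storage media is changed
  if (old_tablespace && new_tablespace)
    return std::strcmp(old_tablespace, new_tablespace) == 0;  // same tablespace?
  if (old_tablespace || new_tablespace)
    return false;                                // DISK <-> MEMORY change
  return true;
}

int main()
{
  std::printf("%d\n", storage_compatible("ts1", "ts1", SM_DISK));  // 1: compatible
  std::printf("%d\n", storage_compatible(NULL,  "ts1", SM_DISK));  // 0: copy table
  return 0;
}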
(table_changes != IS_EQUAL_YES) DBUG_RETURN(COMPATIBLE_DATA_NO); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 63665fde0f8..afc781e762c 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -593,6 +593,11 @@ enum THD_NDB_OPTIONS TNO_NO_LOG_SCHEMA_OP= 1 << 0 }; +enum THD_NDB_TRANS_OPTIONS +{ + TNTO_INJECTED_APPLY_STATUS= 1 << 0 +}; + struct Ndb_local_table_statistics { int no_uncommitted_rows_count; ulong last_count; @@ -618,8 +623,10 @@ class Thd_ndb uint lock_count; NdbTransaction *all; NdbTransaction *stmt; - int error; + bool m_error; + bool m_slow_path; uint32 options; + uint32 trans_options; List<NDB_SHARE> changed_tables; uint query_state; HASH open_tables; @@ -642,8 +649,6 @@ class ha_ndbcluster: public handler int index_end(); int index_read(byte *buf, const byte *key, uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte *buf, uint index, const byte *key, uint key_len, - enum ha_rkey_function find_flag); int index_next(byte *buf); int index_prev(byte *buf); int index_first(byte *buf); @@ -960,9 +965,12 @@ private: ha_rows m_bulk_insert_rows; ha_rows m_rows_changed; bool m_bulk_insert_not_flushed; + bool m_delete_cannot_batch; + bool m_update_cannot_batch; ha_rows m_ops_pending; bool m_skip_auto_increment; bool m_blobs_pending; + bool m_slow_path; my_ptrdiff_t m_blobs_offset; // memory for blobs in one tuple char *m_blobs_buffer; @@ -998,4 +1006,6 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op); static const char ndbcluster_hton_name[]= "ndbcluster"; static const int ndbcluster_hton_name_length=sizeof(ndbcluster_hton_name)-1; - +extern int ndbcluster_terminating; +extern int ndb_util_thread_running; +extern pthread_cond_t COND_ndb_util_ready; diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 2615732e7ee..c65c81c088c 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -500,7 +500,7 @@ static int ndbcluster_reset_logs(THD *thd) static int ndbcluster_binlog_index_purge_file(THD *thd, const char *file) { - if (!ndb_binlog_running) + if (!ndb_binlog_running || thd->slave_thread) return 0; DBUG_ENTER("ndbcluster_binlog_index_purge_file"); @@ -582,10 +582,30 @@ static int ndbcluster_binlog_end(THD *thd) ndbcluster_binlog_inited= 0; #ifdef HAVE_NDB_BINLOG + if (ndb_util_thread_running > 0) + { + /* + Wait for util thread to die (as this uses the injector mutex) + There is a very small change that ndb_util_thread dies and the + following mutex is freed before it's accessed. This shouldn't + however be a likely case as the ndbcluster_binlog_end is supposed to + be called before ndb_cluster_end(). + */ + pthread_mutex_lock(&LOCK_ndb_util_thread); + /* Ensure mutex are not freed if ndb_cluster_end is running at same time */ + ndb_util_thread_running++; + ndbcluster_terminating= 1; + pthread_cond_signal(&COND_ndb_util_thread); + while (ndb_util_thread_running > 1) + pthread_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread); + ndb_util_thread_running--; + pthread_mutex_unlock(&LOCK_ndb_util_thread); + } + /* wait for injector thread to finish */ ndbcluster_binlog_terminating= 1; - pthread_cond_signal(&injector_cond); pthread_mutex_lock(&injector_mutex); + pthread_cond_signal(&injector_cond); while (ndb_binlog_thread_running > 0) pthread_cond_wait(&injector_cond, &injector_mutex); pthread_mutex_unlock(&injector_mutex); @@ -729,6 +749,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd) NDB_REP_DB "." 
NDB_APPLY_TABLE " ( server_id INT UNSIGNED NOT NULL," " epoch BIGINT UNSIGNED NOT NULL, " + " log_name VARCHAR(255) BINARY NOT NULL, " + " start_pos BIGINT UNSIGNED NOT NULL, " + " end_pos BIGINT UNSIGNED NOT NULL, " " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB"); run_query(thd, buf, end, TRUE, TRUE); @@ -869,6 +892,7 @@ struct Cluster_schema uint32 id; uint32 version; uint32 type; + uint32 any_value; }; /* @@ -950,8 +974,8 @@ static void ndbcluster_get_schema(NDB_SHARE *share, /* helper function to pack a ndb varchar */ -static char *ndb_pack_varchar(const NDBCOL *col, char *buf, - const char *str, int sz) +char *ndb_pack_varchar(const NDBCOL *col, char *buf, + const char *str, int sz) { switch (col->getArrayType()) { @@ -1389,6 +1413,12 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, /* type */ r|= op->setValue(SCHEMA_TYPE_I, log_type); DBUG_ASSERT(r == 0); + /* any value */ + if (!(thd->options & OPTION_BIN_LOG)) + r|= op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); + else + r|= op->setAnyValue(thd->server_id); + DBUG_ASSERT(r == 0); if (log_db != new_db && new_db && new_table_name) { log_db= new_db; @@ -1734,6 +1764,31 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp, DBUG_RETURN(0); } +static void ndb_binlog_query(THD *thd, Cluster_schema *schema) +{ + if (schema->any_value & NDB_ANYVALUE_RESERVED) + { + if (schema->any_value != NDB_ANYVALUE_FOR_NOLOGGING) + sql_print_warning("NDB: unknown value for binlog signalling 0x%X, " + "query not logged", + schema->any_value); + return; + } + uint32 thd_server_id_save= thd->server_id; + DBUG_ASSERT(sizeof(thd_server_id_save) == sizeof(thd->server_id)); + char *thd_db_save= thd->db; + if (schema->any_value == 0) + thd->server_id= ::server_id; + else + thd->server_id= schema->any_value; + thd->db= schema->db; + thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, + schema->query_length, FALSE, + schema->name[0] == 0 || thd->db[0] == 0); + thd->server_id= thd_server_id_save; + thd->db= thd_db_save; +} + static int ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, NdbEventOperation *pOp, @@ -1758,7 +1813,10 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, MY_BITMAP slock; bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE); uint node_id= g_ndb_cluster_connection->node_id(); - ndbcluster_get_schema(tmp_share, schema); + { + ndbcluster_get_schema(tmp_share, schema); + schema->any_value= pOp->getAnyValue(); + } enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type; DBUG_PRINT("info", ("%s.%s: log query_length: %d query: '%s' type: %d", @@ -1882,7 +1940,8 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, run_query(thd, schema->query, schema->query + schema->query_length, TRUE, /* print error */ - FALSE); /* binlog the query */ + TRUE); /* don't binlog the query */ + log_query= 1; break; case SOT_TABLESPACE: case SOT_LOGFILE_GROUP: @@ -1892,14 +1951,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, abort(); } if (log_query && ndb_binlog_running) - { - char *thd_db_save= thd->db; - thd->db= schema->db; - thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, - schema->query_length, FALSE, - schema->name[0] == 0 || thd->db[0] == 0); - thd->db= thd_db_save; - } + ndb_binlog_query(thd, schema); /* signal that schema operation has been handled */ DBUG_DUMP("slock", (char*)schema->slock, schema->slock_length); if (bitmap_is_set(&slock, node_id)) @@ -2076,10 +2128,10 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd, log_query= 1; break; case 
SOT_DROP_TABLE: + log_query= 1; // invalidation already handled by binlog thread if (share && share->op) { - log_query= 1; break; } // fall through @@ -2157,14 +2209,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd, } } if (ndb_binlog_running && log_query) - { - char *thd_db_save= thd->db; - thd->db= schema->db; - thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, - schema->query_length, FALSE, - schema->name[0] == 0); - thd->db= thd_db_save; - } + ndb_binlog_query(thd, schema); } while ((schema= post_epoch_unlock_list->pop())) { @@ -2270,9 +2315,11 @@ int ndb_add_ndb_binlog_index(THD *thd, void *_row) break; } - // Set all fields non-null. - if(ndb_binlog_index->s->null_bytes > 0) - bzero(ndb_binlog_index->record[0], ndb_binlog_index->s->null_bytes); + /* + Intialize ndb_binlog_index->record[0] + */ + empty_record(ndb_binlog_index); + ndb_binlog_index->field[0]->store(row.master_log_pos); ndb_binlog_index->field[1]->store(row.master_log_file, strlen(row.master_log_file), @@ -2318,6 +2365,18 @@ int ndbcluster_binlog_start() { DBUG_ENTER("ndbcluster_binlog_start"); + if (::server_id == 0) + { + sql_print_warning("NDB: server id set to zero will cause any other mysqld " + "with bin log to log with wrong server id"); + } + else if (::server_id & 0x1 << 31) + { + sql_print_error("NDB: server id's with high bit set is reserved for internal " + "purposes"); + DBUG_RETURN(-1); + } + pthread_mutex_init(&injector_mutex, MY_MUTEX_INIT_FAST); pthread_cond_init(&injector_cond, NULL); pthread_mutex_init(&ndb_schema_share_mutex, MY_MUTEX_INIT_FAST); @@ -3186,8 +3245,20 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); if (share == ndb_apply_status_share) return 0; - TABLE *table= share->table; + uint32 originating_server_id= pOp->getAnyValue(); + if (originating_server_id == 0) + originating_server_id= ::server_id; + else if (originating_server_id & NDB_ANYVALUE_RESERVED) + { + if (originating_server_id != NDB_ANYVALUE_FOR_NOLOGGING) + sql_print_warning("NDB: unknown value for binlog signalling 0x%X, " + "event not logged", + originating_server_id); + return 0; + } + + TABLE *table= share->table; DBUG_ASSERT(trans.good()); DBUG_ASSERT(table != 0); @@ -3232,7 +3303,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, DBUG_ASSERT(ret == 0); } ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]); - IF_DBUG(int ret=) trans.write_row(::server_id, + IF_DBUG(int ret=) trans.write_row(originating_server_id, injector::transaction::table(table, TRUE), &b, n_fields, table->record[0]); @@ -3272,7 +3343,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, } ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]); DBUG_EXECUTE("info", print_records(table, table->record[n]);); - IF_DBUG(int ret =) trans.delete_row(::server_id, + IF_DBUG(int ret =) trans.delete_row(originating_server_id, injector::transaction::table(table, TRUE), &b, n_fields, table->record[n]); @@ -3302,7 +3373,8 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, since table has a primary key, we can do a write using only after values */ - trans.write_row(::server_id, injector::transaction::table(table, TRUE), + trans.write_row(originating_server_id, + injector::transaction::table(table, TRUE), &b, n_fields, table->record[0]);// after values } else @@ -3322,7 +3394,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, } ndb_unpack_record(table, 
share->ndb_value[1], &b, table->record[1]); DBUG_EXECUTE("info", print_records(table, table->record[1]);); - IF_DBUG(int ret =) trans.update_row(::server_id, + IF_DBUG(int ret =) trans.update_row(originating_server_id, injector::transaction::table(table, TRUE), &b, n_fields, @@ -3582,6 +3654,14 @@ restart: Main NDB Injector loop */ { + /* + Always insert a GAP event as we cannot know what has happened in the cluster + while not being connected. + */ + LEX_STRING const msg= { C_STRING_WITH_LEN("Cluster connect") }; + inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg); + } + { thd->proc_info= "Waiting for ndbcluster to start"; pthread_mutex_lock(&injector_mutex); @@ -3893,11 +3973,16 @@ restart: IF_DBUG(int ret=) trans.use_table(::server_id, tbl); DBUG_ASSERT(ret == 0); - // Set all fields non-null. - if(table->s->null_bytes > 0) - bzero(table->record[0], table->s->null_bytes); + /* + Intialize table->record[0] + */ + empty_record(table); + table->field[0]->store((longlong)::server_id); table->field[1]->store((longlong)gci); + table->field[2]->store("", 0, &my_charset_bin); + table->field[3]->store((longlong)0); + table->field[4]->store((longlong)0); trans.write_row(::server_id, injector::transaction::table(table, TRUE), &table->s->all_set, table->s->fields, diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index 00fc689f061..ac0d769433e 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -30,6 +30,10 @@ extern ulong ndb_extra_logging; #define NDB_INVALID_SCHEMA_OBJECT 241 +/* server id's with high bit set is reservered */ +#define NDB_ANYVALUE_FOR_NOLOGGING 0xFFFFFFFF +#define NDB_ANYVALUE_RESERVED 0x80000000 + extern handlerton *ndbcluster_hton; /* @@ -181,6 +185,8 @@ int ndbcluster_find_all_files(THD *thd); void ndb_unpack_record(TABLE *table, NdbValue *value, MY_BITMAP *defined, byte *buf); +char *ndb_pack_varchar(const NDBCOL *col, char *buf, + const char *str, int sz); NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h index 9f7b9146d91..c6bc8f577f8 100644 --- a/sql/ha_ndbcluster_tables.h +++ b/sql/ha_ndbcluster_tables.h @@ -15,7 +15,9 @@ */ #define NDB_REP_DB "mysql" +#define OLD_NDB_REP_DB "cluster" #define NDB_REP_TABLE "ndb_binlog_index" #define NDB_APPLY_TABLE "ndb_apply_status" #define OLD_NDB_APPLY_TABLE "apply_status" #define NDB_SCHEMA_TABLE "ndb_schema" +#define OLD_NDB_SCHEMA_TABLE "schema" diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index db0d118c2e0..3c25dcd202f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1781,7 +1781,8 @@ int ha_partition::set_up_table_before_create(TABLE *table, } table->s->max_rows= part_elem->part_max_rows; table->s->min_rows= part_elem->part_min_rows; - partition_name= strrchr(partition_name_with_path, FN_LIBCHAR); + /* Here we have unified path so should always look for '/', not FN_LIBCHAR */ + partition_name= strrchr(partition_name_with_path, '/'); if ((part_elem->index_file_name && (error= append_file_to_dir(thd, (const char**)&part_elem->index_file_name, @@ -3336,13 +3337,14 @@ int ha_partition::index_end() */ int ha_partition::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) + key_part_map keypart_map, + enum ha_rkey_function find_flag) { DBUG_ENTER("ha_partition::index_read"); end_range= 0; m_index_scan_type= partition_index_read; - DBUG_RETURN(common_index_read(buf, key, key_len, find_flag)); + DBUG_RETURN(common_index_read(buf, key, keypart_map, 
find_flag)); } @@ -3355,14 +3357,17 @@ int ha_partition::index_read(byte * buf, const byte * key, see index_read for rest */ -int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len, +int ha_partition::common_index_read(byte *buf, const byte *key, + key_part_map keypart_map, enum ha_rkey_function find_flag) { int error; bool reverse_order= FALSE; + uint key_len= calculate_key_len(table, active_index, key, keypart_map); DBUG_ENTER("ha_partition::common_index_read"); memcpy((void*)m_start_key.key, key, key_len); + m_start_key.keypart_map= keypart_map; m_start_key.length= key_len; m_start_key.flag= find_flag; @@ -3491,33 +3496,6 @@ int ha_partition::common_first_last(byte *buf) /* - Perform index read using index where always only one row is returned - - SYNOPSIS - index_read_idx() - see index_read for rest of parameters and return values - - DESCRIPTION - Positions an index cursor to the index specified in key. Fetches the - row if any. This is only used to read whole keys. - TODO: Optimise this code to avoid index_init and index_end -*/ - -int ha_partition::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, - enum ha_rkey_function find_flag) -{ - int res; - DBUG_ENTER("ha_partition::index_read_idx"); - - index_init(index, 0); - res= index_read(buf, key, key_len, find_flag); - index_end(); - DBUG_RETURN(res); -} - - -/* Read last using key SYNOPSIS @@ -3535,14 +3513,15 @@ int ha_partition::index_read_idx(byte * buf, uint index, const byte * key, Can only be used on indexes supporting HA_READ_ORDER */ -int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen) +int ha_partition::index_read_last(byte *buf, const byte *key, + key_part_map keypart_map) { DBUG_ENTER("ha_partition::index_read_last"); m_ordered= TRUE; // Safety measure end_range= 0; m_index_scan_type= partition_index_read_last; - DBUG_RETURN(common_index_read(buf, key, keylen, HA_READ_PREFIX_LAST)); + DBUG_RETURN(common_index_read(buf, key, keypart_map, HA_READ_PREFIX_LAST)); } @@ -3688,7 +3667,7 @@ int ha_partition::read_range_first(const key_range *start_key, m_index_scan_type= partition_index_read; error= common_index_read(m_rec0, start_key->key, - start_key->length, start_key->flag); + start_key->keypart_map, start_key->flag); } DBUG_RETURN(error); } @@ -3887,7 +3866,7 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf) case partition_index_read: DBUG_PRINT("info", ("index_read on partition %d", i)); error= file->index_read(buf, m_start_key.key, - m_start_key.length, + m_start_key.keypart_map, m_start_key.flag); break; case partition_index_first: @@ -3979,7 +3958,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order) case partition_index_read: error= file->index_read(rec_buf_ptr, m_start_key.key, - m_start_key.length, + m_start_key.keypart_map, m_start_key.flag); break; case partition_index_first: @@ -3993,7 +3972,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf, bool reverse_order) case partition_index_read_last: error= file->index_read_last(rec_buf_ptr, m_start_key.key, - m_start_key.length); + m_start_key.keypart_map); reverse_order= TRUE; break; default: diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1069dd058d5..a081e4bb472 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -384,9 +384,8 @@ public: any end processing needed. 
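The ha_partition changes above funnel every index read through common_index_read(), which now receives a key_part_map and derives the key length with calculate_key_len() instead of trusting a caller-supplied key_len. The sketch below only illustrates that derivation under the assumption that the map describes a leading prefix of key parts; KeyPart is an invented stand-in for the server's KEY_PART_INFO, and this is not the real calculate_key_len().

/*
  Illustrative sketch only: how a key prefix length can be derived from
  a keypart_map.  KeyPart stands in for KEY_PART_INFO; the server's own
  calculate_key_len() is not reproduced here.
*/
#include <cstdio>
#include <vector>

typedef unsigned long key_part_map;          /* local stand-in typedef */

struct KeyPart { unsigned store_length; };   /* bytes used in the key buffer */

static unsigned prefix_key_len(const std::vector<KeyPart> &parts,
                               key_part_map map)
{
  unsigned len= 0;
  for (size_t i= 0; i < parts.size() && (map & 1); i++, map>>= 1)
    len+= parts[i].store_length;
  return len;
}

int main()
{
  /* three-part key: INT(4), INT(4), CHAR(10) */
  std::vector<KeyPart> key;
  KeyPart p1= {4}, p2= {4}, p3= {10};
  key.push_back(p1); key.push_back(p2); key.push_back(p3);

  printf("%u %u %u\n",
         prefix_key_len(key, 0x1),   /* first part only -> 4  */
         prefix_key_len(key, 0x3),   /* first two parts -> 8  */
         prefix_key_len(key, 0x7));  /* whole key       -> 18 */
  return 0;
}

Storing the keypart_map in m_start_key (as the hunk above does) lets each underlying partition handler recompute the same prefix length for its own index read.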
*/ virtual int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - virtual int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + key_part_map keypart_map, + enum ha_rkey_function find_flag); virtual int index_init(uint idx, bool sorted); virtual int index_end(); @@ -399,7 +398,8 @@ public: virtual int index_first(byte * buf); virtual int index_last(byte * buf); virtual int index_next_same(byte * buf, const byte * key, uint keylen); - virtual int index_read_last(byte * buf, const byte * key, uint keylen); + virtual int index_read_last(byte * buf, const byte * key, + key_part_map keypart_map); /* read_first_row is virtual method but is only implemented by @@ -425,7 +425,8 @@ public: private: int common_index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + key_part_map keypart_map, + enum ha_rkey_function find_flag); int common_first_last(byte * buf); int partition_scan_set_up(byte * buf, bool idx_read_flag); int handle_unordered_next(byte * buf, bool next_same); diff --git a/sql/handler.cc b/sql/handler.cc index 23c3103493e..e0018a66400 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -844,7 +844,7 @@ int ha_rollback_trans(THD *thd, bool all) the error log; but we don't want users to wonder why they have this message in the error log, so we don't send it. */ - if (is_real_trans && (thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && + if (is_real_trans && thd->no_trans_update.all && !thd->slave_thread && thd->killed != THD::KILL_CONNECTION) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARNING_NOT_COMPLETE_ROLLBACK, @@ -1143,9 +1143,9 @@ bool mysql_xa_recover(THD *thd) XID_STATE *xs; DBUG_ENTER("mysql_xa_recover"); - field_list.push_back(new Item_int("formatID",0,11)); - field_list.push_back(new Item_int("gtrid_length",0,11)); - field_list.push_back(new Item_int("bqual_length",0,11)); + field_list.push_back(new Item_int("formatID", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_int("gtrid_length", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_int("bqual_length", 0, MY_INT32_NUM_DECIMAL_DIGITS)); field_list.push_back(new Item_empty_string("data",XIDDATASIZE)); if (protocol->send_fields(&field_list, @@ -1753,7 +1753,6 @@ int handler::update_auto_increment() bool append= FALSE; THD *thd= table->in_use; struct system_variables *variables= &thd->variables; - bool auto_increment_field_not_null; DBUG_ENTER("handler::update_auto_increment"); /* @@ -1761,11 +1760,9 @@ int handler::update_auto_increment() than the interval, but not smaller. 
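The XA RECOVER metadata fields above, and the Item_int constructors further down in item.h, replace the literal display widths 11 and 21 with MY_INT32_NUM_DECIMAL_DIGITS and MY_INT64_NUM_DECIMAL_DIGITS. The quick check below shows where those widths come from; it is plain standard C++ and does not use the server constants themselves. The 64-bit constant corresponds to the old literal 21, one position beyond the 20 digits printed here.

/*
  Why 11 and 21 are the canonical decimal display widths for 32-bit and
  64-bit integers.  Standalone check, not server code.
*/
#include <cstdio>
#include <cstring>

int main()
{
  printf("%d\n", (int) strlen("-2147483648"));           /* 11: INT32_MIN  */
  printf("%d\n", (int) strlen("-9223372036854775808"));  /* 20: INT64_MIN  */
  printf("%d\n", (int) strlen("18446744073709551615"));  /* 20: UINT64_MAX */
  return 0;
}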
*/ DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum()); - auto_increment_field_not_null= table->auto_increment_field_not_null; - table->auto_increment_field_not_null= FALSE; // to reset for next row if ((nr= table->next_number_field->val_int()) != 0 || - auto_increment_field_not_null && + table->auto_increment_field_not_null && thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { /* @@ -1843,7 +1840,7 @@ int handler::update_auto_increment() nr= compute_next_insert_id(nr-1, variables); } - if (table->s->next_number_key_offset == 0) + if (table->s->next_number_keypart == 0) { /* We must defer the appending until "nr" has been possibly truncated */ append= TRUE; @@ -1963,7 +1960,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, table->read_set); column_bitmaps_signal(); index_init(table->s->next_number_index, 1); - if (!table->s->next_number_key_offset) + if (table->s->next_number_keypart == 0) { // Autoincrement at key-start error=index_last(table->record[1]); /* @@ -1979,7 +1976,8 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, key_copy(key, table->record[0], table->key_info + table->s->next_number_index, table->s->next_number_key_offset); - error= index_read(table->record[1], key, table->s->next_number_key_offset, + error= index_read(table->record[1], key, + make_prev_keypart_map(table->s->next_number_keypart), HA_READ_PREFIX_LAST); /* MySQL needs to call us for next row: assume we are inserting ("a",null) @@ -3079,9 +3077,9 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, multi_range_curr < multi_range_end; multi_range_curr++) { - result= read_range_first(multi_range_curr->start_key.length ? + result= read_range_first(multi_range_curr->start_key.keypart_map ? &multi_range_curr->start_key : 0, - multi_range_curr->end_key.length ? + multi_range_curr->end_key.keypart_map ? &multi_range_curr->end_key : 0, test(multi_range_curr->range_flag & EQ_RANGE), multi_range_sorted); @@ -3146,9 +3144,9 @@ int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p) multi_range_curr < multi_range_end; multi_range_curr++) { - result= read_range_first(multi_range_curr->start_key.length ? + result= read_range_first(multi_range_curr->start_key.keypart_map ? &multi_range_curr->start_key : 0, - multi_range_curr->end_key.length ? + multi_range_curr->end_key.keypart_map ? &multi_range_curr->end_key : 0, test(multi_range_curr->range_flag & EQ_RANGE), multi_range_sorted); @@ -3207,7 +3205,7 @@ int handler::read_range_first(const key_range *start_key, else result= index_read(table->record[0], start_key->key, - start_key->length, + start_key->keypart_map, start_key->flag); if (result) DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) @@ -3279,15 +3277,19 @@ int handler::compare_key(key_range *range) return cmp; } + int handler::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) + key_part_map keypart_map, + enum ha_rkey_function find_flag) { - int error= ha_index_init(index, 0); - if (!error) - error= index_read(buf, key, key_len, find_flag); + int error, error1; + error= index_init(index, 0); if (!error) - error= ha_index_end(); - return error; + { + error= index_read(buf, key, keypart_map, find_flag); + error1= index_end(); + } + return error ? 
error : error1; } diff --git a/sql/handler.h b/sql/handler.h index 82970cc1ac6..cfa86358fa1 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -867,6 +867,18 @@ public: {} }; +uint calculate_key_len(TABLE *, uint, const byte *, key_part_map); +/* + bitmap with first N+1 bits set + (keypart_map for a key prefix of [0..N] keyparts) +*/ +#define make_keypart_map(N) (((key_part_map)2 << (N)) - 1) +/* + bitmap with first N bits set + (keypart_map for a key prefix of [0..N-1] keyparts) +*/ +#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1) + /* The handler class is the interface for dynamically loadable storage engines. Do not add ifdefs and take care when adding or @@ -974,7 +986,11 @@ public: check_if_locking_is_allowed() thd Handler of the thread, trying to lock the table table Table handler to check - count Number of locks already granted to the table + count Total number of tables to be locked + current Index of the current table in the list of the tables + to be locked. + system_count Pointer to the counter of system tables seen thus + far. called_by_privileged_thread TRUE if called from a logger THD (general_log_thd or slow_log_thd) or by a privileged thread, which @@ -993,7 +1009,8 @@ public: */ virtual bool check_if_locking_is_allowed(uint sql_command, ulong type, TABLE *table, - uint count, + uint count, uint current, + uint *system_count, bool called_by_privileged_thread) { return TRUE; @@ -1202,11 +1219,32 @@ public: DBUG_ASSERT(FALSE); return HA_ERR_WRONG_COMMAND; } - virtual int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) + private: + virtual int index_read(byte * buf, const byte * key, uint key_len, + enum ha_rkey_function find_flag) { return HA_ERR_WRONG_COMMAND; } + public: +/** + @brief + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. +*/ + virtual int index_read(byte * buf, const byte * key, key_part_map keypart_map, + enum ha_rkey_function find_flag) + { + uint key_len= calculate_key_len(table, active_index, key, keypart_map); + return index_read(buf, key, key_len, find_flag); + } +/** + @brief + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. +*/ virtual int index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag); + key_part_map keypart_map, + enum ha_rkey_function find_flag); virtual int index_next(byte * buf) { return HA_ERR_WRONG_COMMAND; } virtual int index_prev(byte * buf) @@ -1216,8 +1254,21 @@ public: virtual int index_last(byte * buf) { return HA_ERR_WRONG_COMMAND; } virtual int index_next_same(byte *buf, const byte *key, uint keylen); + private: virtual int index_read_last(byte * buf, const byte * key, uint key_len) { return (my_errno=HA_ERR_WRONG_COMMAND); } + public: +/** + @brief + The following functions works like index_read, but it find the last + row with the current key value or prefix. 
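make_keypart_map() and make_prev_keypart_map() above encode which leading key parts a lookup supplies. A minimal self-contained check of the resulting bit patterns follows; the key_part_map typedef below is local to the sketch, while the macro bodies are the ones from the hunk.

/*
  Bit patterns produced by the keypart_map helpers added to handler.h.
  The typedef is local to this check.
*/
#include <cassert>

typedef unsigned long key_part_map;

#define make_keypart_map(N)      (((key_part_map)2 << (N)) - 1) /* bits 0..N   */
#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1) /* bits 0..N-1 */

int main()
{
  assert(make_prev_keypart_map(0) == 0x0);  /* empty prefix   */
  assert(make_prev_keypart_map(1) == 0x1);  /* keypart 0 only */
  assert(make_prev_keypart_map(3) == 0x7);  /* keyparts 0..2  */

  assert(make_keypart_map(0) == 0x1);       /* keypart 0      */
  assert(make_keypart_map(2) == 0x7);       /* keyparts 0..2  */
  return 0;
}

This is also why the default index_read() wrapper above can keep the old key_len-based virtual as a private fallback: the map is always convertible back into a prefix length via calculate_key_len().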
+*/ + virtual int index_read_last(byte * buf, const byte * key, + key_part_map keypart_map) + { + uint key_len= calculate_key_len(table, active_index, key, keypart_map); + return index_read_last(buf, key, key_len); + } virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, KEY_MULTI_RANGE *ranges, uint range_count, bool sorted, HANDLER_BUFFER *buffer); @@ -1243,8 +1294,7 @@ public: { return HA_ERR_WRONG_COMMAND; } virtual int rnd_same(byte *buf, uint inx) { return HA_ERR_WRONG_COMMAND; } - virtual ha_rows records_in_range(uint inx, key_range *min_key, - key_range *max_key) + virtual ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key) { return (ha_rows) 10; } virtual void position(const byte *record)=0; virtual int info(uint)=0; // see my_base.h for full description @@ -1422,6 +1472,17 @@ public: virtual void free_foreign_key_create_info(char* str) {} /* The following can be called without an open handler */ virtual const char *table_type() const =0; + /* + If frm_error() is called then we will use this to find out what file + extentions exist for the storage engine. This is also used by the default + rename_table and delete_table method in handler.cc. + + For engines that have two file name extentions (separate meta/index file + and data file), the order of elements is relevant. First element of engine + file name extentions array should be meta/index file extention. Second + element - data file extention. This order is assumed by + prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued. + */ virtual const char **bas_ext() const =0; virtual int get_default_no_partitions(HA_CREATE_INFO *info) { return 1;} diff --git a/sql/item.cc b/sql/item.cc index 43f182307f1..bd5e0ae1a8f 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -148,7 +148,7 @@ void Hybrid_type_traits_integer::fix_length_and_dec(Item *item, Item *arg) const { item->decimals= 0; - item->max_length= 21; + item->max_length= MY_INT64_NUM_DECIMAL_DIGITS; item->unsigned_flag= 0; } @@ -267,7 +267,7 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value) my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_date(<ime, TIME_FUZZY_DATE)) { my_decimal_set_zero(decimal_value); @@ -280,7 +280,7 @@ my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value) my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_time(<ime)) { my_decimal_set_zero(decimal_value); @@ -315,7 +315,7 @@ longlong Item::val_int_from_decimal() int Item::save_time_in_field(Field *field) { - TIME ltime; + MYSQL_TIME ltime; if (get_time(<ime)) return set_field_to_null(field); field->set_notnull(); @@ -325,7 +325,7 @@ int Item::save_time_in_field(Field *field) int Item::save_date_in_field(Field *field) { - TIME ltime; + MYSQL_TIME ltime; if (get_date(<ime, TIME_FUZZY_DATE)) return set_field_to_null(field); field->set_notnull(); @@ -853,22 +853,40 @@ bool Item_string::eq(const Item *item, bool binary_cmp) const /* - Get the value of the function as a TIME structure. + Get the value of the function as a MYSQL_TIME structure. As a extra convenience the time structure is reset on error! 
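The new comment on bas_ext() above spells out an ordering contract: for engines with a separate meta/index file and data file, the index extension must be listed first because prepare_for_repair() for REPAIR TABLE ... USE_FRM relies on it. A hypothetical engine honouring that contract could look like the sketch below; the .xfi/.xfd extensions and the ha_example name are invented for illustration and are not part of the patch.

/*
  Hypothetical engine illustrating the documented bas_ext() ordering:
  meta/index extension first, data extension second, NULL-terminated.
  All names here are invented for the sketch.
*/
#include <cstdio>

static const char *example_exts[]= {
  ".xfi",   /* meta/index file: must come first */
  ".xfd",   /* data file                        */
  NULL
};

class ha_example            /* would derive from handler in the server */
{
public:
  const char **bas_ext() const { return example_exts; }
};

int main()
{
  ha_example h;
  for (const char **ext= h.bas_ext(); *ext; ext++)
    printf("%s\n", *ext);
  return 0;
}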
*/ -bool Item::get_date(TIME *ltime,uint fuzzydate) +bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) { - char buff[40]; - String tmp(buff,sizeof(buff), &my_charset_bin),*res; - if (!(res=val_str(&tmp)) || - str_to_datetime_with_warn(res->ptr(), res->length(), - ltime, fuzzydate) <= MYSQL_TIMESTAMP_ERROR) + if (result_type() == STRING_RESULT) { - bzero((char*) ltime,sizeof(*ltime)); - return 1; + char buff[40]; + String tmp(buff,sizeof(buff), &my_charset_bin),*res; + if (!(res=val_str(&tmp)) || + str_to_datetime_with_warn(res->ptr(), res->length(), + ltime, fuzzydate) <= MYSQL_TIMESTAMP_ERROR) + goto err; + } + else + { + longlong value= val_int(); + int was_cut; + if (number_to_datetime(value, ltime, fuzzydate, &was_cut) == LL(-1)) + { + char buff[22], *end; + end= longlong10_to_str(value, buff, -10); + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + buff, (int) (end-buff), MYSQL_TIMESTAMP_NONE, + NullS); + goto err; + } } return 0; + +err: + bzero((char*) ltime,sizeof(*ltime)); + return 1; } /* @@ -876,7 +894,7 @@ bool Item::get_date(TIME *ltime,uint fuzzydate) As a extra convenience the time structure is reset on error! */ -bool Item::get_time(TIME *ltime) +bool Item::get_time(MYSQL_TIME *ltime) { char buff[40]; String tmp(buff,sizeof(buff),&my_charset_bin),*res; @@ -1088,7 +1106,7 @@ bool Item_splocal::set_value(THD *thd, sp_rcontext *ctx, Item **it) Item_case_expr methods *****************************************************************************/ -Item_case_expr::Item_case_expr(int case_expr_id) +Item_case_expr::Item_case_expr(uint case_expr_id) :Item_sp_variable( C_STRING_WITH_LEN("case_expr")), m_case_expr_id(case_expr_id) { @@ -1125,6 +1143,8 @@ Item_case_expr::this_item_addr(THD *thd, Item **) void Item_case_expr::print(String *str) { + if (str->reserve(MAX_INT_WIDTH + sizeof("case_expr@"))) + return; /* purecov: inspected */ VOID(str->append(STRING_WITH_LEN("case_expr@"))); str->qs_append(m_case_expr_id); } @@ -1289,15 +1309,18 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Exception is Item_direct_view_ref which we need to convert to Item_ref to allow fields from view being stored in tmp table. 
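The rewritten Item::get_date() above adds a numeric path: non-string items are converted with val_int() and handed to number_to_datetime(), i.e. datetimes packed as decimal YYYYMMDDhhmmss. The decomposition below is only a stripped-down illustration of that packing; the real number_to_datetime() also range-checks the parts, accepts shorter forms and reports truncation through was_cut.

/*
  Decomposing a decimal-packed datetime of the form YYYYMMDDhhmmss,
  the format the new numeric branch of Item::get_date() feeds into
  number_to_datetime().  Simplified sketch: no range checking here.
*/
#include <cstdio>

struct DateTimeParts { long year, month, day, hour, minute, second; };

static DateTimeParts unpack_numeric_datetime(long long v)
{
  DateTimeParts p;
  p.second= (long) (v % 100); v/= 100;
  p.minute= (long) (v % 100); v/= 100;
  p.hour=   (long) (v % 100); v/= 100;
  p.day=    (long) (v % 100); v/= 100;
  p.month=  (long) (v % 100); v/= 100;
  p.year=   (long) v;
  return p;
}

int main()
{
  DateTimeParts p= unpack_numeric_datetime(20070424111145LL);
  printf("%04ld-%02ld-%02ld %02ld:%02ld:%02ld\n",
         p.year, p.month, p.day, p.hour, p.minute, p.second);
  /* prints 2007-04-24 11:11:45 */
  return 0;
}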
*/ + Item_aggregate_ref *item_ref; uint el= fields.elements; - Item *new_item, *real_itm= real_item(); + Item *real_itm= real_item(); ref_pointer_array[el]= real_itm; - if (!(new_item= new Item_aggregate_ref(&thd->lex->current_select->context, + if (!(item_ref= new Item_aggregate_ref(&thd->lex->current_select->context, ref_pointer_array + el, 0, name))) return; // fatal_error is set + if (type() == SUM_FUNC_ITEM) + item_ref->depended_from= ((Item_sum *) this)->depended_from(); fields.push_front(real_itm); - thd->change_item_tree(ref, new_item); + thd->change_item_tree(ref, item_ref); } } @@ -1639,7 +1662,7 @@ void Item_ident_for_show::make_field(Send_field *tmp_field) Item_field::Item_field(Field *f) :Item_ident(0, NullS, *f->table_name, f->field_name), item_equal(0), no_const_subst(0), - have_privileges(0), any_privileges(0), fixed_as_field(0) + have_privileges(0), any_privileges(0) { set_field(f); /* @@ -1654,7 +1677,7 @@ Item_field::Item_field(THD *thd, Name_resolution_context *context_arg, Field *f) :Item_ident(context_arg, f->table->s->db.str, *f->table_name, f->field_name), item_equal(0), no_const_subst(0), - have_privileges(0), any_privileges(0), fixed_as_field(0) + have_privileges(0), any_privileges(0) { /* We always need to provide Item_field with a fully qualified field @@ -1693,7 +1716,7 @@ Item_field::Item_field(Name_resolution_context *context_arg, const char *field_name_arg) :Item_ident(context_arg, db_arg,table_name_arg,field_name_arg), field(0), result_field(0), item_equal(0), no_const_subst(0), - have_privileges(0), any_privileges(0), fixed_as_field(0) + have_privileges(0), any_privileges(0) { SELECT_LEX *select= current_thd->lex->current_select; collation.set(DERIVATION_IMPLICIT); @@ -1709,8 +1732,7 @@ Item_field::Item_field(THD *thd, Item_field *item) item_equal(item->item_equal), no_const_subst(item->no_const_subst), have_privileges(item->have_privileges), - any_privileges(item->any_privileges), - fixed_as_field(item->fixed_as_field) + any_privileges(item->any_privileges) { collation.set(DERIVATION_IMPLICIT); } @@ -1791,9 +1813,10 @@ void Item_ident::print(String *str) } } - if (!table_name || !field_name) + if (!table_name || !field_name || !field_name[0]) { - const char *nm= field_name ? field_name : name ? name : "tmp_field"; + const char *nm= (field_name && field_name[0]) ? + field_name : name ? name : "tmp_field"; append_identifier(thd, str, nm, (uint) strlen(nm)); return; } @@ -1867,7 +1890,7 @@ String *Item_field::str_result(String *str) return result_field->val_str(str,&str_value); } -bool Item_field::get_date(TIME *ltime,uint fuzzydate) +bool Item_field::get_date(MYSQL_TIME *ltime,uint fuzzydate) { if ((null_value=field->is_null()) || field->get_date(ltime,fuzzydate)) { @@ -1877,7 +1900,7 @@ bool Item_field::get_date(TIME *ltime,uint fuzzydate) return 0; } -bool Item_field::get_date_result(TIME *ltime,uint fuzzydate) +bool Item_field::get_date_result(MYSQL_TIME *ltime,uint fuzzydate) { if ((null_value=result_field->is_null()) || result_field->get_date(ltime,fuzzydate)) @@ -1888,7 +1911,7 @@ bool Item_field::get_date_result(TIME *ltime,uint fuzzydate) return 0; } -bool Item_field::get_time(TIME *ltime) +bool Item_field::get_time(MYSQL_TIME *ltime) { if ((null_value=field->is_null()) || field->get_time(ltime)) { @@ -2415,7 +2438,7 @@ void Item_param::set_decimal(const char *str, ulong length) /* - Set parameter value from TIME value. + Set parameter value from MYSQL_TIME value. 
SYNOPSIS set_time() @@ -2429,7 +2452,7 @@ void Item_param::set_decimal(const char *str, ulong length) the fact that even wrong value sent over binary protocol fits into MAX_DATE_STRING_REP_LENGTH buffer. */ -void Item_param::set_time(TIME *tm, timestamp_type time_type, +void Item_param::set_time(MYSQL_TIME *tm, timestamp_type time_type, uint32 max_length_arg) { DBUG_ENTER("Item_param::set_time"); @@ -2444,7 +2467,8 @@ void Item_param::set_time(TIME *tm, timestamp_type time_type, { char buff[MAX_DATE_STRING_REP_LENGTH]; uint length= my_TIME_to_str(&value.time, buff); - make_truncated_value_warning(current_thd, buff, length, time_type, 0); + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + buff, length, time_type, 0); set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR); } @@ -2524,7 +2548,7 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) item_result_type= REAL_RESULT; break; case INT_RESULT: - set_int(*(longlong*)entry->value, 21); + set_int(*(longlong*)entry->value, MY_INT64_NUM_DECIMAL_DIGITS); item_type= Item::INT_ITEM; item_result_type= INT_RESULT; break; @@ -2646,7 +2670,7 @@ int Item_param::save_in_field(Field *field, bool no_conversions) } -bool Item_param::get_time(TIME *res) +bool Item_param::get_time(MYSQL_TIME *res) { if (state == TIME_VALUE) { @@ -2661,7 +2685,7 @@ bool Item_param::get_time(TIME *res) } -bool Item_param::get_date(TIME *res, uint fuzzydate) +bool Item_param::get_date(MYSQL_TIME *res, uint fuzzydate) { if (state == TIME_VALUE) { @@ -3087,7 +3111,7 @@ String* Item_ref_null_helper::val_str(String* s) } -bool Item_ref_null_helper::get_date(TIME *ltime, uint fuzzydate) +bool Item_ref_null_helper::get_date(MYSQL_TIME *ltime, uint fuzzydate) { return (owner->was_null|= null_value= (*ref)->get_date(ltime, fuzzydate)); } @@ -3354,7 +3378,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) ORDER *group_list= (ORDER*) select->group_list.first; bool ambiguous_fields= FALSE; uint counter; - bool not_used; + enum_resolution_type resolution; /* Search for a column or derived column named as 'ref' in the SELECT @@ -3362,8 +3386,10 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) */ if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used))) + &resolution))) return NULL; /* Some error occurred. */ + if (resolution == RESOLVED_AGAINST_ALIAS) + ref->alias_name_used= TRUE; /* If this is a non-aggregated field inside HAVING, search in GROUP BY. 
*/ if (select->having_fix_field && !ref->with_sum_func && group_list) @@ -3473,12 +3499,18 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) */ Name_resolution_context *last_checked_context= context; Item **ref= (Item **) not_found_item; - Name_resolution_context *outer_context= context->outer_context; + SELECT_LEX *current_sel= (SELECT_LEX *) thd->lex->current_select; + Name_resolution_context *outer_context= 0; + SELECT_LEX *select= 0; + /* Currently derived tables cannot be correlated */ + if (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE) + outer_context= context->outer_context; for (; outer_context; outer_context= outer_context->outer_context) { - SELECT_LEX *select= outer_context->select_lex; + select= outer_context->select_lex; Item_subselect *prev_subselect_item= last_checked_context->select_lex->master_unit()->item; last_checked_context= outer_context; @@ -3521,45 +3553,28 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) } if (*from_field != view_ref_found) { - prev_subselect_item->used_tables_cache|= (*from_field)->table->map; prev_subselect_item->const_item_cache= 0; + set_field(*from_field); if (!last_checked_context->select_lex->having_fix_field && - !fixed_as_field) + select->group_list.elements) { Item_outer_ref *rf; - Query_arena *arena= 0, backup; /* - Each outer field is replaced for an Item_outer_ref object. - This is done in order to get correct results when the outer - select employs a temporary table. - The original fields are saved in the inner_fields_list of the - outer select. This list is created by the following reasons: - 1. We can't add field items to the outer select list directly - because the outer select hasn't been fully fixed yet. - 2. We need a location to refer to in the Item_ref object - so the inner_fields_list is used as such temporary - reference storage. - The new Item_outer_ref object replaces the original field and is - also saved in the inner_refs_list of the outer select. Here - it is only created. It can be fixed only after the original - field has been fixed and this is done in the fix_inner_refs() - function. + If an outer field is resolved in a grouping select then it + is replaced for an Item_outer_ref object. Otherwise an + Item_field object is used. + The new Item_outer_ref object is saved in the inner_refs_list of + the outer select. Here it is only created. It can be fixed only + after the original field has been fixed and this is done in the + fix_inner_refs() function. */ - set_field(*from_field); - arena= thd->activate_stmt_arena_if_needed(&backup); - rf= new Item_outer_ref(context, this); - if (!rf) - { - if (arena) - thd->restore_active_arena(arena, &backup); + ; + if (!(rf= new Item_outer_ref(context, this))) return -1; - } - *reference= rf; + thd->change_item_tree(reference, rf); select->inner_refs_list.push_back(rf); - if (arena) - thd->restore_active_arena(arena, &backup); - fixed_as_field= 1; + rf->in_sum_func= thd->lex->in_sum_func; } if (thd->lex->in_sum_func && thd->lex->in_sum_func->nest_level == @@ -3664,12 +3679,21 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) *ref= NULL; // Don't call set_properties() rf= (place == IN_HAVING ? new Item_ref(context, ref, (char*) table_name, - (char*) field_name) : + (char*) field_name, alias_name_used) : + (!select->group_list.elements ? 
new Item_direct_ref(context, ref, (char*) table_name, - (char*) field_name)); + (char*) field_name, alias_name_used) : + new Item_outer_ref(context, ref, (char*) table_name, + (char*) field_name, alias_name_used))); *ref= save; if (!rf) return -1; + + if (place != IN_HAVING && select->group_list.elements) + { + outer_context->select_lex->inner_refs_list.push_back((Item_outer_ref*)rf); + ((Item_outer_ref*)rf)->in_sum_func= thd->lex->in_sum_func; + } thd->change_item_tree(reference, rf); /* rf is Item_ref => never substitute other items (in this case) @@ -3784,12 +3808,14 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if (thd->lex->current_select->is_item_list_lookup) { uint counter; - bool not_used; + enum_resolution_type resolution; Item** res= find_item_in_list(this, thd->lex->current_select->item_list, &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used); + &resolution); if (!res) return 1; + if (resolution == RESOLVED_AGAINST_ALIAS) + alias_name_used= TRUE; if (res != (Item **)not_found_item) { if ((*res)->type() == Item::FIELD_ITEM) @@ -3897,7 +3923,9 @@ bool Item_field::fix_fields(THD *thd, Item **reference) { /* First usage of column */ table->used_fields++; // Used to optimize loops - table->used_keys.intersect(field->part_of_key); + /* purecov: begin inspected */ + table->covering_keys.intersect(field->part_of_key); + /* purecov: end */ } } } @@ -4115,7 +4143,9 @@ bool Item_field::set_no_const_sub(byte *arg) DESCRIPTION The function returns a pointer to an item that is taken from the very beginning of the item_equal list which the Item_field - object refers to (belongs to). + object refers to (belongs to) unless item_equal contains a constant + item. In this case the function returns this constant item, + (if the substitution does not require conversion). If the Item_field object does not refer any Item_equal object 'this' is returned @@ -4124,7 +4154,8 @@ bool Item_field::set_no_const_sub(byte *arg) of the thransformer method. RETURN VALUES - pointer to a replacement Item_field if there is a better equal item; + pointer to a replacement Item_field if there is a better equal item or + a pointer to a constant equal item; this - otherwise. 
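The expanded documentation for Item_field::replace_equal_field() above, together with the body that follows, describes the new rule: if the field's multiple-equality class contains a constant, substitute that constant unless the comparison context differs; otherwise fall back to the head of the class. The toy model below mirrors only that decision; EqClass, Expr and CmpContext are invented stand-ins, not the server's Item_equal API.

/*
  Toy model of the substitution rule documented for
  replace_equal_field().  Every type here is an invented stand-in.
*/
#include <cstdio>
#include <vector>

enum CmpContext { CMP_UNKNOWN= -1, CMP_INT= 0, CMP_STRING= 1 };

struct Expr
{
  const char *name;
  bool        is_const;
  CmpContext  cmp_context;
};

struct EqClass                  /* models Item_equal: fields plus maybe a constant */
{
  std::vector<Expr*> members;
  Expr *get_const()
  {
    for (size_t i= 0; i < members.size(); i++)
      if (members[i]->is_const)
        return members[i];
    return NULL;
  }
  Expr *get_first() { return members.empty() ? NULL : members[0]; }
};

static Expr *replace_equal_field(Expr *field, EqClass *eq)
{
  if (!eq)
    return field;
  if (Expr *c= eq->get_const())
  {
    /* keep the field if the substitution would need a context conversion */
    if (field->cmp_context != CMP_UNKNOWN && c->cmp_context != field->cmp_context)
      return field;
    return c;
  }
  Expr *first= eq->get_first();
  return (first && first != field) ? first : field;
}

int main()
{
  Expr a= { "t1.a", false, CMP_INT };
  Expr b= { "t2.b", false, CMP_INT };
  Expr k= { "5",    true,  CMP_INT };
  EqClass eq;
  eq.members.push_back(&b);
  eq.members.push_back(&a);
  eq.members.push_back(&k);
  printf("%s\n", replace_equal_field(&a, &eq)->name);   /* prints "5" */
  return 0;
}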
*/ @@ -4132,6 +4163,14 @@ Item *Item_field::replace_equal_field(byte *arg) { if (item_equal) { + Item *const_item= item_equal->get_const(); + if (const_item) + { + if (cmp_context != (Item_result)-1 && + const_item->cmp_context != cmp_context) + return this; + return const_item; + } Item_field *subst= item_equal->get_first(); if (subst && !field->eq(subst->field)) return subst; @@ -4335,13 +4374,16 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_GEOMETRY: if (this->type() == Item::TYPE_HOLDER) field= new Field_blob(max_length, maybe_null, name, collation.collation, 1); else field= new Field_blob(max_length, maybe_null, name, collation.collation); break; // Blob handled outside of case + case MYSQL_TYPE_GEOMETRY: + return new Field_geom(max_length, maybe_null, name, table->s, + (Field::geometry_type) + ((Item_geometry_func *)this)->get_geometry_type()); } if (field) field->init(table); @@ -4900,7 +4942,7 @@ bool Item::send(Protocol *protocol, String *buffer) case MYSQL_TYPE_DATE: case MYSQL_TYPE_TIMESTAMP: { - TIME tm; + MYSQL_TIME tm; get_date(&tm, TIME_FUZZY_DATE); if (!null_value) { @@ -4913,7 +4955,7 @@ bool Item::send(Protocol *protocol, String *buffer) } case MYSQL_TYPE_TIME: { - TIME tm; + MYSQL_TIME tm; get_time(&tm); if (!null_value) result= protocol->store_time(&tm); @@ -4993,12 +5035,30 @@ Item *Item_field::update_value_transformer(byte *select_arg) } +void Item_field::print(String *str) +{ + if (field && field->table->const_table) + { + char buff[MAX_FIELD_WIDTH]; + String tmp(buff,sizeof(buff),str->charset()); + field->val_str(&tmp); + str->append('\''); + str->append(tmp); + str->append('\''); + return; + } + Item_ident::print(str); +} + + Item_ref::Item_ref(Name_resolution_context *context_arg, Item **item, const char *table_name_arg, - const char *field_name_arg) + const char *field_name_arg, + bool alias_name_used_arg) :Item_ident(context_arg, NullS, table_name_arg, field_name_arg), result_field(0), ref(item) { + alias_name_used= alias_name_used_arg; /* This constructor used to create some internals references over fixed items */ @@ -5281,11 +5341,13 @@ void Item_ref::set_properties() */ with_sum_func= (*ref)->with_sum_func; unsigned_flag= (*ref)->unsigned_flag; + fixed= 1; + if (alias_name_used) + return; if ((*ref)->type() == FIELD_ITEM) alias_name_used= ((Item_ident *) (*ref))->alias_name_used; else alias_name_used= TRUE; // it is not field, so it is was resolved by alias - fixed= 1; } @@ -5303,7 +5365,7 @@ void Item_ref::print(String *str) if (ref) { if ((*ref)->type() != Item::CACHE_ITEM && ref_type() != VIEW_REF && - ref_type() != OUTER_REF && name && alias_name_used) + !table_name && name && alias_name_used) { THD *thd= current_thd; append_identifier(thd, str, name, (uint) strlen(name)); @@ -5445,7 +5507,7 @@ bool Item_ref::is_null() } -bool Item_ref::get_date(TIME *ltime,uint fuzzydate) +bool Item_ref::get_date(MYSQL_TIME *ltime,uint fuzzydate) { return (null_value=(*ref)->get_date_result(ltime,fuzzydate)); } @@ -5544,7 +5606,7 @@ bool Item_direct_ref::is_null() } -bool Item_direct_ref::get_date(TIME *ltime,uint fuzzydate) +bool Item_direct_ref::get_date(MYSQL_TIME *ltime,uint fuzzydate) { return (null_value=(*ref)->get_date(ltime,fuzzydate)); } @@ -5589,16 +5651,19 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference) bool Item_outer_ref::fix_fields(THD *thd, Item **reference) { - DBUG_ASSERT(*ref); - /* 
outer_field->check_cols() will be made in Item_direct_ref::fix_fields */ - outer_field->fixed_as_field= 1; - if (!outer_field->fixed && - (outer_field->fix_fields(thd, reference))) + bool err; + /* outer_ref->check_cols() will be made in Item_direct_ref::fix_fields */ + if ((*ref) && !(*ref)->fixed && ((*ref)->fix_fields(thd, reference))) return TRUE; - table_name= outer_field->table_name; - return Item_direct_ref::fix_fields(thd, reference); + err= Item_direct_ref::fix_fields(thd, reference); + if (!outer_ref) + outer_ref= *ref; + if ((*ref)->type() == Item::FIELD_ITEM) + table_name= ((Item_field*)outer_ref)->table_name; + return err; } + /* Compare two view column references for equality. @@ -6394,8 +6459,6 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) :Item(thd, item), enum_set_typelib(0), fld_type(get_real_type(item)) { DBUG_ASSERT(item->fixed); - - max_length= display_length(item); maybe_null= item->maybe_null; collation.set(item->collation); get_full_info(item); @@ -6567,11 +6630,17 @@ bool Item_type_holder::join_types(THD *thd, Item *item) { int delta1= max_length_orig - decimals_orig; int delta2= item->max_length - item->decimals; - if (fld_type == MYSQL_TYPE_DECIMAL) - max_length= max(delta1, delta2) + decimals; - else - max_length= min(max(delta1, delta2) + decimals, - (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7); + max_length= max(delta1, delta2) + decimals; + if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2) + { + max_length= FLT_DIG + 6; + decimals= NOT_FIXED_DEC; + } + if (fld_type == MYSQL_TYPE_DOUBLE && max_length > DBL_DIG + 2) + { + max_length= DBL_DIG + 7; + decimals= NOT_FIXED_DEC; + } } else max_length= (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7; @@ -6633,7 +6702,7 @@ uint32 Item_type_holder::display_length(Item *item) case MYSQL_TYPE_SHORT: return 6; case MYSQL_TYPE_LONG: - return 11; + return MY_INT32_NUM_DECIMAL_DIGITS; case MYSQL_TYPE_FLOAT: return 25; case MYSQL_TYPE_DOUBLE: diff --git a/sql/item.h b/sql/item.h index be4636db91e..3d20aaf66cd 100644 --- a/sql/item.h +++ b/sql/item.h @@ -730,17 +730,16 @@ public: /* Called for items that really have to be split */ void split_sum_func2(THD *thd, Item **ref_pointer_array, List<Item> &fields, Item **ref, bool skip_registered); - virtual bool get_date(TIME *ltime,uint fuzzydate); - virtual bool get_time(TIME *ltime); - virtual bool get_date_result(TIME *ltime,uint fuzzydate) + virtual bool get_date(MYSQL_TIME *ltime,uint fuzzydate); + virtual bool get_time(MYSQL_TIME *ltime); + virtual bool get_date_result(MYSQL_TIME *ltime,uint fuzzydate) { return get_date(ltime,fuzzydate); } /* - This function is used only in Item_func_isnull/Item_func_isnotnull - (implementations of IS NULL/IS NOT NULL clauses). Item_func_is{not}null - calls this method instead of one of val/result*() methods, which - normally will set null_value. This allows to determine nullness of - a complex expression without fully evaluating it. - Any new item which can be NULL must implement this call. + The method allows to determine nullness of a complex expression + without fully evaluating it, instead of calling val/result*() then + checking null_value. Used in Item_func_isnull/Item_func_isnotnull + and Item_sum_count/Item_sum_count_distinct. + Any new item which can be NULL must implement this method. 
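The Item_type_holder::join_types() hunk above clamps the aggregated display width of FLOAT and DOUBLE results: once max_length grows past FLT_DIG+2 (respectively DBL_DIG+2) it is pinned to FLT_DIG+6 (DBL_DIG+7) and decimals falls back to NOT_FIXED_DEC. On an IEEE-754 platform those caps evaluate as below; this is plain standard C++, and NOT_FIXED_DEC itself is a server constant not reproduced here.

/*
  The display-width caps used by the join_types() change.  On IEEE-754
  systems FLT_DIG is 6 and DBL_DIG is 15, so the caps are 12 and 22.
*/
#include <cfloat>
#include <cstdio>

int main()
{
  printf("FLOAT : FLT_DIG=%d, capped width=%d\n", FLT_DIG, FLT_DIG + 6);
  printf("DOUBLE: DBL_DIG=%d, capped width=%d\n", DBL_DIG, DBL_DIG + 7);
  return 0;
}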
*/ virtual bool is_null() { return 0; } @@ -1116,7 +1115,7 @@ inline Item_result Item_splocal::result_type() const class Item_case_expr :public Item_sp_variable { public: - Item_case_expr(int case_expr_id); + Item_case_expr(uint case_expr_id); public: Item *this_item(); @@ -1135,7 +1134,7 @@ public: void print(String *str); private: - int m_case_expr_id; + uint m_case_expr_id; }; /***************************************************************************** @@ -1317,7 +1316,6 @@ public: uint have_privileges; /* field need any privileges (for VIEW creation) */ bool any_privileges; - bool fixed_as_field; Item_field(Name_resolution_context *context_arg, const char *db_arg,const char *table_name_arg, const char *field_name_arg); @@ -1373,9 +1371,9 @@ public: } Field *get_tmp_table_field() { return result_field; } Field *tmp_table_field(TABLE *t_arg) { return result_field; } - bool get_date(TIME *ltime,uint fuzzydate); - bool get_date_result(TIME *ltime,uint fuzzydate); - bool get_time(TIME *ltime); + bool get_date(MYSQL_TIME *ltime,uint fuzzydate); + bool get_date_result(MYSQL_TIME *ltime,uint fuzzydate); + bool get_time(MYSQL_TIME *ltime); bool is_null() { return field->is_null(); } void update_null_value(); Item *get_tmp_table_item(THD *thd); @@ -1398,6 +1396,7 @@ public: Item *safe_charset_converter(CHARSET_INFO *tocs); int fix_outer_field(THD *thd, Field **field, Item **reference); virtual Item *update_value_transformer(byte *select_arg); + void print(String *str); friend class Item_default_value; friend class Item_insert_value; friend class st_select_lex_unit; @@ -1498,7 +1497,7 @@ public: */ CHARSET_INFO *final_character_set_of_str_value; } cs_info; - TIME time; + MYSQL_TIME time; } value; /* Cached values for virtual methods to save us one switch. 
*/ @@ -1530,8 +1529,8 @@ public: longlong val_int(); my_decimal *val_decimal(my_decimal*); String *val_str(String*); - bool get_time(TIME *tm); - bool get_date(TIME *tm, uint fuzzydate); + bool get_time(MYSQL_TIME *tm); + bool get_date(MYSQL_TIME *tm, uint fuzzydate); int save_in_field(Field *field, bool no_conversions); void set_null(); @@ -1540,7 +1539,7 @@ public: void set_decimal(const char *str, ulong length); bool set_str(const char *str, ulong length); bool set_longdata(const char *str, ulong length); - void set_time(TIME *tm, timestamp_type type, uint32 max_length_arg); + void set_time(MYSQL_TIME *tm, timestamp_type type, uint32 max_length_arg); bool set_from_user_var(THD *thd, const user_var_entry *entry); void reset(); /* @@ -1591,11 +1590,14 @@ class Item_int :public Item_num { public: longlong value; - Item_int(int32 i,uint length=11) :value((longlong) i) + Item_int(int32 i,uint length= MY_INT32_NUM_DECIMAL_DIGITS) + :value((longlong) i) { max_length=length; fixed= 1; } - Item_int(longlong i,uint length=21) :value(i) + Item_int(longlong i,uint length= MY_INT64_NUM_DECIMAL_DIGITS) + :value(i) { max_length=length; fixed= 1; } - Item_int(ulonglong i, uint length= 21) :value((longlong)i) + Item_int(ulonglong i, uint length= MY_INT64_NUM_DECIMAL_DIGITS) + :value((longlong)i) { max_length=length; fixed= 1; unsigned_flag= 1; } Item_int(const char *str_arg,longlong i,uint length) :value(i) { max_length=length; name=(char*) str_arg; fixed= 1; } @@ -1810,7 +1812,11 @@ public: str_value.length(), collation.collation); } Item *safe_charset_converter(CHARSET_INFO *tocs); - inline void append(char *str, uint length) { str_value.append(str, length); } + inline void append(char *str, uint length) + { + str_value.append(str, length); + max_length= str_value.numchars() * collation.collation->mbmaxlen; + } void print(String *str); // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} @@ -1876,7 +1882,10 @@ public: Item_hex_string(const char *str,uint str_length); enum Type type() const { return VARBIN_ITEM; } double val_real() - { DBUG_ASSERT(fixed == 1); return (double) Item_hex_string::val_int(); } + { + DBUG_ASSERT(fixed == 1); + return (double) (ulonglong) Item_hex_string::val_int(); + } longlong val_int(); bool basic_const_item() const { return 1; } String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } @@ -1951,7 +1960,8 @@ public: with Bar, and if we have a more broader set of problems like this. */ Item_ref(Name_resolution_context *context_arg, Item **item, - const char *table_name_arg, const char *field_name_arg); + const char *table_name_arg, const char *field_name_arg, + bool alias_name_used_arg= FALSE); /* Constructor need to process subselect with temporary tables (see Item) */ Item_ref(THD *thd, Item_ref *item) @@ -1968,7 +1978,7 @@ public: bool val_bool(); String *val_str(String* tmp); bool is_null(); - bool get_date(TIME *ltime,uint fuzzydate); + bool get_date(MYSQL_TIME *ltime,uint fuzzydate); double val_result(); longlong val_int_result(); String *str_result(String* tmp); @@ -1992,6 +2002,11 @@ public: { return depended_from ? 
OUTER_REF_TABLE_BIT : (*ref)->used_tables(); } + void update_used_tables() + { + if (!depended_from) + (*ref)->update_used_tables(); + } table_map not_null_tables() const { return (*ref)->not_null_tables(); } void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return 1; } @@ -2026,8 +2041,11 @@ class Item_direct_ref :public Item_ref public: Item_direct_ref(Name_resolution_context *context_arg, Item **item, const char *table_name_arg, - const char *field_name_arg) - :Item_ref(context_arg, item, table_name_arg, field_name_arg) {} + const char *field_name_arg, + bool alias_name_used_arg= FALSE) + :Item_ref(context_arg, item, table_name_arg, + field_name_arg, alias_name_used_arg) + {} /* Constructor need to process subselect with temporary tables (see Item) */ Item_direct_ref(THD *thd, Item_direct_ref *item) : Item_ref(thd, item) {} @@ -2037,7 +2055,7 @@ public: my_decimal *val_decimal(my_decimal *); bool val_bool(); bool is_null(); - bool get_date(TIME *ltime,uint fuzzydate); + bool get_date(MYSQL_TIME *ltime,uint fuzzydate); virtual Ref_Type ref_type() { return DIRECT_REF; } }; @@ -2062,30 +2080,49 @@ public: }; +/* + Class for outer fields. + An object of this class is created when the select where the outer field was + resolved is a grouping one. After it has been fixed the ref field will point + to either an Item_ref or an Item_direct_ref object which will be used to + access the field. + See also comments for the fix_inner_refs() and the + Item_field::fix_outer_field() functions. +*/ + +class Item_sum; class Item_outer_ref :public Item_direct_ref { public: - Item_field *outer_field; + Item *outer_ref; + /* The aggregate function under which this outer ref is used, if any. */ + Item_sum *in_sum_func; + /* + TRUE <=> that the outer_ref is already present in the select list + of the outer select. 
+ */ + bool found_in_select_list; Item_outer_ref(Name_resolution_context *context_arg, Item_field *outer_field_arg) :Item_direct_ref(context_arg, 0, outer_field_arg->table_name, - outer_field_arg->field_name), - outer_field(outer_field_arg) + outer_field_arg->field_name), + outer_ref(outer_field_arg), in_sum_func(0), + found_in_select_list(0) { - ref= (Item**)&outer_field; + ref= &outer_ref; set_properties(); fixed= 0; } - void cleanup() - { - ref= (Item**)&outer_field; - fixed= 0; - Item_direct_ref::cleanup(); - outer_field->cleanup(); - } + Item_outer_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, const char *field_name_arg, + bool alias_name_used_arg) + :Item_direct_ref(context_arg, item, table_name_arg, field_name_arg, + alias_name_used_arg), + outer_ref(0), in_sum_func(0), found_in_select_list(1) + {} void save_in_result_field(bool no_conversions) { - outer_field->save_org_in_field(result_field); + outer_ref->save_org_in_field(result_field); } bool fix_fields(THD *, Item **); table_map used_tables() const @@ -2123,7 +2160,7 @@ public: String* val_str(String* s); my_decimal *val_decimal(my_decimal *); bool val_bool(); - bool get_date(TIME *ltime, uint fuzzydate); + bool get_date(MYSQL_TIME *ltime, uint fuzzydate); void print(String *str); /* we add RAND_TABLE_BIT to prevent moving this item from HAVING to WHERE diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index e78550598f5..0a177a58dbd 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -69,26 +69,80 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems) /* + Compare row signature of two expressions + + SYNOPSIS: + cmp_row_type() + item1 the first expression + item2 the second expression + + DESCRIPTION + The function checks that two expressions have compatible row signatures + i.e. that the number of columns they return are the same and that if they + are both row expressions then each component from the first expression has + a row signature compatible with the signature of the corresponding component + of the second expression. + + RETURN VALUES + 1 type incompatibility has been detected + 0 otherwise +*/ + +static int cmp_row_type(Item* item1, Item* item2) +{ + uint n= item1->cols(); + if (item2->check_cols(n)) + return 1; + for (uint i=0; i<n; i++) + { + if (item2->element_index(i)->check_cols(item1->element_index(i)->cols()) || + (item1->element_index(i)->result_type() == ROW_RESULT && + cmp_row_type(item1->element_index(i), item2->element_index(i)))) + return 1; + } + return 0; +} + + +/* Aggregates result types from the array of items. - SYNOPSIS + SYNOPSIS: agg_cmp_type() - items array of items to aggregate the type from - nitems number of items in the array + type [out] the aggregated type + items array of items to aggregate the type from + nitems number of items in the array DESCRIPTION This function aggregates result types from the array of items. Found type supposed to be used later for comparison of values of these items. Aggregation itself is performed by the item_cmp_type() function. + The function also checks compatibility of row signatures for the + submitted items (see the spec for the cmp_row_type function). 
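The cmp_row_type() spec and the agg_cmp_type() description above boil down to one structural rule: both operands must expose the same number of columns, and for nested row expressions every component pair must be compatible in the same recursive sense. The stand-alone model below captures just that rule; RowSig is invented for the sketch and a scalar is simply a node without children, whereas the real functions walk Item trees through cols() and element_index().

/*
  Recursive row-signature compatibility check, modelled on the behaviour
  documented for cmp_row_type(): equal cardinality at every level.
  RowSig is an invented stand-in for an Item's row structure.
*/
#include <cstdio>
#include <vector>

struct RowSig
{
  std::vector<const RowSig*> cols;   /* empty == scalar */
};

static bool rows_incompatible(const RowSig &a, const RowSig &b)
{
  if (a.cols.size() != b.cols.size())
    return true;                                  /* different cardinality */
  for (size_t i= 0; i < a.cols.size(); i++)
    if (rows_incompatible(*a.cols[i], *b.cols[i]))
      return true;                                /* component mismatch */
  return false;
}

int main()
{
  RowSig s1, s2, s3, s4, s5;                      /* scalars, e.g. column refs */
  RowSig pair;   pair.cols.push_back(&s1);   pair.cols.push_back(&s2);
  RowSig pair2;  pair2.cols.push_back(&s3);  pair2.cols.push_back(&s4);
  RowSig triple; triple.cols.push_back(&s3); triple.cols.push_back(&s4);
                 triple.cols.push_back(&s5);

  printf("%d\n", rows_incompatible(pair, pair2));  /* 0: (a,b) vs (c,d)   */
  printf("%d\n", rows_incompatible(pair, triple)); /* 1: (a,b) vs (c,d,e) */
  printf("%d\n", rows_incompatible(s1, pair));     /* 1: scalar vs row    */
  return 0;
}

This is why the reworked agg_cmp_type() and collect_cmp_types() return an error or an empty bitmap as soon as any pair of arguments fails the check, instead of silently aggregating incompatible row expressions.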
+ + RETURN VALUES + 1 type incompatibility has been detected + 0 otherwise */ -static Item_result agg_cmp_type(Item **items, uint nitems) +static int agg_cmp_type(Item_result *type, Item **items, uint nitems) { uint i; - Item_result type= items[0]->result_type(); + type[0]= items[0]->result_type(); for (i= 1 ; i < nitems ; i++) - type= item_cmp_type(type, items[i]->result_type()); - return type; + { + type[0]= item_cmp_type(type[0], items[i]->result_type()); + /* + When aggregating types of two row expressions we have to check + that they have the same cardinality and that each component + of the first row expression has a compatible row signature with + the signature of the corresponding component of the second row + expression. + */ + if (type[0] == ROW_RESULT && cmp_row_type(items[0], items[i])) + return 1; // error found: invalid usage of rows + } + return 0; } @@ -105,7 +159,8 @@ static Item_result agg_cmp_type(Item **items, uint nitems) item in the list with each of the remaining items in the 'items' array. RETURN - Bitmap of collected types + 0 - if row type incompatibility has been detected (see cmp_row_type) + Bitmap of collected types - otherwise */ static uint collect_cmp_types(Item **items, uint nitems) @@ -116,12 +171,17 @@ static uint collect_cmp_types(Item **items, uint nitems) DBUG_ASSERT(nitems > 1); found_types= 0; for (i= 1; i < nitems ; i++) + { + if ((left_result == ROW_RESULT || + items[i]->result_type() == ROW_RESULT) && + cmp_row_type(items[0], items[i])) + return 0; found_types|= 1<< (uint)item_cmp_type(left_result, items[i]->result_type()); + } return found_types; } - static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { @@ -293,6 +353,7 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item) { TABLE *table= field->table; ulong orig_sql_mode= thd->variables.sql_mode; + enum_check_fields orig_count_cuted_fields= thd->count_cuted_fields; my_bitmap_map *old_write_map; my_bitmap_map *old_read_map; @@ -305,7 +366,9 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item) old_read_map= dbug_tmp_use_all_columns(table, table->read_set); } /* For comparison purposes allow invalid dates like 2000-01-32 */ - thd->variables.sql_mode|= MODE_INVALID_DATES; + thd->variables.sql_mode= (orig_sql_mode & ~MODE_NO_ZERO_DATE) | + MODE_INVALID_DATES; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; if (!(*item)->save_in_field(field, 1) && !((*item)->null_value)) { Item *tmp= new Item_int_with_ref(field->val_int(), *item, @@ -315,6 +378,7 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item) result= 1; // Item was replaced } thd->variables.sql_mode= orig_sql_mode; + thd->count_cuted_fields= orig_count_cuted_fields; if (table) { dbug_tmp_restore_column_map(table->write_set, old_write_map); @@ -804,8 +868,18 @@ int Arg_comparator::compare_row() if (owner->null_value) { // NULL was compared - if (owner->abort_on_null) - return -1; // We do not need correct NULL returning + switch (owner->functype()) { + case Item_func::NE_FUNC: + break; // NE never aborts on NULL even if abort_on_null is set + case Item_func::LT_FUNC: + case Item_func::LE_FUNC: + case Item_func::GT_FUNC: + case Item_func::GE_FUNC: + return -1; // <, <=, > and >= always fail on NULL + default: // EQ_FUNC + if (owner->abort_on_null) + return -1; // We do not need correct NULL returning + } was_null= 1; owner->null_value= 0; res= 0; // continue comparison (maybe we will meet explicit difference) @@ -816,8 +890,8 @@ int 
Arg_comparator::compare_row() if (was_null) { /* - There was NULL(s) in comparison in some parts, but there was not - explicit difference in other parts, so we have to return NULL + There was NULL(s) in comparison in some parts, but there was no + explicit difference in other parts, so we have to return NULL. */ owner->null_value= 1; return -1; @@ -1127,6 +1201,26 @@ longlong Item_func_strcmp::val_int() } +bool Item_func_opt_neg::eq(const Item *item, bool binary_cmp) const +{ + /* Assume we don't have rtti */ + if (this == item) + return 1; + if (item->type() != FUNC_ITEM) + return 0; + Item_func *item_func=(Item_func*) item; + if (arg_count != item_func->arg_count || + functype() != item_func->functype()) + return 0; + if (negated != ((Item_func_opt_neg *) item_func)->negated) + return 0; + for (uint i=0; i < arg_count ; i++) + if (!args[i]->eq(item_func->arguments()[i], binary_cmp)) + return 0; + return 1; +} + + void Item_func_interval::fix_length_and_dec() { use_decimal_comparison= ((row->element_index(0)->result_type() == @@ -1335,7 +1429,8 @@ void Item_func_between::fix_length_and_dec() */ if (!args[0] || !args[1] || !args[2]) return; - cmp_type= agg_cmp_type(args, 3); + if ( agg_cmp_type(&cmp_type, args, 3)) + return; if (cmp_type == STRING_RESULT && agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1)) return; @@ -2024,7 +2119,8 @@ void Item_func_case::fix_length_and_dec() for (nagg= 0; nagg < ncases/2 ; nagg++) agg[nagg+1]= args[nagg*2]; nagg++; - found_types= collect_cmp_types(agg, nagg); + if (!(found_types= collect_cmp_types(agg, nagg))) + return; for (i= 0; i <= (uint)DECIMAL_RESULT; i++) { @@ -2190,9 +2286,100 @@ void Item_func_coalesce::fix_length_and_dec() Classes and function for the IN operator ****************************************************************************/ -static int cmp_longlong(void *cmp_arg, longlong *a,longlong *b) +/* + Determine which of the signed longlong arguments is bigger + + SYNOPSIS + cmp_longs() + a_val left argument + b_val right argument + + DESCRIPTION + This function will compare two signed longlong arguments + and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +static inline int cmp_longs (longlong a_val, longlong b_val) { - return *a < *b ? -1 : *a == *b ? 0 : 1; + return a_val < b_val ? -1 : a_val == b_val ? 0 : 1; +} + + +/* + Determine which of the unsigned longlong arguments is bigger + + SYNOPSIS + cmp_ulongs() + a_val left argument + b_val right argument + + DESCRIPTION + This function will compare two unsigned longlong arguments + and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +static inline int cmp_ulongs (ulonglong a_val, ulonglong b_val) +{ + return a_val < b_val ? -1 : a_val == b_val ? 
0 : 1; +} + + +/* + Compare two integers in IN value list format (packed_longlong) + + SYNOPSIS + cmp_longlong() + cmp_arg an argument passed to the calling function (qsort2) + a left argument + b right argument + + DESCRIPTION + This function will compare two integer arguments in the IN value list + format and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + It's used in sorting the IN values list and finding an element in it. + Depending on the signedness of the arguments cmp_longlong() will + compare them as either signed (using cmp_longs()) or unsigned (using + cmp_ulongs()). + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +int cmp_longlong(void *cmp_arg, + in_longlong::packed_longlong *a, + in_longlong::packed_longlong *b) +{ + if (a->unsigned_flag != b->unsigned_flag) + { + /* + One of the args is unsigned and is too big to fit into the + positive signed range. Report no match. + */ + if (a->unsigned_flag && ((ulonglong) a->val) > (ulonglong) LONGLONG_MAX || + b->unsigned_flag && ((ulonglong) b->val) > (ulonglong) LONGLONG_MAX) + return a->unsigned_flag ? 1 : -1; + /* + Although the signedness differs both args can fit into the signed + positive range. Make them signed and compare as usual. + */ + return cmp_longs (a->val, b->val); + } + if (a->unsigned_flag) + return cmp_ulongs ((ulonglong) a->val, (ulonglong) b->val); + else + return cmp_longs (a->val, b->val); } static int cmp_double(void *cmp_arg, double *a,double *b) @@ -2311,25 +2498,29 @@ byte *in_row::get_value(Item *item) void in_row::set(uint pos, Item *item) { DBUG_ENTER("in_row::set"); - DBUG_PRINT("enter", ("pos %u item 0x%lx", pos, (ulong) item)); + DBUG_PRINT("enter", ("pos: %u item: 0x%lx", pos, (ulong) item)); ((cmp_item_row*) base)[pos].store_value_by_template(&tmp, item); DBUG_VOID_RETURN; } in_longlong::in_longlong(uint elements) - :in_vector(elements,sizeof(longlong),(qsort2_cmp) cmp_longlong, 0) + :in_vector(elements,sizeof(packed_longlong),(qsort2_cmp) cmp_longlong, 0) {} void in_longlong::set(uint pos,Item *item) { - ((longlong*) base)[pos]=item->val_int(); + struct packed_longlong *buff= &((packed_longlong*) base)[pos]; + + buff->val= item->val_int(); + buff->unsigned_flag= item->unsigned_flag; } byte *in_longlong::get_value(Item *item) { - tmp= item->val_int(); + tmp.val= item->val_int(); if (item->null_value) return 0; + tmp.unsigned_flag= item->unsigned_flag; return (byte*) &tmp; } @@ -2363,7 +2554,8 @@ void in_decimal::set(uint pos, Item *item) dec->len= DECIMAL_BUFF_LENGTH; dec->fix_buffer_pointer(); my_decimal *res= item->val_decimal(dec); - if (res != dec) + /* if item->val_decimal() is evaluated to NULL then res == 0 */ + if (!item->null_value && res != dec) my_decimal2decimal(res, dec); } @@ -2632,7 +2824,8 @@ void Item_func_in::fix_length_and_dec() uint type_cnt= 0, i; Item_result cmp_type= STRING_RESULT; left_result_type= args[0]->result_type(); - found_types= collect_cmp_types(args, arg_count); + if (!(found_types= collect_cmp_types(args, arg_count))) + return; for (arg= args + 1, arg_end= args + arg_count; arg != arg_end ; arg++) { @@ -2665,6 +2858,31 @@ void Item_func_in::fix_length_and_dec() */ if (type_cnt == 1 && const_itm && !nulls_in_row()) { + /* + IN must compare INT/DATE/DATETIME/TIMESTAMP columns and constants + as int values (the same way as equality does). 
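The packed_longlong comparator defined above has to order a mix of signed and unsigned 64-bit values inside one IN list. The rule it implements: if exactly one side is unsigned and does not fit the positive signed range, that side is necessarily the larger one; otherwise both values fit a common range and an ordinary comparison is enough. A self-contained check of that rule (the names here are illustrative, not the server's):

    #include <cassert>
    #include <cstdint>

    static int cmp3(int64_t a, int64_t b) { return a < b ? -1 : a == b ? 0 : 1; }

    // Compare two 64-bit values whose signedness may differ.
    static int cmp_mixed(int64_t a, bool a_unsigned, int64_t b, bool b_unsigned)
    {
      if (a_unsigned != b_unsigned)
      {
        // Exactly one side is unsigned; if it does not fit the positive signed
        // range it is bigger than anything the signed side can hold.
        if ((a_unsigned && (uint64_t) a > (uint64_t) INT64_MAX) ||
            (b_unsigned && (uint64_t) b > (uint64_t) INT64_MAX))
          return a_unsigned ? 1 : -1;
        return cmp3(a, b);
      }
      if (a_unsigned)
        return (uint64_t) a < (uint64_t) b ? -1 :
               (uint64_t) a == (uint64_t) b ? 0 : 1;
      return cmp3(a, b);
    }

    int main()
    {
      assert(cmp_mixed(-1, false, (int64_t) UINT64_MAX, true) == -1);
      assert(cmp_mixed(5, true, 5, false) == 0);
      return 0;
    }
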
+ So we must check here if the column on the left and all the constant + values on the right can be compared as integers and adjust the + comparison type accordingly. + */ + if (args[0]->real_item()->type() == FIELD_ITEM && + thd->lex->sql_command != SQLCOM_CREATE_VIEW && + thd->lex->sql_command != SQLCOM_SHOW_CREATE && + cmp_type != INT_RESULT) + { + Field *field= ((Item_field*) (args[0]->real_item()))->field; + if (field->can_be_compared_as_longlong()) + { + bool all_converted= TRUE; + for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) + { + if (!convert_constant_item (thd, field, &arg[0])) + all_converted= FALSE; + } + if (all_converted) + cmp_type= INT_RESULT; + } + } switch (cmp_type) { case STRING_RESULT: array=new in_string(arg_count - 1,(qsort2_cmp) srtcmp_in, diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 3b08036368c..f35b6a126ed 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -333,7 +333,7 @@ public: bool is_bool_func() { return 1; } CHARSET_INFO *compare_collation() { return cmp.cmp_collation.collation; } uint decimal_precision() const { return 1; } - void top_level_item() { abort_on_null=1; } + void top_level_item() { abort_on_null= TRUE; } friend class Arg_comparator; }; @@ -563,6 +563,7 @@ public: negated= !negated; return this; } + bool eq(const Item *item, bool binary_cmp) const; bool subst_argument_checker(byte **arg) { return TRUE; } }; @@ -785,7 +786,16 @@ public: class in_longlong :public in_vector { - longlong tmp; + /* + Here we declare a temporary variable (tmp) of the same type as the + elements of this vector. tmp is used in finding if a given value is in + the list. + */ + struct packed_longlong + { + longlong val; + longlong unsigned_flag; // Use longlong, not bool, to preserve alignment + } tmp; public: in_longlong(uint elements); void set(uint pos,Item *item); @@ -801,9 +811,13 @@ public: } void value_to_item(uint pos, Item *item) { - ((Item_int*)item)->value= ((longlong*)base)[pos]; + ((Item_int*) item)->value= ((packed_longlong*) base)[pos].val; + ((Item_int*) item)->unsigned_flag= (my_bool) + ((packed_longlong*) base)[pos].unsigned_flag; } Item_result result_type() { return INT_RESULT; } + + friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b); }; class in_double :public in_vector diff --git a/sql/item_create.cc b/sql/item_create.cc index ff5825ef389..8ff78ef1b48 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -167,7 +167,7 @@ class Create_sp_func : public Create_qfunc { public: virtual Item* create(THD *thd, LEX_STRING db, LEX_STRING name, - List<Item> *item_list); + bool use_explicit_name, List<Item> *item_list); static Create_sp_func s_singleton; @@ -2316,7 +2316,7 @@ Create_qfunc::create(THD *thd, LEX_STRING name, List<Item> *item_list) if (thd->copy_db_to(&db.str, &db.length)) return NULL; - return create(thd, db, name, item_list); + return create(thd, db, name, false, item_list); } @@ -2433,7 +2433,7 @@ Create_sp_func Create_sp_func::s_singleton; Item* Create_sp_func::create(THD *thd, LEX_STRING db, LEX_STRING name, - List<Item> *item_list) + bool use_explicit_name, List<Item> *item_list) { int arg_count= 0; Item *func= NULL; @@ -2458,7 +2458,7 @@ Create_sp_func::create(THD *thd, LEX_STRING db, LEX_STRING name, if (item_list != NULL) arg_count= item_list->elements; - qname= new (thd->mem_root) sp_name(db, name); + qname= new (thd->mem_root) sp_name(db, name, use_explicit_name); qname->init_qname(thd); sp_add_used_routine(lex, thd, qname, TYPE_ENUM_FUNCTION); @@ -2877,9 +2877,6 
@@ Create_func_convert_tz Create_func_convert_tz::s_singleton; Item* Create_func_convert_tz::create(THD *thd, Item *arg1, Item *arg2, Item *arg3) { - if (thd->lex->add_time_zone_tables_to_query_tables(thd)) - return NULL; - return new (thd->mem_root) Item_func_convert_tz(arg1, arg2, arg3); } @@ -4795,6 +4792,12 @@ static Native_func_registry func_array[] = { C_STRING_WITH_LEN("MAKE_SET"), BUILDER(Create_func_make_set)}, { C_STRING_WITH_LEN("MASTER_POS_WAIT"), BUILDER(Create_func_master_pos_wait)}, { C_STRING_WITH_LEN("MBRCONTAINS"), GEOM_BUILDER(Create_func_contains)}, + { C_STRING_WITH_LEN("MBRDISJOINT"), GEOM_BUILDER(Create_func_disjoint)}, + { C_STRING_WITH_LEN("MBREQUAL"), GEOM_BUILDER(Create_func_equals)}, + { C_STRING_WITH_LEN("MBRINTERSECTS"), GEOM_BUILDER(Create_func_intersects)}, + { C_STRING_WITH_LEN("MBROVERLAPS"), GEOM_BUILDER(Create_func_overlaps)}, + { C_STRING_WITH_LEN("MBRTOUCHES"), GEOM_BUILDER(Create_func_touches)}, + { C_STRING_WITH_LEN("MBRWITHIN"), GEOM_BUILDER(Create_func_within)}, { C_STRING_WITH_LEN("MD5"), BUILDER(Create_func_md5)}, { C_STRING_WITH_LEN("MLINEFROMTEXT"), GEOM_BUILDER(Create_func_geometry_from_text)}, { C_STRING_WITH_LEN("MLINEFROMWKB"), GEOM_BUILDER(Create_func_geometry_from_wkb)}, diff --git a/sql/item_create.h b/sql/item_create.h index 985c4428d8f..0a668b3e67f 100644 --- a/sql/item_create.h +++ b/sql/item_create.h @@ -87,11 +87,12 @@ public: @param thd The current thread @param db The database name @param name The function name + @param use_explicit_name Should the function be represented as 'db.name'? @param item_list The list of arguments to the function, can be NULL @return An item representing the parsed function call */ virtual Item* create(THD *thd, LEX_STRING db, LEX_STRING name, - List<Item> *item_list) = 0; + bool use_explicit_name, List<Item> *item_list) = 0; protected: /** Constructor. 
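The use_explicit_name flag threaded through Create_qfunc::create()/Create_sp_func::create() ends up (via sp_name) deciding whether a stored function call is printed back as `db`.`name` or just `name`. The formatting decision on its own, with the real append_identifier() quoting reduced to plain backticks:

    #include <string>

    // Mirror of the naming rule in Item_func_sp::func_name(): qualify with the
    // database only when the user actually wrote db.name in the query.
    static std::string printable_name(const std::string &db,
                                      const std::string &name,
                                      bool explicit_name)
    {
      std::string qname;
      if (explicit_name)
        qname+= "`" + db + "`.";
      qname+= "`" + name + "`";
      return qname;
    }
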
*/ diff --git a/sql/item_func.cc b/sql/item_func.cc index 55888f6b16a..dfddbba70f4 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -22,6 +22,7 @@ #include "mysql_priv.h" #include "slave.h" // for wait_for_master_pos +#include "rpl_mi.h" #include <m_ctype.h> #include <hash.h> #include <time.h> @@ -413,8 +414,13 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const if (item->type() != FUNC_ITEM) return 0; Item_func *item_func=(Item_func*) item; - if (arg_count != item_func->arg_count || - func_name() != item_func->func_name()) + Item_func::Functype func_type; + if ((func_type= functype()) != item_func->functype() || + arg_count != item_func->arg_count || + (func_type != Item_func::FUNC_SP && + func_name() != item_func->func_name()) || + (func_type == Item_func::FUNC_SP && + my_strcasecmp(system_charset_info, func_name(), item_func->func_name()))) return 0; for (uint i=0; i < arg_count ; i++) if (!args[i]->eq(item_func->args[i], binary_cmp)) @@ -430,7 +436,7 @@ Field *Item_func::tmp_table_field(TABLE *table) switch (result_type()) { case INT_RESULT: - if (max_length > 11) + if (max_length > MY_INT32_NUM_DECIMAL_DIGITS) field= new Field_longlong(max_length, maybe_null, name, unsigned_flag); else field= new Field_long(max_length, maybe_null, name, unsigned_flag); @@ -2333,7 +2339,8 @@ longlong Item_func_coercibility::val_int() void Item_func_locate::fix_length_and_dec() { - maybe_null=0; max_length=11; + maybe_null= 0; + max_length= MY_INT32_NUM_DECIMAL_DIGITS; agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1); } @@ -4602,14 +4609,14 @@ void Item_func_match::init_search(bool no_order) fields.push_back(new Item_string(" ",1, cmp_collation.collation)); for (uint i=1; i < arg_count; i++) fields.push_back(args[i]); - concat=new Item_func_concat_ws(fields); + concat_ws=new Item_func_concat_ws(fields); /* Above function used only to get value and do not need fix_fields for it: Item_string - basic constant fields - fix_fields() was already called for this arguments Item_func_concat_ws - do not need fix_fields() to produce value */ - concat->quick_fix_field(); + concat_ws->quick_fix_field(); } if (master) @@ -4824,8 +4831,8 @@ double Item_func_match::val_real() if (key == NO_SUCH_KEY) { - String *a= concat->val_str(&value); - if ((null_value= (a == 0))) + String *a= concat_ws->val_str(&value); + if ((null_value= (a == 0)) || !a->length()) DBUG_RETURN(0); DBUG_RETURN(ft_handler->please->find_relevance(ft_handler, (byte *)a->ptr(), a->length())); @@ -4982,10 +4989,10 @@ longlong Item_func_row_count::val_int() } -Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, - sp_name *name_arg) - :Item_func(), context(context_arg), m_name(name_arg), m_sp(NULL), - result_field(NULL) + + +Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name) + :Item_func(), context(context_arg), m_name(name), m_sp(NULL), sp_result_field(NULL) { maybe_null= 1; m_name->init_qname(current_thd); @@ -4995,9 +5002,8 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, - sp_name *name_arg, List<Item> &list) - :Item_func(list), context(context_arg), m_name(name_arg), m_sp(NULL), - result_field(NULL) + sp_name *name, List<Item> &list) + :Item_func(list), context(context_arg), m_name(name), m_sp(NULL),sp_result_field(NULL) { maybe_null= 1; m_name->init_qname(current_thd); @@ -5009,10 +5015,10 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, void Item_func_sp::cleanup() { - if 
(result_field) + if (sp_result_field) { - delete result_field; - result_field= NULL; + delete sp_result_field; + sp_result_field= NULL; } m_sp= NULL; dummy_table->alias= NULL; @@ -5024,7 +5030,7 @@ Item_func_sp::func_name() const { THD *thd= current_thd; /* Calculate length to avoid reallocation of string for sure */ - uint len= ((m_name->m_db.length + + uint len= ((m_name->m_explicit_name ? m_name->m_db.length : 0 + m_name->m_name.length)*2 + //characters*quoting 2 + // ` and ` 1 + // . @@ -5034,88 +5040,128 @@ Item_func_sp::func_name() const system_charset_info); qname.length(0); - append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length); - qname.append('.'); + if (m_name->m_explicit_name) + { + append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length); + qname.append('.'); + } append_identifier(thd, &qname, m_name->m_name.str, m_name->m_name.length); return qname.ptr(); } -Field * -Item_func_sp::sp_result_field(void) const + +/** + @brief Initialize the result field by creating a temporary dummy table + and assign it to a newly created field object. Meta data used to + create the field is fetched from the sp_head belonging to the stored + proceedure found in the stored procedure functon cache. + + @note This function should be called from fix_fields to init the result + field. It is some what related to Item_field. + + @see Item_field + + @param thd A pointer to the session and thread context. + + @return Function return error status. + @retval TRUE is returned on an error + @retval FALSE is returned on success. +*/ +bool +Item_func_sp::init_result_field(THD *thd) { - Field *field; - DBUG_ENTER("Item_func_sp::sp_result_field"); - DBUG_PRINT("info", ("sp: %s, flags: %x, level: %lu", - (m_sp ? "YES" : "NO"), - (m_sp ? m_sp->m_flags : (uint)0), - (m_sp ? m_sp->m_recursion_level : (ulong)0))); + DBUG_ENTER("Item_func_sp::init_result_field"); + + LEX_STRING empty_name= { C_STRING_WITH_LEN("") }; + + TABLE_SHARE *share; + + DBUG_ASSERT(m_sp == NULL); + DBUG_ASSERT(sp_result_field == NULL); - if (!m_sp) + if (!(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, + &thd->sp_func_cache, TRUE))) { - THD *thd= current_thd; - if (!(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, - &thd->sp_func_cache, TRUE))) - { - my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); - DBUG_RETURN(0); - } + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + context->process_error(thd); + DBUG_RETURN(TRUE); } - if (!dummy_table->alias) + + /* + A Field need to be attached to a Table. + Below we "create" a dummy table by initializing + the needed pointers. 
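init_result_field() above parks the return value either in the 64-byte result_buf member or in memory obtained from sql_alloc(), depending on the field's packed length. The buffer-selection idea by itself, with plain C++ standing in for Field and the server allocator:

    #include <cstddef>
    #include <vector>

    struct ResultSlot {
      char inline_buf[64];            // plays the role of Item_func_sp::result_buf
      std::vector<char> heap_buf;     // plays the role of sql_alloc()'d memory

      // Return storage for 'pack_length' bytes: prefer the fixed inline buffer,
      // fall back to the heap only for results that do not fit.
      char *storage(size_t pack_length)
      {
        if (pack_length <= sizeof(inline_buf))
          return inline_buf;
        heap_buf.resize(pack_length);
        return heap_buf.data();
      }
    };
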
+ */ + + share= dummy_table->s; + dummy_table->alias = ""; + dummy_table->maybe_null = maybe_null; + dummy_table->in_use= thd; + dummy_table->copy_blobs= TRUE; + share->table_cache_key = empty_name; + share->table_name = empty_name; + + if (!(sp_result_field= m_sp->create_result_field(max_length, name, dummy_table))) + { + DBUG_RETURN(TRUE); + } + + if (sp_result_field->pack_length() > sizeof(result_buf)) { - char *empty_name= (char *) ""; - dummy_table->alias= empty_name; - dummy_table->maybe_null= maybe_null; - dummy_table->in_use= current_thd; - dummy_table->copy_blobs= TRUE; - dummy_table->s->table_cache_key.str = empty_name; - dummy_table->s->table_name.str= empty_name; - dummy_table->s->db.str= empty_name; + sp_result_field->move_field(sql_alloc(sp_result_field->pack_length())); + } else { + sp_result_field->move_field(result_buf); } - if (!(field= m_sp->create_result_field(max_length, name, dummy_table))) - my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + + sp_result_field->null_ptr= (uchar *) &null_value; + sp_result_field->null_bit= 1; + - DBUG_RETURN(field); + DBUG_RETURN(FALSE); } +/** + @brief Initialize local members with values from the Field interface. -/* - Execute function & store value in field + @note called from Item::fix_fields. +*/ +void Item_func_sp::fix_length_and_dec() +{ + DBUG_ENTER("Item_func_sp::fix_length_and_dec"); - RETURN - 0 value <> NULL - 1 value = NULL or error + DBUG_ASSERT(sp_result_field); + decimals= sp_result_field->decimals(); + max_length= sp_result_field->field_length; + collation.set(sp_result_field->charset()); + maybe_null= 1; + unsigned_flag= test(sp_result_field->flags & UNSIGNED_FLAG); + + DBUG_VOID_RETURN; +} + +/** + @brief Execute function & store value in field. + + @return Function returns error status. + @retval FALSE on success. + @retval TRUE if an error occurred. */ bool -Item_func_sp::execute(Field **flp) +Item_func_sp::execute() { THD *thd= current_thd; - Field *f; - + /* Get field in virtual tmp table to store result. Create the field if invoked first time. */ - - if (!(f= *flp)) - { - if (!(*flp= f= sp_result_field())) - { - /* Error set by sp_result_field() */ - null_value= 1; - return TRUE; - } - f->move_field((f->pack_length() > sizeof(result_buf)) ? - sql_alloc(f->pack_length()) : result_buf); - f->null_ptr= (uchar *)&null_value; - f->null_bit= 1; - } /* Execute function and store the return value in the field. */ - if (execute_impl(thd, f)) + if (execute_impl(thd)) { null_value= 1; context->process_error(thd); @@ -5124,14 +5170,24 @@ Item_func_sp::execute(Field **flp) /* Check that the field (the value) is not NULL. */ - null_value= f->is_null(); + null_value= sp_result_field->is_null(); return null_value; } +/** + @brief Execute function and store the return value in the field. + + @note This function was intended to be the concrete implementation of + the interface function execute. This was never realized. + + @return The error state. + @retval FALSE on success + @retval TRUE if an error occurred. +*/ bool -Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) +Item_func_sp::execute_impl(THD *thd) { bool err_status= TRUE; Sub_statement_state statement_state; @@ -5148,7 +5204,7 @@ Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) thd->security_ctx= context->security_ctx; } #endif - if (find_and_check_access(thd)) + if (sp_check_access(thd)) goto error; /* @@ -5169,7 +5225,7 @@ Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) function call into binlog. 
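execute_impl(), continued below, brackets the call with reset_sub_statement_state()/restore_sub_statement_state() and, when access checks are compiled in, a security-context swap. In stand-alone C++ the same save/run/restore shape is commonly written as a scope guard; a generic sketch under that assumption, not the server's classes:

    #include <utility>

    // Save a value on entry, restore it on exit, even on early return or throw.
    template <class T>
    class SaveRestore {
      T &slot_;
      T saved_;
    public:
      SaveRestore(T &slot, T temporary) : slot_(slot), saved_(slot)
      { slot_= std::move(temporary); }
      ~SaveRestore() { slot_= std::move(saved_); }
    };

    struct Session { int sub_stmt_flags= 0; };

    static bool run_stored_function(Session &session)
    {
      SaveRestore<int> guard(session.sub_stmt_flags, 1 /* e.g. SUB_STMT_FUNCTION */);
      // ... execute the function body; the flags are restored when 'guard' dies.
      return false;
    }
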
*/ thd->reset_sub_statement_state(&statement_state, SUB_STMT_FUNCTION); - err_status= m_sp->execute_function(thd, args, arg_count, return_value_fld); + err_status= m_sp->execute_function(thd, args, arg_count, sp_result_field); thd->restore_sub_statement_state(&statement_state); error: @@ -5184,15 +5240,9 @@ error: void Item_func_sp::make_field(Send_field *tmp_field) { - Field *field; DBUG_ENTER("Item_func_sp::make_field"); - if ((field= sp_result_field())) - { - field->make_field(tmp_field); - delete field; - DBUG_VOID_RETURN; - } - init_make_field(tmp_field, MYSQL_TYPE_VARCHAR); + DBUG_ASSERT(sp_result_field); + sp_result_field->make_field(tmp_field); DBUG_VOID_RETURN; } @@ -5200,67 +5250,20 @@ Item_func_sp::make_field(Send_field *tmp_field) enum enum_field_types Item_func_sp::field_type() const { - Field *field; DBUG_ENTER("Item_func_sp::field_type"); - - if (result_field) - DBUG_RETURN(result_field->type()); - if ((field= sp_result_field())) - { - enum_field_types result= field->type(); - delete field; - DBUG_RETURN(result); - } - DBUG_RETURN(MYSQL_TYPE_VARCHAR); + DBUG_ASSERT(sp_result_field); + DBUG_RETURN(sp_result_field->type()); } - Item_result Item_func_sp::result_type() const { - Field *field; DBUG_ENTER("Item_func_sp::result_type"); - DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp)); - - if (result_field) - DBUG_RETURN(result_field->result_type()); - if ((field= sp_result_field())) - { - Item_result result= field->result_type(); - delete field; - DBUG_RETURN(result); - } - DBUG_RETURN(STRING_RESULT); + DBUG_PRINT("info", ("m_sp = %p", m_sp)); + DBUG_ASSERT(sp_result_field); + DBUG_RETURN(sp_result_field->result_type()); } -void -Item_func_sp::fix_length_and_dec() -{ - Field *field; - DBUG_ENTER("Item_func_sp::fix_length_and_dec"); - - if (result_field) - { - decimals= result_field->decimals(); - max_length= result_field->field_length; - collation.set(result_field->charset()); - DBUG_VOID_RETURN; - } - - if (!(field= sp_result_field())) - { - context->process_error(current_thd); - DBUG_VOID_RETURN; - } - decimals= field->decimals(); - max_length= field->field_length; - collation.set(field->charset()); - maybe_null= 1; - delete field; - DBUG_VOID_RETURN; -} - - longlong Item_func_found_rows::val_int() { DBUG_ASSERT(fixed == 1); @@ -5271,57 +5274,39 @@ longlong Item_func_found_rows::val_int() Field * Item_func_sp::tmp_table_field(TABLE *t_arg) { - Field *field= 0; DBUG_ENTER("Item_func_sp::tmp_table_field"); - if (m_sp) - field= m_sp->create_result_field(max_length, (const char*) name, t_arg); - - if (!field) - field= Item_func::tmp_table_field(t_arg); - - if (!field) - my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); - - DBUG_RETURN(field); + DBUG_ASSERT(sp_result_field); + DBUG_RETURN(sp_result_field); } -/* - Find the function and check access rights to the function - - SYNOPSIS - find_and_check_access() - thd thread handler - - RETURN - FALSE Access granted - TRUE Requested access can't be granted or function doesn't exists - - NOTES - Checks if requested access to function can be granted to user. +/** + @brief Checks if requested access to function can be granted to user. If function isn't found yet, it searches function first. If function can't be found or user don't have requested access error is raised. + + @param thd thread handler + + @return Indication if the access was granted or not. + @retval FALSE Access is granted. + @retval TRUE Requested access can't be granted or function doesn't exists. 
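With this refactoring the routine pointer (m_sp) and the result field are created exactly once, in fix_fields()/init_result_field(); later accessors such as field_type(), result_type() and sp_check_access() only assert that the cached state exists. A reduced model of that resolve-once, assert-later pattern (SpInfo and lookup() are placeholders):

    #include <cassert>
    #include <string>

    struct SpInfo { std::string name; };                 // placeholder metadata

    // Placeholder for the routine-cache lookup; may fail and return nullptr.
    static SpInfo *lookup(const std::string &qname)
    {
      static SpInfo f= { "f" };
      return qname == "test.f" ? &f : nullptr;
    }

    class SpCall {
      SpInfo *m_sp= nullptr;
    public:
      // fix_fields-style step: the only place that is allowed to fail.
      bool resolve(const std::string &qname)
      {
        m_sp= lookup(qname);
        return m_sp == nullptr;                          // TRUE means error
      }
      // field_type()/result_type()-style accessors just assert the cache.
      const std::string &name() const { assert(m_sp); return m_sp->name; }
    };
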
+ */ bool -Item_func_sp::find_and_check_access(THD *thd) +Item_func_sp::sp_check_access(THD *thd) { - if (! m_sp && ! (m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, - &thd->sp_func_cache, TRUE))) - { - my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); - return TRUE; - } - + DBUG_ENTER("Item_func_sp::sp_check_access"); + DBUG_ASSERT(m_sp); #ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_routine_access(thd, EXECUTE_ACL, m_sp->m_db.str, m_sp->m_name.str, 0, FALSE)) - return TRUE; + DBUG_RETURN(TRUE); #endif - return FALSE; + DBUG_RETURN(FALSE); } @@ -5329,9 +5314,25 @@ bool Item_func_sp::fix_fields(THD *thd, Item **ref) { bool res; + DBUG_ENTER("Item_func_sp::fix_fields"); DBUG_ASSERT(fixed == 0); + + /* + We must call init_result_field before Item_func::fix_fields() + to make m_sp and result_field members available to fix_length_and_dec(), + which is called from Item_func::fix_fields(). + */ + res= init_result_field(thd); + + if (res) + DBUG_RETURN(res); + res= Item_func::fix_fields(thd, ref); - if (!res && thd->lex->view_prepare_mode) + + if (res) + DBUG_RETURN(res); + + if (thd->lex->view_prepare_mode) { /* Here we check privileges of the stored routine only during view @@ -5343,15 +5344,17 @@ Item_func_sp::fix_fields(THD *thd, Item **ref) good idea especially if the view has SQL SECURITY DEFINER and the used stored procedure has SQL SECURITY DEFINER. */ - res= find_and_check_access(thd); + res= sp_check_access(thd); #ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Try to set and restore the security context to see whether it's valid + */ Security_context *save_secutiry_ctx; - if (!res && !(res= set_routine_security_ctx(thd, m_sp, false, - &save_secutiry_ctx))) - { - sp_restore_security_context(thd, save_secutiry_ctx); - } + res= set_routine_security_ctx(thd, m_sp, false, &save_secutiry_ctx); + if (!res) + m_sp->m_security_ctx.restore_security_context(thd, save_secutiry_ctx); + #endif /* ! 
NO_EMBEDDED_ACCESS_CHECKS */ } - return res; + DBUG_RETURN(res); } diff --git a/sql/item_func.h b/sql/item_func.h index 3306b059097..28f11e27306 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -147,11 +147,11 @@ public: void count_only_length(); void count_real_length(); void count_decimal_length(); - inline bool get_arg0_date(TIME *ltime, uint fuzzy_date) + inline bool get_arg0_date(MYSQL_TIME *ltime, uint fuzzy_date) { return (null_value=args[0]->get_date(ltime, fuzzy_date)); } - inline bool get_arg0_time(TIME *ltime) + inline bool get_arg0_time(MYSQL_TIME *ltime) { return (null_value=args[0]->get_time(ltime)); } @@ -1337,12 +1337,12 @@ public: FT_INFO *ft_handler; TABLE *table; Item_func_match *master; // for master-slave optimization - Item *concat; // Item_func_concat_ws - String value; // value of concat + Item *concat_ws; // Item_func_concat_ws + String value; // value of concat_ws String search_value; // key_item()'s value converted to cmp_collation Item_func_match(List<Item> &a, uint b): Item_real_func(a), key(0), flags(b), - join_key(0), ft_handler(0), table(0), master(0), concat(0) { } + join_key(0), ft_handler(0), table(0), master(0), concat_ws(0) { } void cleanup() { DBUG_ENTER("Item_func_match"); @@ -1350,7 +1350,7 @@ public: if (!master && ft_handler) ft_handler->please->close_search(ft_handler); ft_handler= 0; - concat= 0; + concat_ws= 0; DBUG_VOID_RETURN; } enum Functype functype() const { return FT_FUNC; } @@ -1434,13 +1434,16 @@ private: sp_name *m_name; mutable sp_head *m_sp; TABLE *dummy_table; - Field *result_field; char result_buf[64]; + /* + The result field of the concrete stored function. + */ + Field *sp_result_field; - bool execute(Field **flp); - bool execute_impl(THD *thd, Field *return_value_fld); - Field *sp_result_field(void) const; - + bool execute(); + bool execute_impl(THD *thd); + bool init_result_field(THD *thd); + public: Item_func_sp(Name_resolution_context *context_arg, sp_name *name); @@ -1451,6 +1454,8 @@ public: virtual ~Item_func_sp() {} + table_map used_tables() const { return RAND_TABLE_BIT; } + void cleanup(); const char *func_name() const; @@ -1465,23 +1470,23 @@ public: longlong val_int() { - if (execute(&result_field)) + if (execute()) return (longlong) 0; - return result_field->val_int(); + return sp_result_field->val_int(); } double val_real() { - if (execute(&result_field)) + if (execute()) return 0.0; - return result_field->val_real(); + return sp_result_field->val_real(); } my_decimal *val_decimal(my_decimal *dec_buf) { - if (execute(&result_field)) + if (execute()) return NULL; - return result_field->val_decimal(dec_buf); + return sp_result_field->val_decimal(dec_buf); } String *val_str(String *str) @@ -1490,7 +1495,7 @@ public: char buff[20]; buf.set(buff, 20, str->charset()); buf.length(0); - if (execute(&result_field)) + if (execute()) return NULL; /* result_field will set buf pointing to internal buffer @@ -1498,7 +1503,7 @@ public: when SP is executed. In order to prevent occasional corruption of returned value, we make here a copy. 
*/ - result_field->val_str(&buf); + sp_result_field->val_str(&buf); str->copy(buf); return str; } @@ -1506,11 +1511,11 @@ public: virtual bool change_context_processor(byte *cntx) { context= (Name_resolution_context *)cntx; return FALSE; } - void fix_length_and_dec(); - bool find_and_check_access(THD * thd); + bool sp_check_access(THD * thd); virtual enum Functype functype() const { return FUNC_SP; } bool fix_fields(THD *thd, Item **ref); + void fix_length_and_dec(void); bool is_expensive() { return 1; } }; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index b96567cf1ff..4e28967b2b0 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -964,18 +964,18 @@ String *Item_func_insert::val_str(String *str) args[3]->null_value) goto null; /* purecov: inspected */ - if ((start < 0) || (start > res->length() + 1)) + if ((start < 0) || (start > res->length())) return res; // Wrong param; skip insert - if ((length < 0) || (length > res->length() + 1)) - length= res->length() + 1; + if ((length < 0) || (length > res->length())) + length= res->length(); /* start and length are now sufficiently valid to pass to charpos function */ start= res->charpos((int) start); length= res->charpos((int) length, (uint32) start); /* Re-testing with corrected params */ - if (start > res->length() + 1) - return res; // Wrong param; skip insert + if (start > res->length()) + return res; /* purecov: inspected */ // Wrong param; skip insert if (length > res->length() - start) length= res->length() - start; @@ -1181,11 +1181,10 @@ void Item_func_substr::fix_length_and_dec() if (args[1]->const_item()) { int32 start= (int32) args[1]->val_int(); - start= (int32)((start < 0) ? max_length + start : start - 1); - if (start < 0 || start >= (int32) max_length) - max_length=0; /* purecov: inspected */ + if (start < 0) + max_length= ((uint)(-start) > max_length) ? 0 : (uint)(-start); else - max_length-= (uint) start; + max_length-= min((uint)(start - 1), max_length); } if (arg_count == 3 && args[2]->const_item()) { @@ -1814,7 +1813,8 @@ void Item_func_soundex::fix_length_and_dec() { collation.set(args[0]->collation); max_length=args[0]->max_length; - set_if_bigger(max_length,4); + set_if_bigger(max_length, 4 * collation.collation->mbminlen); + tmp_value.set_charset(collation.collation); } @@ -1824,14 +1824,15 @@ void Item_func_soundex::fix_length_and_dec() else return 0 */ -static char soundex_toupper(char ch) +static int soundex_toupper(int ch) { return (ch >= 'a' && ch <= 'z') ? ch - 'a' + 'A' : ch; } -static char get_scode(char *ptr) + +static char get_scode(int wc) { - uchar ch= soundex_toupper(*ptr); + int ch= soundex_toupper(wc); if (ch < 'A' || ch > 'Z') { // Thread extended alfa (country spec) @@ -1841,46 +1842,121 @@ static char get_scode(char *ptr) } +static bool my_uni_isalpha(int wc) +{ + /* + Return true for all Basic Latin letters: a..z A..Z. + Return true for all Unicode characters with code higher than U+00C0: + - characters between 'z' and U+00C0 are controls and punctuations. + - "U+00C0 LATIN CAPITAL LETTER A WITH GRAVE" is the first letter after 'z'. 
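The my_uni_isalpha() helper whose body follows treats every code point from U+00C0 upward as a letter, so accented characters survive the multibyte SOUNDEX rewrite instead of being skipped. The predicate is small enough to exercise on its own:

    #include <cassert>

    static bool uni_isalpha(int wc)
    {
      return (wc >= 'a' && wc <= 'z') ||
             (wc >= 'A' && wc <= 'Z') ||
             (wc >= 0xC0);              // first letter after 'z' is U+00C0
    }

    int main()
    {
      assert(uni_isalpha('Q'));
      assert(!uni_isalpha('7'));
      assert(uni_isalpha(0x00C9));      // LATIN CAPITAL LETTER E WITH ACUTE
      return 0;
    }
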
+ */ + return (wc >= 'a' && wc <= 'z') || + (wc >= 'A' && wc <= 'Z') || + (wc >= 0xC0); +} + + String *Item_func_soundex::val_str(String *str) { DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); char last_ch,ch; CHARSET_INFO *cs= collation.collation; + my_wc_t wc; + uint nchars; + int rc; - if ((null_value=args[0]->null_value)) + if ((null_value= args[0]->null_value)) return 0; /* purecov: inspected */ - if (tmp_value.alloc(max(res->length(),4))) + if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen))) return str; /* purecov: inspected */ char *to= (char *) tmp_value.ptr(); - char *from= (char *) res->ptr(), *end=from+res->length(); - tmp_value.set_charset(cs); + char *to_end= to + tmp_value.alloced_length(); + char *from= (char *) res->ptr(), *end= from + res->length(); - while (from != end && !my_isalpha(cs,*from)) // Skip pre-space - from++; /* purecov: inspected */ - if (from == end) - return &my_empty_string; // No alpha characters. - *to++ = soundex_toupper(*from); // Copy first letter - last_ch = get_scode(from); // code of the first letter - // for the first 'double-letter check. - // Loop on input letters until - // end of input (null) or output - // letter code count = 3 - for (from++ ; from < end ; from++) - { - if (!my_isalpha(cs,*from)) - continue; - ch=get_scode(from); + for ( ; ; ) /* Skip pre-space */ + { + if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) + return &my_empty_string; /* EOL or invalid byte sequence */ + + if (rc == 1 && cs->ctype) + { + /* Single byte letter found */ + if (my_isalpha(cs, *from)) + { + last_ch= get_scode(*from); // Code of the first letter + *to++= soundex_toupper(*from++); // Copy first letter + break; + } + from++; + } + else + { + from+= rc; + if (my_uni_isalpha(wc)) + { + /* Multibyte letter found */ + wc= soundex_toupper(wc); + last_ch= get_scode(wc); // Code of the first letter + if ((rc= cs->cset->wc_mb(cs, wc, (uchar*) to, (uchar*) to_end)) <= 0) + { + /* Extra safety - should not really happen */ + DBUG_ASSERT(false); + return &my_empty_string; + } + to+= rc; + break; + } + } + } + + /* + last_ch is now set to the first 'double-letter' check. 
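The loop that follows keeps the classic SOUNDEX rules, skip non-letters, ignore code '0', collapse repeated codes, pad with '0' up to four characters, while adding the mb_wc/wc_mb decoding needed for multibyte charsets. A single-byte ASCII sketch of just those rules, using the standard Soundex digit table rather than the server's soundex_map:

    #include <cctype>
    #include <string>

    // Standard Soundex digit for an ASCII letter ('0' means "ignore").
    static char scode(char c)
    {
      static const char map[27]= "01230120022455012623010202";   // A..Z
      c= (char) std::toupper((unsigned char) c);
      return (c < 'A' || c > 'Z') ? '0' : map[c - 'A'];
    }

    static std::string soundex_ascii(const std::string &in)
    {
      std::string out;
      char last= '0';
      for (char c : in)
      {
        if (!std::isalpha((unsigned char) c))
          continue;                            // skip anything that is not a letter
        char code= scode(c);
        if (out.empty())
        {
          out+= (char) std::toupper((unsigned char) c);  // keep the first letter
          last= code;
        }
        else if (code != '0' && code != last)            // drop '0' and doubled codes
        {
          out+= code;
          last= code;
        }
      }
      if (!out.empty() && out.size() < 4)
        out.append(4 - out.size(), '0');                 // pad with '0', as the patch does
      return out;
    }
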
+ loop on input letters until end of input + */ + for (nchars= 1 ; ; ) + { + if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) + break; /* EOL or invalid byte sequence */ + + if (rc == 1 && cs->ctype) + { + if (!my_isalpha(cs, *from++)) + continue; + } + else + { + from+= rc; + if (!my_uni_isalpha(wc)) + continue; + } + + ch= get_scode(wc); if ((ch != '0') && (ch != last_ch)) // if not skipped or double { - *to++ = ch; // letter, copy to output - last_ch = ch; // save code of last input letter - } // for next double-letter check + // letter, copy to output + if ((rc= cs->cset->wc_mb(cs, (my_wc_t) ch, + (uchar*) to, (uchar*) to_end)) <= 0) + { + // Extra safety - should not really happen + DBUG_ASSERT(false); + break; + } + to+= rc; + nchars++; + last_ch= ch; // save code of last input letter + } // for next double-letter check } - for (end=(char*) tmp_value.ptr()+4 ; to < end ; to++) - *to = '0'; - *to=0; // end string + + /* Pad up to 4 characters with DIGIT ZERO, if the string is shorter */ + if (nchars < 4) + { + uint nbytes= (4 - nchars) * cs->mbminlen; + cs->cset->fill(cs, to, nbytes, '0'); + to+= nbytes; + } + tmp_value.length((uint) (to-tmp_value.ptr())); return &tmp_value; } @@ -2272,7 +2348,7 @@ String *Item_func_repeat::val_str(String *str) goto err; // string and/or delim are null null_value= 0; - if (count == 0 || count < 0 && !args[1]->unsigned_flag) + if (count <= 0 && (count == 0 || !args[1]->unsigned_flag)) return &my_empty_string; /* Assumes that the maximum length of a String is < INT_MAX32. */ diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index ae11e001551..60e7de42a9d 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -426,8 +426,8 @@ public: bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec() { - max_length= ((USERNAME_LENGTH + HOSTNAME_LENGTH + 1) * - system_charset_info->mbmaxlen); + max_length= (USERNAME_LENGTH + + (HOSTNAME_LENGTH + 1) * SYSTEM_CHARSET_MBMAXLEN); } const char *func_name() const { return "user"; } const char *fully_qualified_func_name() const { return "user()"; } @@ -601,7 +601,11 @@ class Item_func_unhex :public Item_str_func { String tmp_value; public: - Item_func_unhex(Item *a) :Item_str_func(a) {} + Item_func_unhex(Item *a) :Item_str_func(a) + { + /* there can be bad hex strings */ + maybe_null= 1; + } const char *func_name() const { return "unhex"; } String *val_str(String *); void fix_length_and_dec() @@ -786,7 +790,7 @@ class Item_func_crc32 :public Item_int_func { String value; public: - Item_func_crc32(Item *a) :Item_int_func(a) {} + Item_func_crc32(Item *a) :Item_int_func(a) { unsigned_flag= 1; } const char *func_name() const { return "crc32"; } void fix_length_and_dec() { max_length=10; } longlong val_int(); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 135ed33feec..79b09967a9d 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1080,7 +1080,8 @@ Item_in_subselect::single_value_transformer(JOIN *join, Item *having= item, *orig_item= item; select_lex->item_list.empty(); select_lex->item_list.push_back(new Item_int("Not_used", - (longlong) 1, 21)); + (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); item= func->create(expr, item); @@ -2045,7 +2046,8 @@ int subselect_uniquesubquery_engine::exec() table->file->ha_index_init(tab->ref.key, 0); error= table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + 
HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) error= report_error(table, error); @@ -2154,7 +2156,8 @@ int subselect_indexsubquery_engine::exec() table->file->ha_index_init(tab->ref.key, 1); error= table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) error= report_error(table, error); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 37264f2136f..7c21eac6ec3 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -277,7 +277,11 @@ public: { return pushed_cond_guards ? pushed_cond_guards + i : NULL; } - void set_cond_guard_var(int i, bool v) { pushed_cond_guards[i]= v; } + void set_cond_guard_var(int i, bool v) + { + if ( pushed_cond_guards) + pushed_cond_guards[i]= v; + } bool have_guarded_conds() { return test(pushed_cond_guards); } Item_func_not_all *upper_item; // point on NOT/NOP before ALL/SOME subquery diff --git a/sql/item_sum.cc b/sql/item_sum.cc index f34fc008186..f217a6ea953 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -63,6 +63,7 @@ bool Item_sum::init_sum_func_check(THD *thd) nest_level= thd->lex->current_select->nest_level; ref_by= 0; aggr_level= -1; + aggr_sel= NULL; max_arg_level= -1; max_sum_func_level= -1; return FALSE; @@ -148,9 +149,14 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) if (register_sum_func(thd, ref)) return TRUE; invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level)); + if (!invalid && thd->variables.sql_mode & MODE_ANSI) + invalid= aggr_level < 0 && max_arg_level < nest_level; } if (!invalid && aggr_level < 0) + { aggr_level= nest_level; + aggr_sel= thd->lex->current_select; + } /* By this moment we either found a subquery where the set function is to be aggregated and assigned a value that is >= 0 to aggr_level, @@ -160,8 +166,9 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) Additionally we have to check whether possible nested set functions are acceptable here: they are not, if the level of aggregation of some of them is less than aggr_level. - */ - invalid= aggr_level <= max_sum_func_level; + */ + if (!invalid) + invalid= aggr_level <= max_sum_func_level; if (invalid) { my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), @@ -176,6 +183,7 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) */ set_if_bigger(in_sum_func->max_sum_func_level, aggr_level); } + update_used_tables(); thd->lex->in_sum_func= in_sum_func; return FALSE; } @@ -210,7 +218,6 @@ bool Item_sum::check_sum_func(THD *thd, Item **ref) bool Item_sum::register_sum_func(THD *thd, Item **ref) { SELECT_LEX *sl; - SELECT_LEX *aggr_sl= NULL; nesting_map allow_sum_func= thd->lex->allow_sum_func; for (sl= thd->lex->current_select->master_unit()->outer_select() ; sl && sl->nest_level > max_arg_level; @@ -220,7 +227,7 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) { /* Found the most nested subquery where the function can be aggregated */ aggr_level= sl->nest_level; - aggr_sl= sl; + aggr_sel= sl; } } if (sl && (allow_sum_func & (1 << sl->nest_level))) @@ -231,21 +238,22 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) The set function will be aggregated in this subquery. 
*/ aggr_level= sl->nest_level; - aggr_sl= sl; + aggr_sel= sl; + } if (aggr_level >= 0) { ref_by= ref; - /* Add the object to the list of registered objects assigned to aggr_sl */ - if (!aggr_sl->inner_sum_func_list) + /* Add the object to the list of registered objects assigned to aggr_sel */ + if (!aggr_sel->inner_sum_func_list) next= this; else { - next= aggr_sl->inner_sum_func_list->next; - aggr_sl->inner_sum_func_list->next= this; + next= aggr_sel->inner_sum_func_list->next; + aggr_sel->inner_sum_func_list->next= this; } - aggr_sl->inner_sum_func_list= this; - aggr_sl->with_sum_func= 1; + aggr_sel->inner_sum_func_list= this; + aggr_sel->with_sum_func= 1; /* Mark Item_subselect(s) as containing aggregate function all the way up @@ -263,16 +271,17 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) has aggregate functions directly referenced (i.e. not through a sub-select). */ for (sl= thd->lex->current_select; - sl && sl != aggr_sl && sl->master_unit()->item; + sl && sl != aggr_sel && sl->master_unit()->item; sl= sl->master_unit()->outer_select() ) sl->master_unit()->item->with_sum_func= 1; } + thd->lex->current_select->mark_as_dependent(aggr_sel); return FALSE; } -Item_sum::Item_sum(List<Item> &list) - :arg_count(list.elements) +Item_sum::Item_sum(List<Item> &list) :arg_count(list.elements), + forced_const(FALSE) { if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) { @@ -296,7 +305,10 @@ Item_sum::Item_sum(List<Item> &list) Item_sum::Item_sum(THD *thd, Item_sum *item): Item_result_field(thd, item), arg_count(item->arg_count), - quick_group(item->quick_group) + aggr_sel(item->aggr_sel), + nest_level(item->nest_level), aggr_level(item->aggr_level), + quick_group(item->quick_group), used_tables_cache(item->used_tables_cache), + forced_const(item->forced_const) { if (arg_count <= 2) args=tmp_args; @@ -407,7 +419,7 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, break; case STRING_RESULT: if (max_length/collation.collation->mbmaxlen <= 255 || - max_length/collation.collation->mbmaxlen >=UINT_MAX16 || + convert_blob_length >=UINT_MAX16 || !convert_blob_length) return make_string_field(table); field= new Field_varstring(convert_blob_length, maybe_null, @@ -429,6 +441,25 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table, } +void Item_sum::update_used_tables () +{ + if (!forced_const) + { + used_tables_cache= 0; + for (uint i=0 ; i < arg_count ; i++) + { + args[i]->update_used_tables(); + used_tables_cache|= args[i]->used_tables(); + } + + used_tables_cache&= PSEUDO_TABLE_BITS; + + /* the aggregate function is aggregated into its local context */ + used_tables_cache |= (1 << aggr_sel->join->tables) - 1; + } +} + + String * Item_sum_num::val_str(String *str) { @@ -488,7 +519,7 @@ Item_sum_num::fix_fields(THD *thd, Item **ref) Item_sum_hybrid::Item_sum_hybrid(THD *thd, Item_sum_hybrid *item) :Item_sum(thd, item), value(item->value), hybrid_type(item->hybrid_type), hybrid_field_type(item->hybrid_field_type), cmp_sign(item->cmp_sign), - used_table_cache(item->used_table_cache), was_values(item->was_values) + was_values(item->was_values) { /* copy results from old value */ switch (hybrid_type) { @@ -1059,14 +1090,8 @@ void Item_sum_count::clear() bool Item_sum_count::add() { - if (!args[0]->maybe_null) + if (!args[0]->maybe_null || !args[0]->is_null()) count++; - else - { - args[0]->update_null_value(); - if (!args[0]->null_value) - count++; - } return 0; } @@ -1082,7 +1107,6 @@ void Item_sum_count::cleanup() DBUG_ENTER("Item_sum_count::cleanup"); count= 0; 
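Item_sum::update_used_tables(), shown above, replaces the old blanket "depends on everything" answer with a cached map: it is rebuilt from the arguments, masked down to the pseudo-table bits, then widened to cover the tables of the select the function aggregates in, and it is left untouched once make_const() has frozen the item. The caching/override idea in miniature (the bit layout is illustrative only):

    #include <cstdint>
    #include <vector>

    typedef uint64_t table_map;

    struct AggExpr {
      std::vector<table_map> arg_maps;   // what each argument depends on
      table_map pseudo_mask= 0;          // stand-in for PSEUDO_TABLE_BITS
      unsigned local_tables= 0;          // tables of the aggregating select
      table_map cache= 0;
      bool forced_const= false;

      void update_used_tables()
      {
        if (forced_const)
          return;                        // make_const() pinned the value
        cache= 0;
        for (table_map m : arg_maps)
          cache|= m;
        cache&= pseudo_mask;
        cache|= (table_map(1) << local_tables) - 1;   // local tables of aggr_sel
      }
      void make_const() { cache= 0; forced_const= true; }
      table_map used_tables() const { return cache; }
    };
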
Item_sum_int::cleanup(); - used_table_cache= ~(table_map) 0; DBUG_VOID_RETURN; } @@ -1105,8 +1129,10 @@ void Item_sum_avg::fix_length_and_dec() f_scale= args[0]->decimals; dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale); } - else + else { decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC); + max_length= args[0]->max_length + prec_increment; + } } @@ -1572,7 +1598,7 @@ void Item_sum_hybrid::cleanup() { DBUG_ENTER("Item_sum_hybrid::cleanup"); Item_sum::cleanup(); - used_table_cache= ~(table_map) 0; + forced_const= FALSE; /* by default it is TRUE to avoid TRUE reporting by @@ -1908,14 +1934,8 @@ void Item_sum_count::reset_field() char *res=result_field->ptr; longlong nr=0; - if (!args[0]->maybe_null) + if (!args[0]->maybe_null || !args[0]->is_null()) nr=1; - else - { - args[0]->update_null_value(); - if (!args[0]->null_value) - nr=1; - } int8store(res,nr); } @@ -2018,14 +2038,8 @@ void Item_sum_count::update_field() char *res=result_field->ptr; nr=sint8korr(res); - if (!args[0]->maybe_null) + if (!args[0]->maybe_null || !args[0]->is_null()) nr++; - else - { - args[0]->update_null_value(); - if (!args[0]->null_value) - nr++; - } int8store(res,nr); } @@ -2449,12 +2463,8 @@ bool Item_sum_count_distinct::setup(THD *thd) Item *item=args[i]; if (list.push_back(item)) return TRUE; // End of memory - if (item->const_item()) - { - item->update_null_value(); - if (item->null_value) - always_null=1; - } + if (item->const_item() && item->is_null()) + always_null= 1; } if (always_null) return FALSE; diff --git a/sql/item_sum.h b/sql/item_sum.h index 4cf16fc79af..5cf4f93af0e 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -215,7 +215,9 @@ TODO: to catch queries where the limit is exceeded to make the code clean here. -*/ +*/ + +class st_select_lex; class Item_sum :public Item_result_field { @@ -231,25 +233,32 @@ public: Item_sum *next; /* next in the circular chain of registered objects */ uint arg_count; Item_sum *in_sum_func; /* embedding set function if any */ + st_select_lex * aggr_sel; /* select where the function is aggregated */ int8 nest_level; /* number of the nesting level of the set function */ int8 aggr_level; /* nesting level of the aggregating subquery */ int8 max_arg_level; /* max level of unbound column references */ int8 max_sum_func_level;/* max level of aggregation for embedded functions */ bool quick_group; /* If incremental update of fields */ +protected: + table_map used_tables_cache; + bool forced_const; + +public: + void mark_as_sum_func(); - Item_sum() :arg_count(0), quick_group(1) + Item_sum() :arg_count(0), quick_group(1), forced_const(FALSE) { mark_as_sum_func(); } - Item_sum(Item *a) - :args(tmp_args), arg_count(1), quick_group(1) + Item_sum(Item *a) :args(tmp_args), arg_count(1), quick_group(1), + forced_const(FALSE) { args[0]=a; mark_as_sum_func(); } - Item_sum( Item *a, Item *b ) - :args(tmp_args), arg_count(2), quick_group(1) + Item_sum( Item *a, Item *b ) :args(tmp_args), arg_count(2), quick_group(1), + forced_const(FALSE) { args[0]=a; args[1]=b; mark_as_sum_func(); @@ -319,10 +328,20 @@ public: virtual const char *func_name() const= 0; virtual Item *result_item(Field *field) { return new Item_field(field); } - table_map used_tables() const { return ~(table_map) 0; } /* Not used */ - bool const_item() const { return 0; } + table_map used_tables() const { return used_tables_cache; } + void update_used_tables (); + void cleanup() + { + Item::cleanup(); + forced_const= FALSE; + } bool is_null() { return null_value; } - void 
update_used_tables() { } + void make_const () + { + used_tables_cache= 0; + forced_const= TRUE; + } + virtual bool const_item() const { return forced_const; } void make_field(Send_field *field); void print(String *str); void fix_num_length_and_dec(); @@ -346,6 +365,8 @@ public: bool init_sum_func_check(THD *thd); bool check_sum_func(THD *thd, Item **ref); bool register_sum_func(THD *thd, Item **ref); + st_select_lex *depended_from() + { return (nest_level == aggr_level ? 0 : aggr_sel); } }; @@ -509,23 +530,23 @@ public: class Item_sum_count :public Item_sum_int { longlong count; - table_map used_table_cache; public: Item_sum_count(Item *item_par) - :Item_sum_int(item_par),count(0),used_table_cache(~(table_map) 0) + :Item_sum_int(item_par),count(0) {} Item_sum_count(THD *thd, Item_sum_count *item) - :Item_sum_int(thd, item), count(item->count), - used_table_cache(item->used_table_cache) + :Item_sum_int(thd, item), count(item->count) {} - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_FUNC; } void clear(); void no_rows_in_result() { count=0; } bool add(); - void make_const(longlong count_arg) { count=count_arg; used_table_cache=0; } + void make_const(longlong count_arg) + { + count=count_arg; + Item_sum::make_const(); + } longlong val_int(); void reset_field(); void cleanup(); @@ -805,28 +826,22 @@ protected: Item_result hybrid_type; enum_field_types hybrid_field_type; int cmp_sign; - table_map used_table_cache; bool was_values; // Set if we have found at least one row (for max/min only) public: Item_sum_hybrid(Item *item_par,int sign) :Item_sum(item_par), sum(0.0), sum_int(0), hybrid_type(INT_RESULT), hybrid_field_type(MYSQL_TYPE_LONGLONG), - cmp_sign(sign), used_table_cache(~(table_map) 0), - was_values(TRUE) + cmp_sign(sign), was_values(TRUE) { collation.set(&my_charset_bin); } Item_sum_hybrid(THD *thd, Item_sum_hybrid *item); bool fix_fields(THD *, Item **); - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } - void clear(); double val_real(); longlong val_int(); my_decimal *val_decimal(my_decimal *); void reset_field(); String *val_str(String *); - void make_const() { used_table_cache=0; } bool keep_field_type(void) const { return 1; } enum Item_result result_type () const { return hybrid_type; } enum enum_field_types field_type() const { return hybrid_field_type; } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 1bf1fec5cfb..4d6ca2a9b3e 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -46,7 +46,7 @@ the microseconds twice. 
*/ -static bool make_datetime(date_time_format_types format, TIME *ltime, +static bool make_datetime(date_time_format_types format, MYSQL_TIME *ltime, String *str) { char *buff; @@ -95,7 +95,7 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, /* - Wrapper over make_datetime() with validation of the input TIME value + Wrapper over make_datetime() with validation of the input MYSQL_TIME value NOTE see make_datetime() for more information @@ -105,7 +105,7 @@ static bool make_datetime(date_time_format_types format, TIME *ltime, 0 otherwise */ -static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime, +static bool make_datetime_with_warn(date_time_format_types format, MYSQL_TIME *ltime, String *str) { int warning= 0; @@ -117,14 +117,15 @@ static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime, if (!warning) return 0; - make_truncated_value_warning(current_thd, str->ptr(), str->length(), + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + str->ptr(), str->length(), MYSQL_TIMESTAMP_TIME, NullS); return make_datetime(format, ltime, str); } /* - Wrapper over make_time() with validation of the input TIME value + Wrapper over make_time() with validation of the input MYSQL_TIME value NOTE see make_time() for more info @@ -135,7 +136,7 @@ static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime, */ static bool make_time_with_warn(const DATE_TIME_FORMAT *format, - TIME *l_time, String *str) + MYSQL_TIME *l_time, String *str) { int warning= 0; make_time(format, l_time, str); @@ -143,7 +144,8 @@ static bool make_time_with_warn(const DATE_TIME_FORMAT *format, return 1; if (warning) { - make_truncated_value_warning(current_thd, str->ptr(), str->length(), + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + str->ptr(), str->length(), MYSQL_TIMESTAMP_TIME, NullS); make_time(format, l_time, str); } @@ -153,16 +155,16 @@ static bool make_time_with_warn(const DATE_TIME_FORMAT *format, /* - Convert seconds to TIME value with overflow checking + Convert seconds to MYSQL_TIME value with overflow checking SYNOPSIS: sec_to_time() seconds number of seconds unsigned_flag 1, if 'seconds' is unsigned, 0, otherwise - ltime output TIME value + ltime output MYSQL_TIME value DESCRIPTION - If the 'seconds' argument is inside TIME data range, convert it to a + If the 'seconds' argument is inside MYSQL_TIME data range, convert it to a corresponding value. Otherwise, truncate the resulting value to the nearest endpoint, and produce a warning message. @@ -172,7 +174,7 @@ static bool make_time_with_warn(const DATE_TIME_FORMAT *format, 0 otherwise */ -static bool sec_to_time(longlong seconds, bool unsigned_flag, TIME *ltime) +static bool sec_to_time(longlong seconds, bool unsigned_flag, MYSQL_TIME *ltime) { uint sec; @@ -205,7 +207,8 @@ overflow: char buf[22]; int len= (int)(longlong10_to_str(seconds, buf, unsigned_flag ? 10 : -10) - buf); - make_truncated_value_warning(current_thd, buf, len, MYSQL_TIMESTAMP_TIME, + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + buf, len, MYSQL_TIMESTAMP_TIME, NullS); return 1; @@ -224,7 +227,7 @@ static DATE_TIME_FORMAT time_24hrs_format= {{0}, '\0', 0, {(char *)"%H:%i:%S", 8}}; /* - Extract datetime value to TIME struct from string value + Extract datetime value to MYSQL_TIME struct from string value according to format string. 
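One behavioural change inside extract_date_time(), documented and shown below, is that a 2-digit value supplied for %Y now goes through the same 2000-vs-1900 windowing as %y. The windowing itself, written out with the pivot assumed to be 70 as in the server's YY_PART_YEAR:

    #include <cassert>

    enum { YY_PART_YEAR= 70 };   // assumed pivot, as in the server's time headers

    // Expand a 2-digit year the way the %y branch always did.
    static unsigned expand_year(unsigned two_digit)
    {
      return two_digit + (two_digit < YY_PART_YEAR ? 2000 : 1900);
    }

    int main()
    {
      assert(expand_year(3)  == 2003);
      assert(expand_year(69) == 2069);
      assert(expand_year(70) == 1970);
      assert(expand_year(99) == 1999);
      return 0;
    }
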
SYNOPSIS @@ -257,7 +260,7 @@ static DATE_TIME_FORMAT time_24hrs_format= {{0}, '\0', 0, */ static bool extract_date_time(DATE_TIME_FORMAT *format, - const char *val, uint length, TIME *l_time, + const char *val, uint length, MYSQL_TIME *l_time, timestamp_type cached_timestamp_type, const char **sub_pattern_end, const char *date_time_type) @@ -305,13 +308,15 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, case 'Y': tmp= (char*) val + min(4, val_len); l_time->year= (int) my_strtoll10(val, &tmp, &error); + if ((int) (tmp-val) <= 2) + l_time->year= year_2000_handling(l_time->year); val= tmp; break; case 'y': tmp= (char*) val + min(2, val_len); l_time->year= (int) my_strtoll10(val, &tmp, &error); val= tmp; - l_time->year+= (l_time->year < YY_PART_YEAR ? 2000 : 1900); + l_time->year= year_2000_handling(l_time->year); break; /* Month */ @@ -514,7 +519,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (yearday > 0) { - uint days= calc_daynr(l_time->year,1,1) + yearday - 1; + uint days; + days= calc_daynr(l_time->year,1,1) + yearday - 1; if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); @@ -576,7 +582,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, { if (!my_isspace(&my_charset_latin1,*val)) { - make_truncated_value_warning(current_thd, val_begin, length, + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + val_begin, length, cached_timestamp_type, NullS); break; } @@ -600,7 +607,7 @@ err: Create a formated date/time value in a string */ -bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, +bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, timestamp_type type, String *str) { char intbuff[15]; @@ -921,7 +928,7 @@ longlong Item_func_period_diff::val_int() longlong Item_func_to_days::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day); @@ -958,7 +965,7 @@ enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const longlong Item_func_dayofyear::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime,TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day) - @@ -968,7 +975,7 @@ longlong Item_func_dayofyear::val_int() longlong Item_func_dayofmonth::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; (void) get_arg0_date(&ltime, TIME_FUZZY_DATE); return (longlong) ltime.day; } @@ -976,7 +983,7 @@ longlong Item_func_dayofmonth::val_int() longlong Item_func_month::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; (void) get_arg0_date(&ltime, TIME_FUZZY_DATE); return (longlong) ltime.month; } @@ -1006,7 +1013,7 @@ String* Item_func_monthname::val_str(String* str) longlong Item_func_quarter::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime, TIME_FUZZY_DATE)) return 0; return (longlong) ((ltime.month+2)/3); @@ -1015,7 +1022,7 @@ longlong Item_func_quarter::val_int() longlong Item_func_hour::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; (void) get_arg0_time(&ltime); return ltime.hour; } @@ -1023,7 +1030,7 @@ longlong Item_func_hour::val_int() longlong Item_func_minute::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; (void) get_arg0_time(&ltime); return ltime.minute; } @@ -1032,7 +1039,7 @@ longlong
Item_func_minute::val_int() longlong Item_func_second::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; (void) get_arg0_time(&ltime); return ltime.second; } @@ -1079,7 +1086,7 @@ longlong Item_func_week::val_int() { DBUG_ASSERT(fixed == 1); uint year; - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_week(&ltime, @@ -1092,7 +1099,7 @@ longlong Item_func_yearweek::val_int() { DBUG_ASSERT(fixed == 1); uint year,week; - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE)) return 0; week= calc_week(&ltime, @@ -1105,7 +1112,7 @@ longlong Item_func_yearweek::val_int() longlong Item_func_weekday::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE)) return 0; @@ -1135,7 +1142,7 @@ String* Item_func_dayname::val_str(String* str) longlong Item_func_year::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; (void) get_arg0_date(&ltime, TIME_FUZZY_DATE); return (longlong) ltime.year; } @@ -1166,7 +1173,7 @@ enum_monotonicity_info Item_func_year::get_monotonicity_info() const longlong Item_func_unix_timestamp::val_int() { - TIME ltime; + MYSQL_TIME ltime; my_bool not_used; DBUG_ASSERT(fixed == 1); @@ -1197,7 +1204,7 @@ longlong Item_func_unix_timestamp::val_int() longlong Item_func_time_to_sec::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; longlong seconds; (void) get_arg0_time(&ltime); seconds=ltime.hour*3600L+ltime.minute*60+ltime.second; @@ -1369,7 +1376,7 @@ bool get_interval_value(Item *args,interval_type int_type, String *Item_date::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_date(&ltime, TIME_FUZZY_DATE)) return (String *) 0; if (str->alloc(11)) @@ -1385,19 +1392,19 @@ String *Item_date::val_str(String *str) longlong Item_date::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_date(&ltime, TIME_FUZZY_DATE)) return 0; return (longlong) (ltime.year*10000L+ltime.month*100+ltime.day); } -bool Item_func_from_days::get_date(TIME *ltime, uint fuzzy_date) +bool Item_func_from_days::get_date(MYSQL_TIME *ltime, uint fuzzy_date) { longlong value=args[0]->val_int(); if ((null_value=args[0]->null_value)) return 1; - bzero(ltime, sizeof(TIME)); + bzero(ltime, sizeof(MYSQL_TIME)); get_date_from_daynr((long) value, &ltime->year, &ltime->month, &ltime->day); ltime->time_type= MYSQL_TIMESTAMP_DATE; return 0; @@ -1431,10 +1438,10 @@ String *Item_func_curdate::val_str(String *str) } /* - Converts current time in my_time_t to TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME represenatation for local time zone. Defines time zone (local) used for whole CURDATE function. */ -void Item_func_curdate_local::store_now_in_TIME(TIME *now_time) +void Item_func_curdate_local::store_now_in_TIME(MYSQL_TIME *now_time) { THD *thd= current_thd; thd->variables.time_zone->gmt_sec_to_TIME(now_time, @@ -1444,10 +1451,10 @@ void Item_func_curdate_local::store_now_in_TIME(TIME *now_time) /* - Converts current time in my_time_t to TIME represenatation for UTC + Converts current time in my_time_t to MYSQL_TIME represenatation for UTC time zone. Defines time zone (UTC) used for whole UTC_DATE function.
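[Editor's note: rough sketch of the two-digit-year rule that the extract_date_time() and MAKEDATE() hunks in this patch delegate to year_2000_handling(). The cutoff YY_PART_YEAR=70 is assumed from MySQL's usual convention; this illustrates the mapping, not the server's implementation.]

#include <cstdio>

static const long YY_PART_YEAR = 70;     // assumed cutoff

static long two_digit_year(long year)
{
  if (year < YY_PART_YEAR)
    return year + 2000;                  // 00..69  -> 2000..2069
  if (year < 100)
    return year + 1900;                  // 70..99  -> 1970..1999
  return year;                           // 4-digit years are left alone
}

int main()
{
  printf("%ld %ld %ld\n",
         two_digit_year(3),              // 2003
         two_digit_year(85),             // 1985
         two_digit_year(1985));          // 1985
}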
*/ -void Item_func_curdate_utc::store_now_in_TIME(TIME *now_time) +void Item_func_curdate_utc::store_now_in_TIME(MYSQL_TIME *now_time) { my_tz_UTC->gmt_sec_to_TIME(now_time, (my_time_t)(current_thd->query_start())); @@ -1458,7 +1465,7 @@ void Item_func_curdate_utc::store_now_in_TIME(TIME *now_time) } -bool Item_func_curdate::get_date(TIME *res, +bool Item_func_curdate::get_date(MYSQL_TIME *res, uint fuzzy_date __attribute__((unused))) { *res=ltime; @@ -1476,7 +1483,7 @@ String *Item_func_curtime::val_str(String *str) void Item_func_curtime::fix_length_and_dec() { - TIME ltime; + MYSQL_TIME ltime; decimals= DATETIME_DEC; collation.set(&my_charset_bin); @@ -1488,10 +1495,10 @@ void Item_func_curtime::fix_length_and_dec() /* - Converts current time in my_time_t to TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME represenatation for local time zone. Defines time zone (local) used for whole CURTIME function. */ -void Item_func_curtime_local::store_now_in_TIME(TIME *now_time) +void Item_func_curtime_local::store_now_in_TIME(MYSQL_TIME *now_time) { THD *thd= current_thd; thd->variables.time_zone->gmt_sec_to_TIME(now_time, @@ -1501,10 +1508,10 @@ void Item_func_curtime_local::store_now_in_TIME(TIME *now_time) /* - Converts current time in my_time_t to TIME represenatation for UTC + Converts current time in my_time_t to MYSQL_TIME represenatation for UTC time zone. Defines time zone (UTC) used for whole UTC_TIME function. */ -void Item_func_curtime_utc::store_now_in_TIME(TIME *now_time) +void Item_func_curtime_utc::store_now_in_TIME(MYSQL_TIME *now_time) { my_tz_UTC->gmt_sec_to_TIME(now_time, (my_time_t)(current_thd->query_start())); @@ -1537,10 +1544,10 @@ void Item_func_now::fix_length_and_dec() /* - Converts current time in my_time_t to TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME represenatation for local time zone. Defines time zone (local) used for whole NOW function. */ -void Item_func_now_local::store_now_in_TIME(TIME *now_time) +void Item_func_now_local::store_now_in_TIME(MYSQL_TIME *now_time) { THD *thd= current_thd; thd->variables.time_zone->gmt_sec_to_TIME(now_time, @@ -1550,10 +1557,10 @@ void Item_func_now_local::store_now_in_TIME(TIME *now_time) /* - Converts current time in my_time_t to TIME represenatation for UTC + Converts current time in my_time_t to MYSQL_TIME represenatation for UTC time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function. */ -void Item_func_now_utc::store_now_in_TIME(TIME *now_time) +void Item_func_now_utc::store_now_in_TIME(MYSQL_TIME *now_time) { my_tz_UTC->gmt_sec_to_TIME(now_time, (my_time_t)(current_thd->query_start())); @@ -1564,7 +1571,7 @@ void Item_func_now_utc::store_now_in_TIME(TIME *now_time) } -bool Item_func_now::get_date(TIME *res, +bool Item_func_now::get_date(MYSQL_TIME *res, uint fuzzy_date __attribute__((unused))) { *res= ltime; @@ -1581,10 +1588,10 @@ int Item_func_now::save_in_field(Field *to, bool no_conversions) /* - Converts current time in my_time_t to TIME represenatation for local + Converts current time in my_time_t to MYSQL_TIME represenatation for local time zone. Defines time zone (local) used for whole SYSDATE function. 
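[Editor's note: illustration, using plain libc calls rather than the server's Time_zone classes, of the split the store_now_in_TIME() methods above encode: the same epoch value is broken down either in the session's local time zone (NOW, CURDATE, CURTIME) or in UTC (the UTC_* functions).]

#include <cstdio>
#include <ctime>

int main()
{
  time_t now = time(NULL);
  struct tm local_tm, utc_tm;
  localtime_r(&now, &local_tm);   // analogue of the *_local variants
  gmtime_r(&now, &utc_tm);        // analogue of the *_utc variants
  printf("local %02d:%02d  utc %02d:%02d\n",
         local_tm.tm_hour, local_tm.tm_min, utc_tm.tm_hour, utc_tm.tm_min);
}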
*/ -void Item_func_sysdate_local::store_now_in_TIME(TIME *now_time) +void Item_func_sysdate_local::store_now_in_TIME(MYSQL_TIME *now_time) { THD *thd= current_thd; thd->variables.time_zone->gmt_sec_to_TIME(now_time, (my_time_t) time(NULL)); @@ -1626,7 +1633,7 @@ void Item_func_sysdate_local::fix_length_and_dec() } -bool Item_func_sysdate_local::get_date(TIME *res, +bool Item_func_sysdate_local::get_date(MYSQL_TIME *res, uint fuzzy_date __attribute__((unused))) { store_now_in_TIME(&ltime); @@ -1647,7 +1654,7 @@ int Item_func_sysdate_local::save_in_field(Field *to, bool no_conversions) String *Item_func_sec_to_time::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; longlong arg_val= args[0]->val_int(); if ((null_value=args[0]->null_value) || str->alloc(19)) @@ -1666,7 +1673,7 @@ String *Item_func_sec_to_time::val_str(String *str) longlong Item_func_sec_to_time::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; longlong arg_val= args[0]->val_int(); if ((null_value=args[0]->null_value)) @@ -1808,7 +1815,7 @@ uint Item_func_date_format::format_length(const String *format) String *Item_func_date_format::val_str(String *str) { String *format; - TIME l_time; + MYSQL_TIME l_time; uint size; DBUG_ASSERT(fixed == 1); @@ -1871,7 +1878,7 @@ void Item_func_from_unixtime::fix_length_and_dec() String *Item_func_from_unixtime::val_str(String *str) { - TIME time_tmp; + MYSQL_TIME time_tmp; DBUG_ASSERT(fixed == 1); @@ -1891,7 +1898,7 @@ String *Item_func_from_unixtime::val_str(String *str) longlong Item_func_from_unixtime::val_int() { - TIME time_tmp; + MYSQL_TIME time_tmp; DBUG_ASSERT(fixed == 1); @@ -1901,7 +1908,7 @@ longlong Item_func_from_unixtime::val_int() return (longlong) TIME_to_ulonglong_datetime(&time_tmp); } -bool Item_func_from_unixtime::get_date(TIME *ltime, +bool Item_func_from_unixtime::get_date(MYSQL_TIME *ltime, uint fuzzy_date __attribute__((unused))) { ulonglong tmp= (ulonglong)(args[0]->val_int()); @@ -1927,22 +1934,9 @@ void Item_func_convert_tz::fix_length_and_dec() } -bool -Item_func_convert_tz::fix_fields(THD *thd_arg, Item **ref) -{ - String str; - if (Item_date_func::fix_fields(thd_arg, ref)) - return TRUE; - - tz_tables= thd_arg->lex->time_zone_tables_used; - - return FALSE; -} - - String *Item_func_convert_tz::val_str(String *str) { - TIME time_tmp; + MYSQL_TIME time_tmp; if (get_date(&time_tmp, 0)) return 0; @@ -1960,7 +1954,7 @@ String *Item_func_convert_tz::val_str(String *str) longlong Item_func_convert_tz::val_int() { - TIME time_tmp; + MYSQL_TIME time_tmp; if (get_date(&time_tmp, 0)) return 0; @@ -1969,21 +1963,22 @@ longlong Item_func_convert_tz::val_int() } -bool Item_func_convert_tz::get_date(TIME *ltime, +bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime, uint fuzzy_date __attribute__((unused))) { my_time_t my_time_tmp; String str; + THD *thd= current_thd; if (!from_tz_cached) { - from_tz= my_tz_find(args[1]->val_str(&str), tz_tables); + from_tz= my_tz_find(thd, args[1]->val_str(&str)); from_tz_cached= args[1]->const_item(); } if (!to_tz_cached) { - to_tz= my_tz_find(args[2]->val_str(&str), tz_tables); + to_tz= my_tz_find(thd, args[2]->val_str(&str)); to_tz_cached= args[2]->const_item(); } @@ -2030,7 +2025,7 @@ void Item_date_add_interval::fix_length_and_dec() - If first arg is a MYSQL_TYPE_DATE and the interval type uses hours, minutes or seconds then type is MYSQL_TYPE_DATETIME.
- Otherwise the result is MYSQL_TYPE_STRING - (This is because you can't know if the string contains a DATE, MYSQL_TIME or + (This is because you can't know if the string contains a DATE, MYSQL_TIME or DATETIME argument) */ cached_field_type= MYSQL_TYPE_STRING; @@ -2050,7 +2045,7 @@ void Item_date_add_interval::fix_length_and_dec() /* Here arg[1] is a Item_interval object */ -bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) +bool Item_date_add_interval::get_date(MYSQL_TIME *ltime, uint fuzzy_date) { INTERVAL interval; @@ -2060,8 +2055,6 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) if (date_sub_interval) interval.neg = !interval.neg; - if (ltime->year < YY_MAGIC_BELOW) - return (null_value=1); return (null_value= date_add_interval(ltime, int_type, interval)); } @@ -2070,7 +2063,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) String *Item_date_add_interval::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; enum date_time_format_types format; if (Item_date_add_interval::get_date(&ltime, TIME_NO_ZERO_DATE)) @@ -2094,7 +2087,7 @@ String *Item_date_add_interval::val_str(String *str) longlong Item_date_add_interval::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; longlong date; if (Item_date_add_interval::get_date(&ltime, TIME_NO_ZERO_DATE)) return (longlong) 0; @@ -2184,7 +2177,7 @@ void Item_extract::fix_length_and_dec() longlong Item_extract::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; uint year; ulong week_format; long neg; @@ -2437,7 +2430,7 @@ void Item_char_typecast::fix_length_and_dec() String *Item_datetime_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (!get_arg0_date(&ltime, TIME_FUZZY_DATE) && !make_datetime(ltime.second_part ? DATE_TIME_MICROSECOND : DATE_TIME, &ltime, str)) @@ -2451,7 +2444,7 @@ String *Item_datetime_typecast::val_str(String *str) longlong Item_datetime_typecast::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (get_arg0_date(&ltime,1)) { null_value= 1; @@ -2462,7 +2455,7 @@ longlong Item_datetime_typecast::val_int() } -bool Item_time_typecast::get_time(TIME *ltime) +bool Item_time_typecast::get_time(MYSQL_TIME *ltime) { bool res= get_arg0_time(ltime); /* @@ -2478,7 +2471,7 @@ bool Item_time_typecast::get_time(TIME *ltime) longlong Item_time_typecast::val_int() { - TIME ltime; + MYSQL_TIME ltime; if (get_time(&ltime)) { null_value= 1; @@ -2490,7 +2483,7 @@ longlong Item_time_typecast::val_int() String *Item_time_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (!get_arg0_time(&ltime) && !make_datetime(ltime.second_part ?
TIME_MICROSECOND : TIME_ONLY, &ltime, str)) @@ -2502,7 +2495,7 @@ String *Item_time_typecast::val_str(String *str) } -bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) +bool Item_date_typecast::get_date(MYSQL_TIME *ltime, uint fuzzy_date) { bool res= get_arg0_date(ltime, TIME_FUZZY_DATE); ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0; @@ -2514,7 +2507,7 @@ bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) String *Item_date_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (!get_arg0_date(&ltime, TIME_FUZZY_DATE) && !str->alloc(11)) { @@ -2529,7 +2522,7 @@ String *Item_date_typecast::val_str(String *str) longlong Item_date_typecast::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (args[0]->get_date(&ltime, TIME_FUZZY_DATE)) { null_value= 1; @@ -2541,21 +2534,29 @@ longlong Item_date_typecast::val_int() /* MAKEDATE(a,b) is a date function that creates a date value from a year and day value. + + NOTES: + As arguments are integers, we can't know if the year is a 2 digit or 4 digit year. + In this case we treat all years < 100 as 2 digit years. Ie, this is not safe + for dates between 0000-01-01 and 0099-12-31 */ String *Item_func_makedate::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME l_time; + MYSQL_TIME l_time; long daynr= (long) args[1]->val_int(); - long yearnr= (long) args[0]->val_int(); + long year= (long) args[0]->val_int(); long days; if (args[0]->null_value || args[1]->null_value || - yearnr < 0 || daynr <= 0) + year < 0 || daynr <= 0) goto err; - days= calc_daynr(yearnr,1,1) + daynr - 1; + if (year < 100) + year= year_2000_handling(year); + + days= calc_daynr(year,1,1) + daynr - 1; /* Day number from year 0 to 9999-12-31 */ if (days >= 0 && days <= MAX_DAY_NUMBER) { @@ -2573,19 +2574,32 @@ err: } +/* + MAKEDATE(a,b) is a date function that creates a date value + from a year and day value. + + NOTES: + As arguments are integers, we can't know if the year is a 2 digit or 4 digit year. + In this case we treat all years < 100 as 2 digit years.
Ie, this is not safe + for dates between 0000-01-01 and 0099-12-31 +*/ + longlong Item_func_makedate::val_int() { DBUG_ASSERT(fixed == 1); - TIME l_time; + MYSQL_TIME l_time; long daynr= (long) args[1]->val_int(); - long yearnr= (long) args[0]->val_int(); + long year= (long) args[0]->val_int(); long days; if (args[0]->null_value || args[1]->null_value || - yearnr < 0 || daynr <= 0) + year < 0 || daynr <= 0) goto err; - days= calc_daynr(yearnr,1,1) + daynr - 1; + if (year < 100) + year= year_2000_handling(year); + + days= calc_daynr(year,1,1) + daynr - 1; /* Day number from year 0 to 9999-12-31 */ if (days >= 0 && days < MAX_DAY_NUMBER) { @@ -2640,7 +2654,7 @@ void Item_func_add_time::fix_length_and_dec() String *Item_func_add_time::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME l_time1, l_time2, l_time3; + MYSQL_TIME l_time1, l_time2, l_time3; bool is_time= 0; long days, microseconds; longlong seconds; @@ -2742,7 +2756,7 @@ String *Item_func_timediff::val_str(String *str) longlong seconds; long microseconds; int l_sign= 1; - TIME l_time1 ,l_time2, l_time3; + MYSQL_TIME l_time1 ,l_time2, l_time3; null_value= 0; if (args[0]->get_time(&l_time1) || @@ -2787,7 +2801,7 @@ null_date: String *Item_func_maketime::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; bool overflow= 0; longlong hour= args[0]->val_int(); @@ -2831,7 +2845,8 @@ String *Item_func_maketime::val_str(String *str) char *ptr= longlong10_to_str(hour, buf, args[0]->unsigned_flag ? 10 : -10); int len = (int)(ptr - buf) + my_sprintf(ptr, (ptr, ":%02u:%02u", (uint)minute, (uint)second)); - make_truncated_value_warning(current_thd, buf, len, MYSQL_TIMESTAMP_TIME, + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + buf, len, MYSQL_TIMESTAMP_TIME, NullS); } @@ -2855,7 +2870,7 @@ String *Item_func_maketime::val_str(String *str) longlong Item_func_microsecond::val_int() { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (!get_arg0_time(&ltime)) return ltime.second_part; return 0; @@ -2864,7 +2879,7 @@ longlong Item_func_microsecond::val_int() longlong Item_func_timestamp_diff::val_int() { - TIME ltime1, ltime2; + MYSQL_TIME ltime1, ltime2; longlong seconds; long microseconds; long months= 0; @@ -3170,7 +3185,7 @@ void Item_func_str_to_date::fix_length_and_dec() } } -bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date) +bool Item_func_str_to_date::get_date(MYSQL_TIME *ltime, uint fuzzy_date) { DATE_TIME_FORMAT date_time_format; char val_buff[64], format_buff[64]; @@ -3211,7 +3226,7 @@ null_date: String *Item_func_str_to_date::val_str(String *str) { DBUG_ASSERT(fixed == 1); - TIME ltime; + MYSQL_TIME ltime; if (Item_func_str_to_date::get_date(&ltime, TIME_FUZZY_DATE)) return 0; @@ -3224,7 +3239,7 @@ String *Item_func_str_to_date::val_str(String *str) } -bool Item_func_last_day::get_date(TIME *ltime, uint fuzzy_date) +bool Item_func_last_day::get_date(MYSQL_TIME *ltime, uint fuzzy_date) { if (get_arg0_date(ltime, fuzzy_date & ~TIME_FUZZY_DATE) || (ltime->month == 0)) diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index e53826ce3df..992b79753ca 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -351,7 +351,7 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_DATE; } String *val_str(String *str); longlong val_int(); - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { return val_real_from_decimal(); } const char *func_name() const { return "date"; } void fix_length_and_dec() { @@
-389,6 +389,7 @@ public: return tmp_table_field_from_field_type(table, 0); } bool result_as_longlong() { return TRUE; } + double val_real() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -411,13 +412,14 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_TIME; } void fix_length_and_dec() { - decimals=0; + decimals= DATETIME_DEC; max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } Field *tmp_table_field(TABLE *table) { return tmp_table_field_from_field_type(table, 0); } + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -447,9 +449,9 @@ public: /* Abstract method that defines which time zone is used for conversion. Converts time current time in my_time_t representation to broken-down - TIME representation using UTC-SYSTEM or per-thread time zone. + MYSQL_TIME representation using UTC-SYSTEM or per-thread time zone. */ - virtual void store_now_in_TIME(TIME *now_time)=0; + virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0; bool result_as_longlong() { return TRUE; } }; @@ -460,7 +462,7 @@ public: Item_func_curtime_local() :Item_func_curtime() {} Item_func_curtime_local(Item *a) :Item_func_curtime(a) {} const char *func_name() const { return "curtime"; } - virtual void store_now_in_TIME(TIME *now_time); + virtual void store_now_in_TIME(MYSQL_TIME *now_time); }; @@ -470,7 +472,7 @@ public: Item_func_curtime_utc() :Item_func_curtime() {} Item_func_curtime_utc(Item *a) :Item_func_curtime(a) {} const char *func_name() const { return "utc_time"; } - virtual void store_now_in_TIME(TIME *now_time); + virtual void store_now_in_TIME(MYSQL_TIME *now_time); }; @@ -479,14 +481,14 @@ public: class Item_func_curdate :public Item_date { longlong value; - TIME ltime; + MYSQL_TIME ltime; public: Item_func_curdate() :Item_date() {} longlong val_int() { DBUG_ASSERT(fixed == 1); return (value) ; } String *val_str(String *str); void fix_length_and_dec(); - bool get_date(TIME *res, uint fuzzy_date); - virtual void store_now_in_TIME(TIME *now_time)=0; + bool get_date(MYSQL_TIME *res, uint fuzzy_date); + virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0; }; @@ -495,7 +497,7 @@ class Item_func_curdate_local :public Item_func_curdate public: Item_func_curdate_local() :Item_func_curdate() {} const char *func_name() const { return "curdate"; } - void store_now_in_TIME(TIME *now_time); + void store_now_in_TIME(MYSQL_TIME *now_time); }; @@ -504,7 +506,7 @@ class Item_func_curdate_utc :public Item_func_curdate public: Item_func_curdate_utc() :Item_func_curdate() {} const char *func_name() const { return "utc_date"; } - void store_now_in_TIME(TIME *now_time); + void store_now_in_TIME(MYSQL_TIME *now_time); }; @@ -516,18 +518,17 @@ protected: longlong value; char buff[20*2+32]; // +32 to make my_snprintf_{8bit|ucs2} happy uint buff_length; - TIME ltime; + MYSQL_TIME ltime; public: Item_func_now() :Item_date_func() {} Item_func_now(Item *a) :Item_date_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } int save_in_field(Field *to, bool no_conversions); String *val_str(String *str); void fix_length_and_dec(); - bool get_date(TIME *res, uint fuzzy_date); - virtual void store_now_in_TIME(TIME *now_time)=0; + bool get_date(MYSQL_TIME *res, uint fuzzy_date); + virtual void store_now_in_TIME(MYSQL_TIME 
*now_time)=0; }; @@ -537,7 +538,7 @@ public: Item_func_now_local() :Item_func_now() {} Item_func_now_local(Item *a) :Item_func_now(a) {} const char *func_name() const { return "now"; } - virtual void store_now_in_TIME(TIME *now_time); + virtual void store_now_in_TIME(MYSQL_TIME *now_time); virtual enum Functype functype() const { return NOW_FUNC; } }; @@ -548,7 +549,7 @@ public: Item_func_now_utc() :Item_func_now() {} Item_func_now_utc(Item *a) :Item_func_now(a) {} const char *func_name() const { return "utc_timestamp"; } - virtual void store_now_in_TIME(TIME *now_time); + virtual void store_now_in_TIME(MYSQL_TIME *now_time); }; @@ -563,13 +564,13 @@ public: Item_func_sysdate_local(Item *a) :Item_func_now(a) {} bool const_item() const { return 0; } const char *func_name() const { return "sysdate"; } - void store_now_in_TIME(TIME *now_time); + void store_now_in_TIME(MYSQL_TIME *now_time); double val_real(); longlong val_int(); int save_in_field(Field *to, bool no_conversions); String *val_str(String *str); void fix_length_and_dec(); - bool get_date(TIME *res, uint fuzzy_date); + bool get_date(MYSQL_TIME *res, uint fuzzy_date); void update_used_tables() { Item_func_now::update_used_tables(); @@ -583,7 +584,7 @@ class Item_func_from_days :public Item_date public: Item_func_from_days(Item *a) :Item_date(a) {} const char *func_name() const { return "from_days"; } - bool get_date(TIME *res, uint fuzzy_date); + bool get_date(MYSQL_TIME *res, uint fuzzy_date); bool check_partition_func_processor(byte *int_arg) {return FALSE;} }; @@ -611,16 +612,11 @@ class Item_func_from_unixtime :public Item_date_func THD *thd; public: Item_func_from_unixtime(Item *a) :Item_date_func(a) {} - double val_real() - { - DBUG_ASSERT(fixed == 1); - return (double) Item_func_from_unixtime::val_int(); - } longlong val_int(); String *val_str(String *str); const char *func_name() const { return "from_unixtime"; } void fix_length_and_dec(); - bool get_date(TIME *res, uint fuzzy_date); + bool get_date(MYSQL_TIME *res, uint fuzzy_date); bool check_partition_func_processor(byte *int_arg) {return FALSE;} }; @@ -641,8 +637,6 @@ class Time_zone; */ class Item_func_convert_tz :public Item_date_func { - /* Cached pointer to list of pre-opened time zone tables. 
*/ - TABLE_LIST *tz_tables; /* If time zone parameters are constants we are caching objects that represent them (we use separate from_tz_cached/to_tz_cached members @@ -655,12 +649,10 @@ class Item_func_convert_tz :public Item_date_func Item_func_convert_tz(Item *a, Item *b, Item *c): Item_date_func(a, b, c), from_tz_cached(0), to_tz_cached(0) {} longlong val_int(); - double val_real() { return (double) val_int(); } String *val_str(String *str); const char *func_name() const { return "convert_tz"; } - bool fix_fields(THD *, Item **); void fix_length_and_dec(); - bool get_date(TIME *res, uint fuzzy_date); + bool get_date(MYSQL_TIME *res, uint fuzzy_date); void cleanup(); }; @@ -681,7 +673,6 @@ public: Item_str_timefunc::fix_length_and_dec(); collation.set(&my_charset_bin); maybe_null=1; - decimals= DATETIME_DEC; } const char *func_name() const { return "sec_to_time"; } bool result_as_longlong() { return TRUE; } @@ -703,9 +694,8 @@ public: const char *func_name() const { return "date_add_interval"; } void fix_length_and_dec(); enum_field_types field_type() const { return cached_field_type; } - double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); - bool get_date(TIME *res, uint fuzzy_date); + bool get_date(MYSQL_TIME *res, uint fuzzy_date); bool eq(const Item *item, bool binary_cmp) const; void print(String *str); bool check_partition_func_processor(byte *int_arg) {return FALSE;} @@ -793,7 +783,7 @@ public: Item_date_typecast(Item *a) :Item_typecast_maybe_null(a) {} const char *func_name() const { return "cast_as_date"; } String *val_str(String *str); - bool get_date(TIME *ltime, uint fuzzy_date); + bool get_date(MYSQL_TIME *ltime, uint fuzzy_date); const char *cast_type() const { return "date"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } Field *tmp_table_field(TABLE *table) @@ -808,6 +798,7 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -826,7 +817,7 @@ public: Item_time_typecast(Item *a) :Item_typecast_maybe_null(a) {} const char *func_name() const { return "cast_as_time"; } String *val_str(String *str); - bool get_time(TIME *ltime); + bool get_time(MYSQL_TIME *ltime); const char *cast_type() const { return "time"; } enum_field_types field_type() const { return MYSQL_TYPE_TIME; } Field *tmp_table_field(TABLE *table) @@ -835,6 +826,7 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -859,8 +851,15 @@ public: { return tmp_table_field_from_field_type(table, 0); } + void fix_length_and_dec() + { + Item_typecast_maybe_null::fix_length_and_dec(); + decimals= DATETIME_DEC; + } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } + double val() { return (double) val_int(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -909,6 +908,7 @@ public: void print(String *str); const char *func_name() const { return "add_time"; } bool check_partition_func_processor(byte *int_arg) {return FALSE;} + double val_real() { return val_real_from_decimal(); } my_decimal *val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); @@ -1020,7 +1020,7 @@ public: :Item_str_func(a, b) {} String *val_str(String *str); - bool 
get_date(TIME *ltime, uint fuzzy_date); + bool get_date(MYSQL_TIME *ltime, uint fuzzy_date); const char *func_name() const { return "str_to_date"; } enum_field_types field_type() const { return cached_field_type; } void fix_length_and_dec(); @@ -1037,5 +1037,5 @@ class Item_func_last_day :public Item_date public: Item_func_last_day(Item *a) :Item_date(a) {} const char *func_name() const { return "last_day"; } - bool get_date(TIME *res, uint fuzzy_date); + bool get_date(MYSQL_TIME *res, uint fuzzy_date); }; diff --git a/sql/key.cc b/sql/key.cc index bd614b10a70..faa7bf1f04b 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -29,6 +29,7 @@ field Field to search after key_length On partial match, contains length of fields before field + keypart key part # of a field NOTES Used when calculating key for NEXT_NUMBER @@ -45,7 +46,7 @@ */ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, - uint *key_length) + uint *key_length, uint *keypart) { reg2 int i; reg3 KEY *key_info; @@ -60,8 +61,8 @@ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, { if (key_info->key_part[0].offset == fieldpos) { /* Found key. Calc keylength */ - *key_length=0; - return(i); /* Use this key */ + *key_length= *keypart= 0; + return i; /* Use this key */ } } @@ -78,8 +79,11 @@ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, j++, key_part++) { if (key_part->offset == fieldpos) - return(i); /* Use this key */ - *key_length+=key_part->store_length; + { + *keypart= j; + return i; /* Use this key */ + } + *key_length+= key_part->store_length; } } return(-1); /* No key is ok */ diff --git a/sql/lex.h b/sql/lex.h index 45155da7692..e311379120d 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -312,6 +312,7 @@ static SYMBOL symbols[] = { { "MASTER_SSL_CERT", SYM(MASTER_SSL_CERT_SYM)}, { "MASTER_SSL_CIPHER",SYM(MASTER_SSL_CIPHER_SYM)}, { "MASTER_SSL_KEY", SYM(MASTER_SSL_KEY_SYM)}, + { "MASTER_SSL_VERIFY_SERVER_CERT", SYM(MASTER_SSL_VERIFY_SERVER_CERT_SYM)}, { "MASTER_USER", SYM(MASTER_USER_SYM)}, { "MATCH", SYM(MATCH)}, { "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR)}, diff --git a/sql/lock.cc b/sql/lock.cc index a93033bfdd0..4427e57a938 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -582,7 +582,7 @@ TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, goto end; /* A temporary table does not have locks. */ - if (table->s->tmp_table == TMP_TABLE) + if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) goto end; /* Get command lock or LOCK TABLES lock. Maybe empty for INSERT DELAYED. */ @@ -607,7 +607,7 @@ TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, if (haystack->placeholder()) continue; table2= haystack->table; - if (table2->s->tmp_table == TMP_TABLE) + if (table2->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; /* All tables in list must be in lock. 
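[Editor's note: simplified sketch of the extended find_ref_key() contract shown in the key.cc hunk above: besides the key number, the caller now also learns which key part the field occupies and how many bytes precede it. The structures below are stand-ins, not the server's KEY/KEY_PART_INFO definitions, and the primary-key fast path is omitted.]

#include <cstdio>

struct KeyPartSketch { int field_offset; unsigned store_length; };
struct KeySketch     { int parts;        KeyPartSketch part[4]; };

static int find_ref_key_sketch(const KeySketch *keys, int key_count,
                               int fieldpos, unsigned *key_length,
                               unsigned *keypart)
{
  for (int i = 0; i < key_count; i++)
  {
    *key_length = 0;
    for (int j = 0; j < keys[i].parts; j++)
    {
      if (keys[i].part[j].field_offset == fieldpos)
      {
        *keypart = (unsigned) j;                       // field is the j-th part of key i
        return i;
      }
      *key_length += keys[i].part[j].store_length;     // bytes stored before the field
    }
  }
  return -1;                                           // no usable key
}

int main()
{
  KeySketch keys[1] = {{2, {{0, 4}, {8, 8}}}};
  unsigned len, part;
  int idx = find_ref_key_sketch(keys, 1, 8, &len, &part);
  printf("key=%d keypart=%u bytes_before=%u\n", idx, part, len);   // key=0 keypart=1 bytes_before=4
}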
*/ @@ -692,9 +692,10 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, DBUG_PRINT("info", ("count %d", count)); *write_lock_used=0; + uint system_count= 0; for (i=tables=lock_count=0 ; i < count ; i++) { - if (table_ptr[i]->s->tmp_table != TMP_TABLE) + if (table_ptr[i]->s->tmp_table != NON_TRANSACTIONAL_TMP_TABLE) { tables+=table_ptr[i]->file->lock_count(); lock_count++; @@ -705,7 +706,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, */ if (!table_ptr[i]-> file-> check_if_locking_is_allowed(thd->lex->sql_command, thd->lex->type, - table_ptr[i], count, + table_ptr[i], count, i, &system_count, (thd == logger.get_general_log_thd()) || (thd == logger.get_slow_log_thd()) || (thd == logger.get_privileged_thread()))) @@ -736,7 +737,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, TABLE *table; enum thr_lock_type lock_type; - if ((table=table_ptr[i])->s->tmp_table == TMP_TABLE) + if ((table=table_ptr[i])->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE) continue; lock_type= table->reginfo.lock_type; if (lock_type >= TL_WRITE_ALLOW_WRITE) diff --git a/sql/log.cc b/sql/log.cc index deb77890f35..94c744f3f93 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -20,6 +20,7 @@ #include "mysql_priv.h" #include "sql_repl.h" #include "rpl_filter.h" +#include "rpl_rli.h" #include <my_dir.h> #include <stdarg.h> @@ -68,7 +69,7 @@ char *make_default_log_name(char *buff,const char* log_ext) { strmake(buff, pidfile_name, FN_REFLEN-5); return fn_format(buff, buff, mysql_data_home, log_ext, - MYF(MY_UNPACK_FILENAME|MY_APPEND_EXT)); + MYF(MY_UNPACK_FILENAME|MY_REPLACE_EXT)); } /* @@ -569,11 +570,21 @@ bool Log_to_csv_event_handler:: if (query_start_arg) { + /* + A TIME field can not hold the full longlong range; query_time or + lock_time may be truncated without warning here, if greater than + 839 hours (~35 days) + */ + MYSQL_TIME t; + t.neg= 0; + /* fill in query_time field */ - if (table->field[2]->store(query_time, TRUE)) + calc_time_from_sec(&t, (long) min(query_time, (longlong) TIME_MAX_VALUE_SECONDS), 0); + if (table->field[2]->store_time(&t, MYSQL_TIMESTAMP_TIME)) goto err; /* lock_time */ - if (table->field[3]->store(lock_time, TRUE)) + calc_time_from_sec(&t, (long) min(lock_time, (longlong) TIME_MAX_VALUE_SECONDS), 0); + if (table->field[3]->store_time(&t, MYSQL_TIMESTAMP_TIME)) goto err; /* rows_sent */ if (table->field[4]->store((longlong) thd->sent_row_count, TRUE)) @@ -1548,7 +1559,7 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; DBUG_ASSERT(mysql_bin_log.is_open()); - if (all && trx_data->empty()) + if (trx_data->empty()) { // we're here because trans_log was flushed in MYSQL_BIN_LOG::log_xid() trx_data->reset(); @@ -1587,8 +1598,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) table. Such cases should be rare (updating a non-transactional table inside a transaction...) */ - if (unlikely(thd->options & (OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG))) + if (unlikely(thd->no_trans_update.all || (thd->options & OPTION_KEEP_LOG))) { Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE); qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) @@ -1643,8 +1653,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) non-transactional table. Otherwise, truncate the binlog cache starting from the SAVEPOINT command. 
*/ - if (unlikely(thd->options & - (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG))) + if (unlikely(thd->no_trans_update.all || (thd->options & OPTION_KEEP_LOG))) { int error= thd->binlog_query(THD::STMT_QUERY_TYPE, @@ -1714,7 +1723,7 @@ err: #ifdef __NT__ static int eventSource = 0; -void setup_windows_event_source() +static void setup_windows_event_source() { HKEY hRegKey= NULL; DWORD dwError= 0; @@ -2499,7 +2508,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name, /* Set 'created' to 0, so that in next relay logs this event does not trigger cleaning actions on the slave in - Format_description_log_event::exec_event(). + Format_description_log_event::apply_event_impl(). */ description_event_for_queue->created= 0; /* Don't set log_pos in event header */ @@ -3206,8 +3215,10 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) { tc_log_page_waits++; pthread_mutex_lock(&LOCK_prep_xids); - while (prepared_xids) + while (prepared_xids) { + DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids)); pthread_cond_wait(&COND_prep_xids, &LOCK_prep_xids); + } pthread_mutex_unlock(&LOCK_prep_xids); } @@ -3773,7 +3784,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) nb_elements())); /* If the auto_increment was second in a table's index (possible with - MyISAM or BDB) (table->next_number_key_offset != 0), such event is + MyISAM or BDB) (table->next_number_keypart != 0), such event is in fact not necessary. We could avoid logging it. */ Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT, @@ -4216,38 +4227,6 @@ static bool test_if_number(register const char *str, } /* test_if_number */ -void print_buffer_to_file(enum loglevel level, const char *buffer) -{ - time_t skr; - struct tm tm_tmp; - struct tm *start; - DBUG_ENTER("print_buffer_to_file"); - DBUG_PRINT("enter",("buffer: %s", buffer)); - - VOID(pthread_mutex_lock(&LOCK_error_log)); - - skr=time(NULL); - localtime_r(&skr, &tm_tmp); - start=&tm_tmp; - - fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n", - start->tm_year % 100, - start->tm_mon+1, - start->tm_mday, - start->tm_hour, - start->tm_min, - start->tm_sec, - (level == ERROR_LEVEL ? "ERROR" : level == WARNING_LEVEL ? 
- "Warning" : "Note"), - buffer); - - fflush(stderr); - - VOID(pthread_mutex_unlock(&LOCK_error_log)); - DBUG_VOID_RETURN; -} - - void sql_perror(const char *message) { #ifdef HAVE_STRERROR @@ -4314,23 +4293,15 @@ void MYSQL_BIN_LOG::signal_update() } #ifdef __NT__ -void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, - uint length, int buffLen) +static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, + size_t length, size_t buffLen) { HANDLE event; - char *buffptr; - LPCSTR *buffmsgptr; + char *buffptr= buff; DBUG_ENTER("print_buffer_to_nt_eventlog"); - buffptr= buff; - if (length > (uint)(buffLen-5)) - { - char *newBuff= new char[length + 5]; - strcpy(newBuff, buff); - buffptr= newBuff; - } - strmov(buffptr+length, "\r\n\r\n"); - buffmsgptr= (LPCSTR*) &buffptr; // Keep windows happy + /* Add ending CR/LF's to string, overwrite last chars if necessary */ + strmov(buffptr+min(length, buffLen-5), "\r\n\r\n"); setup_windows_event_source(); if ((event= RegisterEventSource(NULL,"MySQL"))) @@ -4338,24 +4309,20 @@ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, switch (level) { case ERROR_LEVEL: ReportEvent(event, EVENTLOG_ERROR_TYPE, 0, MSG_DEFAULT, NULL, 1, 0, - buffmsgptr, NULL); + (LPCSTR*)&buffptr, NULL); break; case WARNING_LEVEL: ReportEvent(event, EVENTLOG_WARNING_TYPE, 0, MSG_DEFAULT, NULL, 1, 0, - buffmsgptr, NULL); + (LPCSTR*) &buffptr, NULL); break; case INFORMATION_LEVEL: ReportEvent(event, EVENTLOG_INFORMATION_TYPE, 0, MSG_DEFAULT, NULL, 1, - 0, buffmsgptr, NULL); + 0, (LPCSTR*) &buffptr, NULL); break; } DeregisterEventSource(event); } - /* if we created a string buffer, then delete it */ - if (buffptr != buff) - delete[] buffptr; - DBUG_VOID_RETURN; } #endif /* __NT__ */ @@ -4392,14 +4359,45 @@ int vprint_msg_to_log(enum loglevel level __attribute__((unused)), DBUG_RETURN(0); } #else /*!EMBEDDED_LIBRARY*/ +static void print_buffer_to_file(enum loglevel level, const char *buffer) +{ + time_t skr; + struct tm tm_tmp; + struct tm *start; + DBUG_ENTER("print_buffer_to_file"); + DBUG_PRINT("enter",("buffer: %s", buffer)); + + VOID(pthread_mutex_lock(&LOCK_error_log)); + + skr=time(NULL); + localtime_r(&skr, &tm_tmp); + start=&tm_tmp; + + fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n", + start->tm_year % 100, + start->tm_mon+1, + start->tm_mday, + start->tm_hour, + start->tm_min, + start->tm_sec, + (level == ERROR_LEVEL ? "ERROR" : level == WARNING_LEVEL ? 
+ "Warning" : "Note"), + buffer); + + fflush(stderr); + + VOID(pthread_mutex_unlock(&LOCK_error_log)); + DBUG_VOID_RETURN; +} + + int vprint_msg_to_log(enum loglevel level, const char *format, va_list args) { char buff[1024]; - uint length; + size_t length; DBUG_ENTER("vprint_msg_to_log"); - /* "- 5" is because of print_buffer_to_nt_eventlog() */ - length= my_vsnprintf(buff, sizeof(buff) - 5, format, args); + length= my_vsnprintf(buff, sizeof(buff), format, args); print_buffer_to_file(level, buff); #ifdef __NT__ @@ -5061,8 +5059,10 @@ void TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid) { pthread_mutex_lock(&LOCK_prep_xids); DBUG_ASSERT(prepared_xids > 0); - if (--prepared_xids == 0) + if (--prepared_xids == 0) { + DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids)); pthread_cond_signal(&COND_prep_xids); + } pthread_mutex_unlock(&LOCK_prep_xids); rotate_and_purge(0); // as ::write() did not rotate } diff --git a/sql/log.h b/sql/log.h index 970823dcd4a..ed0c3557d08 100644 --- a/sql/log.h +++ b/sql/log.h @@ -238,7 +238,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG fix_max_relay_log_size). */ ulong max_size; - ulong prepared_xids; /* for tc log - number of xids to remember */ + long prepared_xids; /* for tc log - number of xids to remember */ // current file sequence number for load data infile binary logging uint file_id; uint open_count; // For replication diff --git a/sql/log_event.cc b/sql/log_event.cc index f8d3c43bfba..e53cbd310ea 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -22,8 +22,11 @@ #include "mysql_priv.h" #include "slave.h" +#include "rpl_rli.h" +#include "rpl_mi.h" #include "rpl_filter.h" #include "rpl_utility.h" +#include "rpl_record.h" #include <my_dir.h> #endif /* MYSQL_CLIENT */ #include <base64.h> @@ -31,6 +34,8 @@ #define log_cs &my_charset_latin1 +#define FLAGSTR(V,F) ((V)&(F)?#F" ":"") + /* Cache that will automatically be written to a dedicated file on destruction. @@ -75,8 +80,7 @@ public: ~Write_on_release_cache() { - if (!my_b_copy_to_file(m_cache, m_file)) - reinit_io_cache(m_cache, WRITE_CACHE, 0L, FALSE, TRUE); + copy_event_cache_to_file_and_reinit(m_cache, m_file); if (m_flags | FLUSH_F) fflush(m_file); } @@ -88,9 +92,10 @@ public: operator&() DESCRIPTION - Function to return a pointer to the internal, so that the object - can be treated as a IO_CACHE and used with the my_b_* IO_CACHE - functions + + Function to return a pointer to the internal cache, so that the + object can be treated as a IO_CACHE and used with the my_b_* + IO_CACHE functions RETURN VALUE A pointer to the internal IO_CACHE. 
@@ -305,7 +310,7 @@ static bool write_str(IO_CACHE *file, char *str, uint length) read_str() */ -static inline int read_str(char **buf, char *buf_end, char **str, +static inline int read_str(const char **buf, const char *buf_end, const char **str, uint8 *len) { if (*buf + ((uint) (uchar) **buf) >= buf_end) @@ -420,6 +425,7 @@ const char* Log_event::get_type_str() case DELETE_ROWS_EVENT: return "Delete_rows"; case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; + case INCIDENT_EVENT: return "Incident"; default: return "Unknown"; /* impossible */ } } @@ -531,71 +537,42 @@ Log_event::Log_event(const char* buf, #ifndef MYSQL_CLIENT #ifdef HAVE_REPLICATION -/* - Log_event::exec_event() -*/ - -int Log_event::exec_event(struct st_relay_log_info* rli) +int Log_event::do_update_pos(RELAY_LOG_INFO *rli) { - DBUG_ENTER("Log_event::exec_event"); - /* - rli is null when (as far as I (Guilhem) know) - the caller is - Load_log_event::exec_event *and* that one is called from - Execute_load_log_event::exec_event. - In this case, we don't do anything here ; - Execute_load_log_event::exec_event will call Log_event::exec_event - again later with the proper rli. - Strictly speaking, if we were sure that rli is null - only in the case discussed above, 'if (rli)' is useless here. - But as we are not 100% sure, keep it for now. + rli is null when (as far as I (Guilhem) know) the caller is + Load_log_event::do_apply_event *and* that one is called from + Execute_load_log_event::do_apply_event. In this case, we don't + do anything here ; Execute_load_log_event::do_apply_event will + call Log_event::do_apply_event again later with the proper rli. + Strictly speaking, if we were sure that rli is null only in the + case discussed above, 'if (rli)' is useless here. But as we are + not 100% sure, keep it for now. + + Matz: I don't think we will need this check with this refactoring. */ if (rli) - { - /* - If in a transaction, and if the slave supports transactions, just - inc_event_relay_log_pos(). We only have to check for OPTION_BEGIN - (not OPTION_NOT_AUTOCOMMIT) as transactions are logged with - BEGIN/COMMIT, not with SET AUTOCOMMIT= . - - CAUTION: opt_using_transactions means - innodb || bdb ; suppose the master supports InnoDB and BDB, - but the slave supports only BDB, problems - will arise: - - suppose an InnoDB table is created on the master, - - then it will be MyISAM on the slave - - but as opt_using_transactions is true, the slave will believe he - is transactional with the MyISAM table. And problems will come - when one does START SLAVE; STOP SLAVE; START SLAVE; (the slave - will resume at BEGIN whereas there has not been any rollback). - This is the problem of using opt_using_transactions instead of a - finer "does the slave support - _the_transactional_handler_used_on_the_master_". - - More generally, we'll have problems when a query mixes a - transactional handler and MyISAM and STOP SLAVE is issued in the - middle of the "transaction". START SLAVE will resume at BEGIN - while the MyISAM table has already been updated. - */ - if ((thd->options & OPTION_BEGIN) && opt_using_transactions) - rli->inc_event_relay_log_pos(); - else - { - rli->inc_group_relay_log_pos(log_pos); - flush_relay_log_info(rli); - /* - Note that Rotate_log_event::exec_event() does not call this - function, so there is no chance that a fake rotate event resets - last_master_timestamp. 
- Note that we update without mutex (probably ok - except in some very - rare cases, only consequence is that value may take some time to - display in Seconds_Behind_Master - not critical). - */ - rli->last_master_timestamp= when; - } - } - DBUG_RETURN(0); + rli->stmt_done(log_pos, when); + + return 0; // Cannot fail currently +} + + +Log_event::enum_skip_reason +Log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + DBUG_PRINT("info", ("ev->server_id=%lu, ::server_id=%lu," + " rli->replicate_same_server_id=%d," + " rli->slave_skip_counter=%d", + (ulong) server_id, (ulong) ::server_id, + rli->replicate_same_server_id, + rli->slave_skip_counter)); + if (server_id == ::server_id && !rli->replicate_same_server_id) + return EVENT_SKIP_IGNORE; + else if (rli->slave_skip_counter > 0) + return EVENT_SKIP_COUNT; + else + return EVENT_SKIP_NOT; } @@ -642,12 +619,13 @@ int Log_event::net_send(Protocol *protocol, const char* log_name, my_off_t pos) void Log_event::init_show_field_list(List<Item>* field_list) { field_list->push_back(new Item_empty_string("Log_name", 20)); - field_list->push_back(new Item_return_int("Pos", 11, + field_list->push_back(new Item_return_int("Pos", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Event_type", 20)); field_list->push_back(new Item_return_int("Server_id", 10, MYSQL_TYPE_LONG)); - field_list->push_back(new Item_return_int("End_log_pos", 11, + field_list->push_back(new Item_return_int("End_log_pos", + MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Info", 20)); } @@ -742,7 +720,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, ulong data_len; int result=0; char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; - DBUG_ENTER("read_log_event"); + DBUG_ENTER("Log_event::read_log_event"); if (log_lock) pthread_mutex_lock(log_lock); @@ -769,19 +747,34 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, LOG_READ_TOO_LARGE); goto end; } - packet->append(buf, sizeof(buf)); + + /* Append the log event header to packet */ + if (packet->append(buf, sizeof(buf))) + { + /* Failed to allocate packet */ + result= LOG_READ_MEM; + goto end; + } data_len-= LOG_EVENT_MINIMAL_HEADER_LEN; if (data_len) { + /* Append rest of event, read directly from file into packet */ if (packet->append(file, data_len)) { /* - Here if we hit EOF it's really an error: as data_len is >=0 - there's supposed to be more bytes available. - EOF means we are reading the event partially, which should - never happen: either we read badly or the binlog is truncated. + Fatal error occured when appending rest of the event + to packet, possible failures: + 1. EOF occured when reading from file, it's really an error + as data_len is >=0 there's supposed to be more bytes available. + file->error will have been set to number of bytes left to read + 2. Read was interrupted, file->error would normally be set to -1 + 3. Failed to allocate memory for packet, my_errno + will be ENOMEM(file->error shuold be 0, but since the + memory allocation occurs before the call to read it might + be uninitialized) */ - result= file->error >= 0 ? LOG_READ_TRUNC: LOG_READ_IO; + result= (my_errno == ENOMEM ? LOG_READ_MEM : + (file->error >= 0 ? 
LOG_READ_TRUNC: LOG_READ_IO)); /* Implicit goto end; */ } } @@ -817,7 +810,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, const Format_description_log_event *description_event) #endif { - DBUG_ENTER("Log_event::read_log_event(IO_CACHE *, Format_description_log_event *"); + DBUG_ENTER("Log_event::read_log_event"); DBUG_ASSERT(description_event != 0); char head[LOG_EVENT_MINIMAL_HEADER_LEN]; /* @@ -980,6 +973,15 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, ev = new Format_description_log_event(buf, event_len, description_event); break; #if defined(HAVE_REPLICATION) + case PRE_GA_WRITE_ROWS_EVENT: + ev = new Write_rows_log_event_old(buf, event_len, description_event); + break; + case PRE_GA_UPDATE_ROWS_EVENT: + ev = new Update_rows_log_event_old(buf, event_len, description_event); + break; + case PRE_GA_DELETE_ROWS_EVENT: + ev = new Delete_rows_log_event_old(buf, event_len, description_event); + break; case WRITE_ROWS_EVENT: ev = new Write_rows_log_event(buf, event_len, description_event); break; @@ -997,14 +999,22 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len, ev = new Begin_load_query_log_event(buf, event_len, description_event); break; case EXECUTE_LOAD_QUERY_EVENT: - ev = new Execute_load_query_log_event(buf, event_len, description_event); + ev= new Execute_load_query_log_event(buf, event_len, description_event); + break; + case INCIDENT_EVENT: + ev = new Incident_log_event(buf, event_len, description_event); break; default: - DBUG_PRINT("error",("Unknown evernt code: %d",(int) buf[EVENT_TYPE_OFFSET])); + DBUG_PRINT("error",("Unknown event code: %d", + (int) buf[EVENT_TYPE_OFFSET])); ev= NULL; break; } + DBUG_PRINT("read_event", ("%s(type_code: %d; event_len: %d)", + ev ? ev->get_type_str() : "<unknown>", + buf[EVENT_TYPE_OFFSET], + event_len)); /* is_valid() are small event-specific sanity tests which are important; for example there are some my_malloc() in constructors @@ -1887,27 +1897,28 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Query_log_event::exec_event() + Query_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Query_log_event::exec_event(struct st_relay_log_info* rli) +int Query_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - return exec_event(rli, query, q_len); + return do_apply_event(rli, query, q_len); } -int Query_log_event::exec_event(struct st_relay_log_info* rli, - const char *query_arg, uint32 q_len_arg) +int Query_log_event::do_apply_event(RELAY_LOG_INFO const *rli, + const char *query_arg, uint32 q_len_arg) { LEX_STRING new_db; int expected_error,actual_error= 0; /* - Colleagues: please never free(thd->catalog) in MySQL. This would lead to - bugs as here thd->catalog is a part of an alloced block, not an entire - alloced block (see Query_log_event::exec_event()). Same for thd->db. - Thank you. + Colleagues: please never free(thd->catalog) in MySQL. This would + lead to bugs as here thd->catalog is a part of an alloced block, + not an entire alloced block (see + Query_log_event::do_apply_event()). Same for thd->db. Thank + you. */ thd->catalog= catalog_len ? (char *) catalog : (char *)""; new_db.length= db_len; @@ -1926,11 +1937,11 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, END of the current log event (COMMIT). We save it in rli so that InnoDB can access it. 
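[Editor's note: condensed restatement, as a standalone function, of the skip decision introduced by the new Log_event::do_shall_skip() earlier in this hunk. The enum values and the surrounding replication classes are local stand-ins that mirror the patch, not the server's declarations.]

#include <cstdio>

enum enum_skip_reason { EVENT_SKIP_NOT, EVENT_SKIP_IGNORE, EVENT_SKIP_COUNT };

static enum_skip_reason shall_skip(unsigned long event_server_id,
                                   unsigned long own_server_id,
                                   bool replicate_same_server_id,
                                   unsigned long slave_skip_counter)
{
  if (event_server_id == own_server_id && !replicate_same_server_id)
    return EVENT_SKIP_IGNORE;            // our own event coming back to us
  if (slave_skip_counter > 0)
    return EVENT_SKIP_COUNT;             // SQL_SLAVE_SKIP_COUNTER is active
  return EVENT_SKIP_NOT;                 // apply the event normally
}

int main()
{
  printf("%d %d %d\n",
         shall_skip(1, 1, false, 0),     // EVENT_SKIP_IGNORE (1)
         shall_skip(2, 1, false, 3),     // EVENT_SKIP_COUNT  (2)
         shall_skip(2, 1, false, 0));    // EVENT_SKIP_NOT    (0)
}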
*/ - rli->future_group_master_log_pos= log_pos; + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); - clear_all_errors(thd, rli); - rli->clear_tables_to_lock(); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); /* Note: We do not need to execute reset_one_shot_variables() if this @@ -1939,8 +1950,8 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, its companion query. If the SET is ignored because of db_ok(), the companion query will also be ignored, and if the companion query is ignored in the db_ok() test of - ::exec_event(), then the companion SET also have so we - don't need to reset_one_shot_variables(). + ::do_apply_event(), then the companion SET also have so + we don't need to reset_one_shot_variables(). */ if (rpl_filter->db_ok(thd->db)) { @@ -2006,8 +2017,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, if (time_zone_len) { String tmp(time_zone_str, time_zone_len, &my_charset_bin); - if (!(thd->variables.time_zone= - my_tz_find_with_opening_tz_tables(thd, &tmp))) + if (!(thd->variables.time_zone= my_tz_find(thd, &tmp))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), tmp.c_ptr()); thd->variables.time_zone= global_system_variables.time_zone; @@ -2056,7 +2066,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, to check/fix it. */ if (mysql_test_parse_for_slave(thd, thd->query, thd->query_length)) - clear_all_errors(thd, rli); /* Can ignore query */ + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); /* Can ignore query */ else { slave_print_msg(ERROR_LEVEL, rli, expected_error, @@ -2107,7 +2117,7 @@ Default database: '%s'. Query: '%s'", ignored_error_code(actual_error)) { DBUG_PRINT("info",("error ignored")); - clear_all_errors(thd, rli); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); } /* Other cases: mostly we expected no error and get one. @@ -2174,16 +2184,26 @@ end: thd->first_successful_insert_id_in_prev_stmt= 0; thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); + return thd->query_error; +} + +int Query_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ /* - If there was an error we stop. Otherwise we increment positions. Note that - we will not increment group* positions if we are just after a SET - ONE_SHOT, because SET ONE_SHOT should not be separated from its following - updating query. + Note that we will not increment group* positions if we are just + after a SET ONE_SHOT, because SET ONE_SHOT should not be separated + from its following updating query. */ - return (thd->query_error ? thd->query_error : - (thd->one_shot_set ? 
(rli->inc_event_relay_log_pos(),0) : - Log_event::exec_event(rli))); + if (thd->one_shot_set) + { + rli->inc_event_relay_log_pos(); + return 0; + } + else + return Log_event::do_update_pos(rli); } + + #endif @@ -2312,7 +2332,7 @@ bool Start_log_event_v3::write(IO_CACHE* file) /* - Start_log_event_v3::exec_event() + Start_log_event_v3::do_apply_event() The master started @@ -2331,9 +2351,9 @@ bool Start_log_event_v3::write(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) +int Start_log_event_v3::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Start_log_event_v3::exec_event"); + DBUG_ENTER("Start_log_event_v3::do_apply_event"); switch (binlog_version) { case 3: @@ -2375,7 +2395,7 @@ int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) /* this case is impossible */ DBUG_RETURN(1); } - DBUG_RETURN(Log_event::exec_event(rli)); + DBUG_RETURN(0); } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -2456,6 +2476,7 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver) post_header_len[DELETE_ROWS_EVENT-1]= 6;); post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= post_header_len[APPEND_BLOCK_EVENT-1]; post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN; + post_header_len[INCIDENT_EVENT-1]= INCIDENT_HEADER_LEN; } break; @@ -2566,24 +2587,10 @@ bool Format_description_log_event::write(IO_CACHE* file) } #endif -/* - SYNOPSIS - Format_description_log_event::exec_event() - - IMPLEMENTATION - Save the information which describes the binlog's format, to be able to - read all coming events. - Call Start_log_event_v3::exec_event(). -*/ - #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Format_description_log_event::exec_event(struct st_relay_log_info* rli) +int Format_description_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Format_description_log_event::exec_event"); - - /* save the information describing this binlog */ - delete rli->relay_log.description_event_for_exec; - rli->relay_log.description_event_for_exec= this; + DBUG_ENTER("Format_description_log_event::do_apply_event"); #ifdef USING_TRANSACTIONS /* @@ -2605,14 +2612,36 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli) "or ROLLBACK in relay log). A probable cause is that " "the master died while writing the transaction to " "its binary log, thus rolled back too."); - rli->cleanup_context(thd, 1); + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, 1); } #endif /* - If this event comes from ourselves, there is no cleaning task to perform, - we don't call Start_log_event_v3::exec_event() (this was just to update the - log's description event). + If this event comes from ourselves, there is no cleaning task to + perform, we don't call Start_log_event_v3::do_apply_event() + (this was just to update the log's description event). */ + if (server_id != (uint32) ::server_id) + { + /* + If the event was not requested by the slave i.e. the master sent + it while the slave asked for a position >4, the event will make + rli->group_master_log_pos advance. Say that the slave asked for + position 1000, and the Format_desc event's end is 96. Then in + the beginning of replication rli->group_master_log_pos will be + 0, then 96, then jump to first really asked event (which is + >96). So this is ok. 
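This part of the patch replaces the single exec_event() entry point with two hooks: do_apply_event(), which receives the relay log info as a const pointer and is only supposed to change data, and do_update_pos(), which is the one place that advances relay-log and group coordinates (hence the const_cast noise where old code still mutates rli inside the apply step). A stripped-down sketch of that contract, with invented names; RelayPos below is not st_relay_log_info, only a pair of counters:

#include <cstdio>

// Invented miniature of the coordinate bookkeeping.
struct RelayPos { unsigned long event_pos = 0, group_pos = 0; };

struct LogEvent {
  virtual ~LogEvent() = default;
  // Change the data only; the coordinates are read-only here by design.
  virtual int do_apply_event(const RelayPos &) { return 0; }
  // The single place that moves coordinates forward.
  virtual int do_update_pos(RelayPos &rli)
  {
    ++rli.event_pos;
    rli.group_pos = rli.event_pos;   // this event also ends its group
    return 0;
  }
};

// Auxiliary events (Intvar/Rand/User_var style) step only the event
// position, so the group is not considered finished yet.
struct AuxEvent : LogEvent {
  int do_update_pos(RelayPos &rli) override { ++rli.event_pos; return 0; }
};

int apply_and_advance(LogEvent &ev, RelayPos &rli)
{
  if (int err = ev.do_apply_event(rli))
    return err;                      // stop before touching the positions
  return ev.do_update_pos(rli);
}

int main()
{
  RelayPos rli;
  AuxEvent intvar;
  LogEvent query;
  apply_and_advance(intvar, rli);
  apply_and_advance(query, rli);
  std::printf("event_pos=%lu group_pos=%lu\n", rli.event_pos, rli.group_pos);
  return 0;
}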
+ */ + DBUG_RETURN(Start_log_event_v3::do_apply_event(rli)); + } + DBUG_RETURN(0); +} + +int Format_description_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + /* save the information describing this binlog */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= this; + if (server_id == (uint32) ::server_id) { /* @@ -2629,19 +2658,20 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli) the Intvar_log_event respectively. */ rli->inc_event_relay_log_pos(); - DBUG_RETURN(0); + return 0; } + else + { + return Log_event::do_update_pos(rli); + } +} - /* - If the event was not requested by the slave i.e. the master sent it while - the slave asked for a position >4, the event will make - rli->group_master_log_pos advance. Say that the slave asked for position - 1000, and the Format_desc event's end is 96. Then in the beginning of - replication rli->group_master_log_pos will be 0, then 96, then jump to - first really asked event (which is >96). So this is ok. - */ - DBUG_RETURN(Start_log_event_v3::exec_event(rli)); +Log_event::enum_skip_reason +Format_description_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + return Log_event::EVENT_SKIP_NOT; } + #endif @@ -3155,30 +3185,32 @@ void Load_log_event::set_fields(const char* affected_db, Does the data loading job when executing a LOAD DATA on the slave SYNOPSIS - Load_log_event::exec_event - net - rli - use_rli_only_for_errors - if set to 1, rli is provided to - Load_log_event::exec_event only for this - function to have RPL_LOG_NAME and - rli->last_slave_error, both being used by - error reports. rli's position advancing - is skipped (done by the caller which is - Execute_load_log_event::exec_event). - - if set to 0, rli is provided for full use, - i.e. for error reports and position - advancing. + Load_log_event::do_apply_event + net + rli + use_rli_only_for_errors - if set to 1, rli is provided to + Load_log_event::do_apply_event + only for this function to have + RPL_LOG_NAME and + rli->last_slave_error, both being + used by error reports. rli's + position advancing is skipped (done + by the caller which is + Execute_load_log_event::do_apply_event). + - if set to 0, rli is provided for + full use, i.e. for error reports and + position advancing. 
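Format_description_log_event is split along the same lines: its do_apply_event() is reduced to the rollback sanity check plus the Start_log_event_v3 cleanup for foreign server ids, while do_update_pos() is now the place that installs the event as relay_log.description_event_for_exec, so every later event in the relay log is parsed with the right per-type post-header lengths. A toy illustration of keeping such a descriptor around; the struct and the lengths below are made up, not the real header layout:

#include <cstdint>
#include <cstdio>
#include <vector>

// A made-up format descriptor: one post-header length per event type.
struct FormatDescription {
  std::vector<uint8_t> post_header_len;
};

struct Reader {
  FormatDescription current;   // plays the role of description_event_for_exec

  // Installing a new descriptor changes how all later events are read.
  void install(FormatDescription fd) { current = std::move(fd); }

  std::size_t body_offset(uint8_t type, std::size_t common_header_len) const
  {
    return common_header_len + current.post_header_len.at(type);
  }
};

int main()
{
  Reader r;
  r.install(FormatDescription{{0, 13, 8, 6}});   // fictitious lengths
  std::printf("body of event type 3 starts at offset %zu\n", r.body_offset(3, 19));
  return 0;
}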
DESCRIPTION Does the data loading job when executing a LOAD DATA on the slave - + RETURN VALUE - 0 Success + 0 Success 1 Failure */ -int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, - bool use_rli_only_for_errors) +int Load_log_event::do_apply_event(NET* net, RELAY_LOG_INFO const *rli, + bool use_rli_only_for_errors) { LEX_STRING new_db; new_db.length= db_len; @@ -3187,9 +3219,9 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, DBUG_ASSERT(thd->query == 0); thd->query_length= 0; // Should not be needed thd->query_error= 0; - clear_all_errors(thd, rli); + clear_all_errors(thd, const_cast<RELAY_LOG_INFO*>(rli)); - /* see Query_log_event::exec_event() and BUG#13360 */ + /* see Query_log_event::do_apply_event() and BUG#13360 */ DBUG_ASSERT(!rli->m_table_map.count()); /* Usually mysql_init_query() is called by mysql_parse(), but we need it here @@ -3198,22 +3230,26 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, mysql_init_query(thd, 0, 0); if (!use_rli_only_for_errors) { - /* Saved for InnoDB, see comment in Query_log_event::exec_event() */ - rli->future_group_master_log_pos= log_pos; + /* + Saved for InnoDB, see comment in + Query_log_event::do_apply_event() + */ + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); } /* - We test replicate_*_db rules. Note that we have already prepared the file - to load, even if we are going to ignore and delete it now. So it is - possible that we did a lot of disk writes for nothing. In other words, a - big LOAD DATA INFILE on the master will still consume a lot of space on - the slave (space in the relay log + space of temp files: twice the space - of the file to load...) even if it will finally be ignored. - TODO: fix this; this can be done by testing rules in - Create_file_log_event::exec_event() and then discarding Append_block and - al. Another way is do the filtering in the I/O thread (more efficient: no - disk writes at all). + We test replicate_*_db rules. Note that we have already prepared + the file to load, even if we are going to ignore and delete it + now. So it is possible that we did a lot of disk writes for + nothing. In other words, a big LOAD DATA INFILE on the master will + still consume a lot of space on the slave (space in the relay log + + space of temp files: twice the space of the file to load...) + even if it will finally be ignored. TODO: fix this; this can be + done by testing rules in Create_file_log_event::do_apply_event() + and then discarding Append_block and al. Another way is do the + filtering in the I/O thread (more efficient: no disk writes at + all). Note: We do not need to execute reset_one_shot_variables() if this @@ -3222,8 +3258,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, its companion query. If the SET is ignored because of db_ok(), the companion query will also be ignored, and if the companion query is ignored in the db_ok() test of - ::exec_event(), then the companion SET also have so we - don't need to reset_one_shot_variables(). + ::do_apply_event(), then the companion SET also have so + we don't need to reset_one_shot_variables(). */ if (rpl_filter->db_ok(thd->db)) { @@ -3419,7 +3455,7 @@ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", return 1; } - return ( use_rli_only_for_errors ? 0 : Log_event::exec_event(rli) ); + return ( use_rli_only_for_errors ? 
0 : Log_event::do_apply_event(rli) ); } #endif @@ -3513,6 +3549,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); new_log_ident= my_strndup(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); + DBUG_PRINT("debug", ("new_log_ident: '%s'", new_log_ident)); DBUG_VOID_RETURN; } @@ -3532,8 +3569,9 @@ bool Rotate_log_event::write(IO_CACHE* file) } #endif + /* - Rotate_log_event::exec_event() + Rotate_log_event::do_apply_event() Got a rotate log event from the master @@ -3550,34 +3588,52 @@ bool Rotate_log_event::write(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Rotate_log_event::exec_event(struct st_relay_log_info* rli) +int Rotate_log_event::do_update_pos(RELAY_LOG_INFO *rli) { - DBUG_ENTER("Rotate_log_event::exec_event"); + DBUG_ENTER("Rotate_log_event::do_update_pos"); +#ifndef DBUG_OFF + char buf[32]; +#endif + + DBUG_PRINT("info", ("server_id=%lu; ::server_id=%lu", + (ulong) this->server_id, (ulong) ::server_id)); + DBUG_PRINT("info", ("new_log_ident: %s", this->new_log_ident)); + DBUG_PRINT("info", ("pos: %s", llstr(this->pos, buf))); pthread_mutex_lock(&rli->data_lock); rli->event_relay_log_pos= my_b_tell(rli->cur_log); /* - If we are in a transaction: the only normal case is when the I/O thread was - copying a big transaction, then it was stopped and restarted: we have this - in the relay log: + If we are in a transaction or in a group: the only normal case is + when the I/O thread was copying a big transaction, then it was + stopped and restarted: we have this in the relay log: + BEGIN ... ROTATE (a fake one) ... COMMIT or ROLLBACK - In that case, we don't want to touch the coordinates which correspond to - the beginning of the transaction. - Starting from 5.0.0, there also are some rotates from the slave itself, in - the relay log. + + In that case, we don't want to touch the coordinates which + correspond to the beginning of the transaction. Starting from + 5.0.0, there also are some rotates from the slave itself, in the + relay log, which shall not change the group positions. 
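A Rotate event must never drag the group coordinates back into the middle of a transaction: as the rewritten comment explains, a fake Rotate can sit between BEGIN and COMMIT in the relay log when the I/O thread was restarted, and the slave also writes rotates of its own that should not change the group positions. The code that follows therefore only updates group_master_log_name/pos (and now also the group relay coordinates) when the event comes from another server, or replicate_same_server_id is set, and the applier is not inside a group. A compact sketch of that guard, with invented field names:

#include <cstdio>
#include <string>

// Invented miniature of the coordinates a Rotate may touch.
struct Coords {
  std::string group_master_log_name = "master-bin.000001";
  unsigned long group_master_log_pos = 4;
  bool in_group = false;               // inside BEGIN ... COMMIT?
};

void handle_rotate(Coords &c, const std::string &new_name, unsigned long new_pos,
                   unsigned server_id, unsigned own_server_id,
                   bool replicate_same_server_id)
{
  // Paraphrase of the condition in Rotate_log_event::do_update_pos():
  // ignore our own rotates and never move coordinates mid-group.
  if ((server_id != own_server_id || replicate_same_server_id) && !c.in_group)
  {
    c.group_master_log_name = new_name;
    c.group_master_log_pos  = new_pos;
  }
}

int main()
{
  Coords c;
  c.in_group = true;
  handle_rotate(c, "master-bin.000002", 4, 1, 2, false);   // ignored: mid-group
  c.in_group = false;
  handle_rotate(c, "master-bin.000002", 4, 1, 2, false);   // applied
  std::printf("%s:%lu\n", c.group_master_log_name.c_str(), c.group_master_log_pos);
  return 0;
}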
*/ - if (!(thd->options & OPTION_BEGIN)) + if ((server_id != ::server_id || rli->replicate_same_server_id) && + !rli->is_in_group()) { + DBUG_PRINT("info", ("old group_master_log_name: '%s' " + "old group_master_log_pos: %lu", + rli->group_master_log_name, + (ulong) rli->group_master_log_pos)); memcpy(rli->group_master_log_name, new_log_ident, ident_len+1); rli->notify_group_master_log_name_update(); rli->group_master_log_pos= pos; + strmake(rli->group_relay_log_name, rli->event_relay_log_name, + sizeof(rli->group_relay_log_name) - 1); + rli->notify_group_relay_log_name_update(); rli->group_relay_log_pos= rli->event_relay_log_pos; - DBUG_PRINT("info", ("group_master_log_name: '%s' " - "group_master_log_pos: %lu", + DBUG_PRINT("info", ("new group_master_log_name: '%s' " + "new group_master_log_pos: %lu", rli->group_master_log_name, (ulong) rli->group_master_log_pos)); /* @@ -3596,8 +3652,28 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) pthread_mutex_unlock(&rli->data_lock); pthread_cond_broadcast(&rli->data_cond); flush_relay_log_info(rli); + DBUG_RETURN(0); } + + +Log_event::enum_skip_reason +Rotate_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + enum_skip_reason reason= Log_event::do_shall_skip(rli); + + switch (reason) { + case Log_event::EVENT_SKIP_NOT: + case Log_event::EVENT_SKIP_COUNT: + return Log_event::EVENT_SKIP_NOT; + + case Log_event::EVENT_SKIP_IGNORE: + return Log_event::EVENT_SKIP_IGNORE; + } + DBUG_ASSERT(0); + return Log_event::EVENT_SKIP_NOT; // To keep compiler happy +} + #endif @@ -3704,12 +3780,18 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Intvar_log_event::exec_event() + Intvar_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION)&& !defined(MYSQL_CLIENT) -int Intvar_log_event::exec_event(struct st_relay_log_info* rli) +int Intvar_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { + /* + We are now in a statement until the associated query log event has + been processed. + */ + const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT); + switch (type) { case LAST_INSERT_ID_EVENT: thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1; @@ -3719,9 +3801,33 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) thd->force_one_auto_inc_interval(val); break; } + return 0; +} + +int Intvar_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + + +Log_event::enum_skip_reason +Intvar_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead of + 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + #endif @@ -3784,13 +3890,43 @@ void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Rand_log_event::exec_event(struct st_relay_log_info* rli) +int Rand_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { + /* + We are now in a statement until the associated query log event has + been processed. 
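Intvar, Rand and (further down) User_var events now raise an IN_STMT flag on the relay log info in their do_apply_event(): they announce that a statement is in flight until the query or final rows event that consumes them has been applied, so a stopping SQL thread can tell whether it would halt mid-statement. A minimal sketch of such a flag, with invented names:

#include <cstdio>

// Invented stand-in for the relay-log-info flag set.
struct ApplierState {
  bool in_stmt = false;   // plays the role of RELAY_LOG_INFO::IN_STMT
};

// Auxiliary event: opens the statement window.
void apply_intvar(ApplierState &s /*, the INSERT_ID / LAST_INSERT_ID value */)
{
  s.in_stmt = true;       // we are now inside a statement
}

// Statement-terminating event: closes it again.
void apply_query(ApplierState &s /*, the query text */)
{
  s.in_stmt = false;      // safe to stop between statements again
}

int main()
{
  ApplierState s;
  apply_intvar(s);
  std::printf("safe to stop here? %s\n", s.in_stmt ? "no" : "yes");
  apply_query(s);
  std::printf("safe to stop here? %s\n", s.in_stmt ? "no" : "yes");
  return 0;
}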
+ */ + const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT); + thd->rand.seed1= (ulong) seed1; thd->rand.seed2= (ulong) seed2; + return 0; +} + +int Rand_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + + +Log_event::enum_skip_reason +Rand_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead of + 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. + */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} + #endif /* !MYSQL_CLIENT */ @@ -3857,12 +3993,12 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Xid_log_event::exec_event(struct st_relay_log_info* rli) +int Xid_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { /* For a slave Xid_log_event is COMMIT */ general_log_print(thd, COM_QUERY, "COMMIT /* implicit, from Xid_log_event */"); - return end_trans(thd, COMMIT) || Log_event::exec_event(rli); + return end_trans(thd, COMMIT); } #endif /* !MYSQL_CLIENT */ @@ -4140,11 +4276,11 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - User_var_log_event::exec_event() + User_var_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int User_var_log_event::exec_event(struct st_relay_log_info* rli) +int User_var_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { Item *it= 0; CHARSET_INFO *charset; @@ -4156,6 +4292,12 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) double real_val; longlong int_val; + /* + We are now in a statement until the associated query log event has + been processed. + */ + const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT); + if (is_null) { it= new Item_null(); @@ -4206,9 +4348,31 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) e.update_hash(val, val_len, type, charset, DERIVATION_IMPLICIT, 0); free_root(thd->mem_root,0); + return 0; +} + +int User_var_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ rli->inc_event_relay_log_pos(); return 0; } + +Log_event::enum_skip_reason +User_var_log_event::do_shall_skip(RELAY_LOG_INFO *rli) +{ + /* + It is a common error to set the slave skip counter to 1 instead + of 2 when recovering from an insert which used a auto increment, + rand, or user var. Therefore, if the slave skip counter is 1, we + just say that this event should be skipped by ignoring it, meaning + that we do not change the value of the slave skip counter since it + will be decreased by the following insert event. 
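The identical do_shall_skip() bodies added for Intvar, Rand and User_var encode the usability fix spelled out in their comments: when the slave skip counter is set to 1 to jump over a failed insert, the counter would otherwise be consumed by the auxiliary event and the insert itself would still run. Returning EVENT_SKIP_IGNORE skips the auxiliary event without decrementing the counter, leaving the decrement to the following insert. A sketch of the decision and of how a driver would consume it; the enum mirrors the names in the patch but everything else is invented:

#include <cstdio>

enum SkipReason { SKIP_NOT, SKIP_IGNORE, SKIP_COUNT };

// Decision used by Intvar/Rand/User_var style events.
SkipReason aux_event_shall_skip(unsigned long slave_skip_counter)
{
  if (slave_skip_counter == 1)
    return SKIP_IGNORE;                            // skip, but keep the counter
  return slave_skip_counter > 0 ? SKIP_COUNT : SKIP_NOT;
}

int main()
{
  unsigned long counter = 1;                       // "skip one statement"
  SkipReason r = aux_event_shall_skip(counter);
  if (r == SKIP_COUNT)
    --counter;                                     // only SKIP_COUNT consumes it
  std::printf("aux event skipped=%s, counter left for the insert: %lu\n",
              r != SKIP_NOT ? "yes" : "no", counter);
  return 0;
}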
+ */ + if (rli->slave_skip_counter == 1) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::do_shall_skip(rli); +} #endif /* !MYSQL_CLIENT */ @@ -4248,7 +4412,7 @@ void Slave_log_event::pack_info(Protocol *protocol) #ifndef MYSQL_CLIENT Slave_log_event::Slave_log_event(THD* thd_arg, - struct st_relay_log_info* rli) + RELAY_LOG_INFO* rli) :Log_event(thd_arg, 0, 0) , mem_pool(0), master_host(0) { DBUG_ENTER("Slave_log_event"); @@ -4358,11 +4522,11 @@ Slave_log_event::Slave_log_event(const char* buf, uint event_len) #ifndef MYSQL_CLIENT -int Slave_log_event::exec_event(struct st_relay_log_info* rli) +int Slave_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { if (mysql_bin_log.is_open()) mysql_bin_log.write(this); - return Log_event::exec_event(rli); + return 0; } #endif /* !MYSQL_CLIENT */ @@ -4391,21 +4555,21 @@ void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) /* - Stop_log_event::exec_event() - - The master stopped. - We used to clean up all temporary tables but this is useless as, as the - master has shut down properly, it has written all DROP TEMPORARY TABLE - (prepared statements' deletion is TODO only when we binlog prep stmts). - We used to clean up slave_load_tmpdir, but this is useless as it has been - cleared at the end of LOAD DATA INFILE. - So we have nothing to do here. - The place were we must do this cleaning is in Start_log_event_v3::exec_event(), - not here. Because if we come here, the master was sane. + Stop_log_event::do_apply_event() + + The master stopped. We used to clean up all temporary tables but + this is useless as, as the master has shut down properly, it has + written all DROP TEMPORARY TABLE (prepared statements' deletion is + TODO only when we binlog prep stmts). We used to clean up + slave_load_tmpdir, but this is useless as it has been cleared at the + end of LOAD DATA INFILE. So we have nothing to do here. The place + were we must do this cleaning is in + Start_log_event_v3::do_apply_event(), not here. Because if we come + here, the master was sane. */ #ifndef MYSQL_CLIENT -int Stop_log_event::exec_event(struct st_relay_log_info* rli) +int Stop_log_event::do_update_pos(RELAY_LOG_INFO *rli) { /* We do not want to update master_log pos because we get a rotate event @@ -4423,6 +4587,7 @@ int Stop_log_event::exec_event(struct st_relay_log_info* rli) } return 0; } + #endif /* !MYSQL_CLIENT */ #endif /* HAVE_REPLICATION */ @@ -4613,11 +4778,11 @@ void Create_file_log_event::pack_info(Protocol *protocol) /* - Create_file_log_event::exec_event() + Create_file_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Create_file_log_event::exec_event(struct st_relay_log_info* rli) +int Create_file_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char proc_info[17+FN_REFLEN+10], *fname_buf; char *ext; @@ -4679,7 +4844,7 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - return error ? 
1 : Log_event::exec_event(rli); + return error == 0; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -4787,15 +4952,15 @@ int Append_block_log_event::get_create_or_append() const } /* - Append_block_log_event::exec_event() + Append_block_log_event::do_apply_event() */ -int Append_block_log_event::exec_event(struct st_relay_log_info* rli) +int Append_block_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char proc_info[17+FN_REFLEN+10], *fname= proc_info+17; int fd; int error = 1; - DBUG_ENTER("Append_block_log_event::exec_event"); + DBUG_ENTER("Append_block_log_event::do_apply_event"); fname= strmov(proc_info, "Making temp file "); slave_load_file_stem(fname, file_id, server_id, ".data"); @@ -4834,7 +4999,7 @@ err: if (fd >= 0) my_close(fd, MYF(0)); thd->proc_info= 0; - DBUG_RETURN(error ? error : Log_event::exec_event(rli)); + DBUG_RETURN(error); } #endif @@ -4918,18 +5083,18 @@ void Delete_file_log_event::pack_info(Protocol *protocol) #endif /* - Delete_file_log_event::exec_event() + Delete_file_log_event::do_apply_event() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Delete_file_log_event::exec_event(struct st_relay_log_info* rli) +int Delete_file_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char fname[FN_REFLEN+10]; char *ext= slave_load_file_stem(fname, file_id, server_id, ".data"); (void) my_delete(fname, MYF(MY_WME)); strmov(ext, ".info"); (void) my_delete(fname, MYF(MY_WME)); - return Log_event::exec_event(rli); + return 0; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -5015,10 +5180,10 @@ void Execute_load_log_event::pack_info(Protocol *protocol) /* - Execute_load_log_event::exec_event() + Execute_load_log_event::do_apply_event() */ -int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) +int Execute_load_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char fname[FN_REFLEN+10]; char *ext; @@ -5049,14 +5214,15 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) lev->thd = thd; /* - lev->exec_event should use rli only for errors - i.e. should not advance rli's position. - lev->exec_event is the place where the table is loaded (it calls - mysql_load()). + lev->do_apply_event should use rli only for errors i.e. should + not advance rli's position. + + lev->do_apply_event is the place where the table is loaded (it + calls mysql_load()). */ - rli->future_group_master_log_pos= log_pos; - if (lev->exec_event(0,rli,1)) + const_cast<RELAY_LOG_INFO*>(rli)->future_group_master_log_pos= log_pos; + if (lev->do_apply_event(0,rli,1)) { /* We want to indicate the name of the file that could not be loaded @@ -5099,7 +5265,7 @@ err: my_close(fd, MYF(0)); end_io_cache(&file); } - return error ? 
error : Log_event::exec_event(rli); + return error; } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ @@ -5267,7 +5433,7 @@ void Execute_load_query_log_event::pack_info(Protocol *protocol) int -Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) +Execute_load_query_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { char *p; char *buf; @@ -5304,7 +5470,7 @@ Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) p= strmake(p, STRING_WITH_LEN(" INTO")); p= strmake(p, query+fn_pos_end, q_len-fn_pos_end); - error= Query_log_event::exec_event(rli, buf, p-buf); + error= Query_log_event::do_apply_event(rli, buf, p-buf); /* Forging file name for deletion in same buffer */ *fname_end= 0; @@ -5360,7 +5526,7 @@ bool sql_ex_info::write_data(IO_CACHE* file) sql_ex_info::init() */ -char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) +char *sql_ex_info::init(char *buf, char *buf_end, bool use_new_format) { cached_new_format = use_new_format; if (use_new_format) @@ -5373,11 +5539,12 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) the case when we have old format because we will be reusing net buffer to read the actual file before we write out the Create_file event. */ - if (read_str(&buf, buf_end, &field_term, &field_term_len) || - read_str(&buf, buf_end, &enclosed, &enclosed_len) || - read_str(&buf, buf_end, &line_term, &line_term_len) || - read_str(&buf, buf_end, &line_start, &line_start_len) || - read_str(&buf, buf_end, &escaped, &escaped_len)) + const char *ptr= buf; + if (read_str(&ptr, buf_end, (const char **) &field_term, &field_term_len) || + read_str(&ptr, buf_end, (const char **) &enclosed, &enclosed_len) || + read_str(&ptr, buf_end, (const char **) &line_term, &line_term_len) || + read_str(&ptr, buf_end, (const char **) &line_start, &line_start_len) || + read_str(&ptr, buf_end, (const char **) &escaped, &escaped_len)) return 0; opt_flags = *buf++; } @@ -5445,7 +5612,10 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid, memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols)); } else - m_cols.bitmap= 0; // to not free it + { + // Needed because bitmap_init() does not set it to null on failure + m_cols.bitmap= 0; + } } #endif @@ -5482,14 +5652,57 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, m_flags= uint2korr(post_start); - byte const *const var_start= (const byte *)buf + common_header_len + - post_header_len; + byte const *const var_start= + (const byte *)buf + common_header_len + post_header_len; byte const *const ptr_width= var_start; uchar *ptr_after_width= (uchar*) ptr_width; + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); m_width = net_field_length(&ptr_after_width); + DBUG_PRINT("debug", ("m_width=%lu", m_width)); + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols, + m_width <= sizeof(m_bitbuf)*8 ? 
m_bitbuf : NULL, + (m_width + 7) & ~7UL, + false))) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8); + ptr_after_width+= (m_width + 7) / 8; + DBUG_DUMP("m_cols", (char*) m_cols.bitmap, no_bytes_in_map(&m_cols)); + } + else + { + // Needed because bitmap_init() does not set it to null on failure + m_cols.bitmap= NULL; + DBUG_VOID_RETURN; + } - const uint byte_count= (m_width + 7) / 8; - const byte* const ptr_rows_data= var_start + byte_count + 1; + m_cols_ai.bitmap= m_cols.bitmap; /* See explanation in is_valid() */ + + if (event_type == UPDATE_ROWS_EVENT) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols_ai, + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, + (m_width + 7) & ~7UL, + false))) + { + DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); + memcpy(m_cols_ai.bitmap, ptr_after_width, (m_width + 7) / 8); + ptr_after_width+= (m_width + 7) / 8; + DBUG_DUMP("m_cols_ai", (char*) m_cols_ai.bitmap, no_bytes_in_map(&m_cols_ai)); + } + else + { + // Needed because bitmap_init() does not set it to null on failure + m_cols_ai.bitmap= 0; + DBUG_VOID_RETURN; + } + } + + const byte* const ptr_rows_data= (const byte*) ptr_after_width; my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf); DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu", @@ -5498,12 +5711,6 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME)); if (likely((bool)m_rows_buf)) { - /* if bitmap_init fails, catched in is_valid() */ - if (likely(!bitmap_init(&m_cols, - m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, - (m_width + 7) & ~7UL, - false))) - memcpy(m_cols.bitmap, ptr_after_width, byte_count); m_rows_end= m_rows_buf + data_size; m_rows_cur= m_rows_end; memcpy(m_rows_buf, ptr_rows_data, data_size); @@ -5522,9 +5729,31 @@ Rows_log_event::~Rows_log_event() my_free((gptr)m_rows_buf, MYF(MY_ALLOW_ZERO_PTR)); } +int Rows_log_event::get_data_size() +{ + int const type_code= get_type_code(); + + char buf[sizeof(m_width)+1]; + char *end= net_store_length(buf, (m_width + 7) / 8); + + DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", + return 6 + no_bytes_in_map(&m_cols) + (end - buf) + + (type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) + + (m_rows_cur - m_rows_buf);); + int data_size= ROWS_HEADER_LEN; + data_size+= no_bytes_in_map(&m_cols); + data_size+= end - buf; + + if (type_code == UPDATE_ROWS_EVENT) + data_size+= no_bytes_in_map(&m_cols_ai); + + data_size+= (m_rows_cur - m_rows_buf); + return data_size; +} + + #ifndef MYSQL_CLIENT -int Rows_log_event::do_add_row_data(byte *const row_data, - my_size_t const length) +int Rows_log_event::do_add_row_data(byte *row_data, my_size_t length) { /* When the table has a primary key, we would probably want, by default, to @@ -5582,165 +5811,17 @@ int Rows_log_event::do_add_row_data(byte *const row_data, #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -/* - Unpack a row into table->record[0]. 
- - SYNOPSIS - unpack_row() - rli Relay log info - table Table to unpack into - colcnt Number of columns to read from record - row Packed row data - cols Pointer to columns data to fill in - row_end Pointer to variable that will hold the value of the - one-after-end position for the row - master_reclength - Pointer to variable that will be set to the length of the - record on the master side - rw_set Pointer to bitmap that holds either the read_set or the - write_set of the table - - DESCRIPTION - - The function will always unpack into the table->record[0] - record. This is because there are too many dependencies on - where the various member functions of Field and subclasses - expect to write. - - The row is assumed to only consist of the fields for which the - bitset represented by 'arr' and 'bits'; the other parts of the - record are left alone. - - At most 'colcnt' columns are read: if the table is larger than - that, the remaining fields are not filled in. - - RETURN VALUE - - Error code, or zero if no error. The following error codes can - be returned: - - ER_NO_DEFAULT_FOR_FIELD - Returned if one of the fields existing on the slave but not on - the master does not have a default value (and isn't nullable) - */ -static int -unpack_row(RELAY_LOG_INFO *rli, - TABLE *table, uint const colcnt, - char const *row, MY_BITMAP const *cols, - char const **row_end, ulong *master_reclength, - MY_BITMAP* const rw_set, Log_event_type const event_type) -{ - byte *const record= table->record[0]; - DBUG_ENTER("unpack_row"); - DBUG_ASSERT(record && row); - DBUG_PRINT("enter", ("row: 0x%lx table->record[0]: 0x%lx", (long) row, (long) record)); - my_size_t master_null_bytes= table->s->null_bytes; - - if (colcnt != table->s->fields) - { - Field **fptr= &table->field[colcnt-1]; - do - master_null_bytes= (*fptr)->last_null_byte(); - while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF && - fptr-- > table->field); - - /* - If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time, - there were no nullable fields nor BIT fields at all in the - columns that are common to the master and the slave. In that - case, there is only one null byte holding the X bit. - - OBSERVE! There might still be nullable columns following the - common columns, so table->s->null_bytes might be greater than 1. - */ - if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF) - master_null_bytes= 1; - } - - DBUG_ASSERT(master_null_bytes <= table->s->null_bytes); - memcpy(record, row, master_null_bytes); // [1] - int error= 0; - - bitmap_set_all(rw_set); - - Field **const begin_ptr = table->field; - Field **field_ptr; - char const *ptr= row + master_null_bytes; - Field **const end_ptr= begin_ptr + colcnt; - for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) - { - Field *const f= *field_ptr; - - if (bitmap_is_set(cols, field_ptr - begin_ptr)) - { - DBUG_ASSERT((const char *)table->record[0] <= f->ptr); - DBUG_ASSERT(f->ptr < (char*)(table->record[0] + table->s->reclength + - (f->pack_length_in_rec() == 0))); - - DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, - (long) f->ptr)); - ptr= f->unpack(f->ptr, ptr); - /* Field...::unpack() cannot return 0 */ - DBUG_ASSERT(ptr != NULL); - } - else - bitmap_clear_bit(rw_set, field_ptr - begin_ptr); - } - - *row_end = ptr; - if (master_reclength) - { - if (*field_ptr) - *master_reclength = (*field_ptr)->ptr - (char*) table->record[0]; - else - *master_reclength = table->s->reclength; - } - - /* - Set properties for remaining columns, if there are any. 
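The large block deleted around here is the static unpack_row() helper together with its documentation: it unpacked a master row image into table->record[0], reading only the columns flagged in the column bitmap, leaving masked-out columns alone, and giving slave-only columns their defaults (raising ER_NO_DEFAULT_FOR_FIELD for NOT NULL columns without one). A self-contained sketch of that bitmap-driven idea, with std::optional<int> standing in for Field values:

#include <cstdio>
#include <optional>
#include <vector>

// Toy stand-ins: a row is a list of optional ints, the bitmap says which
// columns the master actually sent.
using Row = std::vector<std::optional<int>>;

Row unpack_row_sketch(const std::vector<int> &packed,
                      const std::vector<bool> &cols,   // master column bitmap
                      std::size_t colcnt,              // columns the master has
                      std::size_t slave_columns,
                      int default_value = 0)
{
  Row record(slave_columns, std::nullopt);
  std::size_t src = 0;
  for (std::size_t i = 0; i < slave_columns; ++i)
  {
    if (i < colcnt && cols[i])
      record[i] = packed[src++];        // value sent by the master
    else if (i >= colcnt)
      record[i] = default_value;        // extra slave-side column: use default
    // columns masked out by the bitmap are left untouched (nullopt here)
  }
  return record;
}

int main()
{
  Row r = unpack_row_sketch({10, 30}, {true, false, true}, 3, 4);
  for (const auto &v : r)
    std::printf("%d ", v.value_or(-1)); // prints: 10 -1 30 0
  std::printf("\n");
  return 0;
}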
We let the - corresponding bit in the write_set be set, to write the value if - it was not there already. We iterate over all remaining columns, - even if there were an error, to get as many error messages as - possible. We are still able to return a pointer to the next row, - so redo that. - - This generation of error messages is only relevant when inserting - new rows. - */ - for ( ; *field_ptr ; ++field_ptr) - { - uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; - Field *const f= *field_ptr; - - DBUG_PRINT("info", ("processing column '%s' @ 0x%lx", f->field_name, - (long) f->ptr)); - if (event_type == WRITE_ROWS_EVENT && (f->flags & mask) == mask) - { - slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, - "Field `%s` of table `%s`.`%s` " - "has no default value and cannot be NULL", - (*field_ptr)->field_name, table->s->db.str, - table->s->table_name.str); - error = ER_NO_DEFAULT_FOR_FIELD; - } - else - f->set_default(); - } - - DBUG_RETURN(error); -} - -int Rows_log_event::exec_event(st_relay_log_info *rli) +int Rows_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Rows_log_event::exec_event(st_relay_log_info*)"); + DBUG_ENTER("Rows_log_event::do_apply_event(st_relay_log_info*)"); int error= 0; char const *row_start= (char const *)m_rows_buf; /* - If m_table_id == ~0UL, then we have a dummy event that does - not contain any data. In that case, we just remove all tables in - the tables_to_lock list, close the thread tables, step the relay - log position, and return with success. + If m_table_id == ~0UL, then we have a dummy event that does not + contain any data. In that case, we just remove all tables in the + tables_to_lock list, close the thread tables, and return with + success. */ if (m_table_id == ~0UL) { @@ -5750,16 +5831,16 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ DBUG_ASSERT(get_flags(STMT_END_F)); - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); close_thread_tables(thd); thd->clear_error(); - rli->inc_event_relay_log_pos(); DBUG_RETURN(0); } /* 'thd' has been set by exec_relay_log_event(), just before calling - exec_event(). We still check here to prevent future coding errors. + do_apply_event(). We still check here to prevent future coding + errors. */ DBUG_ASSERT(rli->sql_thd == thd); @@ -5775,8 +5856,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* lock_tables() reads the contents of thd->lex, so they must be - initialized. Contrary to in Table_map_log_event::exec_event() we don't - call mysql_init_query() as that may reset the binlog format. + initialized. Contrary to in + Table_map_log_event::do_apply_event() we don't call + mysql_init_query() as that may reset the binlog format. 
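Rows_log_event::do_apply_event() begins with the special case visible above: a dummy event whose m_table_id is ~0UL carries no row data and is only used to end a statement, so the applier just drops the tables-to-lock list, closes the thread's tables and returns, with position handling now left to do_update_pos(). A tiny sketch of treating such a sentinel id as "flush and stop", with invented names:

#include <cstdio>
#include <string>
#include <vector>

static const unsigned long DUMMY_TABLE_ID = ~0UL;   // sentinel used in the patch

struct Applier {
  std::vector<std::string> tables_to_lock;           // invented bookkeeping

  int apply_rows_event(unsigned long table_id)
  {
    if (table_id == DUMMY_TABLE_ID)
    {
      // No data: just drop the pending table list and finish the statement.
      tables_to_lock.clear();
      return 0;
    }
    std::printf("would apply rows to table id %lu\n", table_id);
    return 0;
  }
};

int main()
{
  Applier a;
  a.tables_to_lock = {"db.t1"};
  a.apply_rows_event(DUMMY_TABLE_ID);
  std::printf("pending tables after dummy event: %zu\n", a.tables_to_lock.size());
  return 0;
}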
*/ lex_start(thd, NULL, 0); @@ -5805,7 +5887,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) "Error in %s event: when locking tables", get_type_str()); } - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(error); } @@ -5826,7 +5908,8 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) TABLE_LIST *tables= rli->tables_to_lock; close_tables_for_reopen(thd, &tables); - if ((error= open_tables(thd, &tables, &rli->tables_to_lock_count, 0))) + uint tables_count= rli->tables_to_lock_count; + if ((error= open_tables(thd, &tables, &tables_count, 0))) { if (thd->query_error || thd->is_fatal_error) { @@ -5841,7 +5924,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) "unexpected success or fatal error")); thd->query_error= 1; } - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(error); } } @@ -5863,7 +5946,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) mysql_unlock_tables(thd, thd->lock); thd->lock= 0; thd->query_error= 1; - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); DBUG_RETURN(ERR_BAD_TABLE_DEF); } } @@ -5885,24 +5968,24 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) */ for (TABLE_LIST *ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global) { - rli->m_table_map.set_table(ptr->table_id, ptr->table); + const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.set_table(ptr->table_id, ptr->table); } #ifdef HAVE_QUERY_CACHE query_cache.invalidate_locked_for_write(rli->tables_to_lock); #endif - rli->clear_tables_to_lock(); + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); } DBUG_ASSERT(rli->tables_to_lock == NULL && rli->tables_to_lock_count == 0); - TABLE* table= rli->m_table_map.get_table(m_table_id); + TABLE* table= const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.get_table(m_table_id); if (table) { /* table == NULL means that this table should not be replicated - (this was set up by Table_map_log_event::exec_event() which - tested replicate-* rules). + (this was set up by Table_map_log_event::do_apply_event() + which tested replicate-* rules). */ /* @@ -5931,6 +6014,17 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* A small test to verify that objects have consistent types */ DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS)); + /* + Now we are in a statement and will stay in a statement until we + see a STMT_END_F. + + We set this flag here, before actually applying any rows, in + case the SQL thread is stopped and we need to detect that we're + inside a statement and halting abruptly might cause problems + when restarting. 
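Once the tables collected from the preceding Table_map events have been opened, locked and checked for a compatible definition, the hunk above registers each one under its binlog table id via rli->m_table_map.set_table(), and the rows event then resolves its own m_table_id with get_table(); a NULL result means the table was filtered out by the replicate-* rules and the rows are silently skipped. A tiny illustration of that id-to-table map using the standard library rather than the server's table_mapping class:

#include <cstdio>
#include <string>
#include <unordered_map>

// Invented miniature: map binlog table ids to already-opened tables.
struct Table { std::string name; };

class TableMap {
  std::unordered_map<unsigned long, Table *> map_;
public:
  void set_table(unsigned long id, Table *t) { map_[id] = t; }
  Table *get_table(unsigned long id) const
  {
    auto it = map_.find(id);
    return it == map_.end() ? nullptr : it->second;   // NULL => not replicated
  }
  void clear() { map_.clear(); }
};

int main()
{
  Table t1{"db.t1"};
  TableMap m;
  m.set_table(17, &t1);                    // while applying the Table_map event
  if (Table *t = m.get_table(17))          // while applying the rows event
    std::printf("applying rows to %s\n", t->name.c_str());
  if (m.get_table(99) == nullptr)
    std::printf("table id 99 is not replicated here\n");
  return 0;
}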
+ */ + const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT); + error= do_before_row_operations(table); while (error == 0 && row_start < (const char*) m_rows_end) { @@ -5959,7 +6053,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) break; default: - slave_print_msg(ERROR_LEVEL, rli, error, + slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno, "Error in %s event: row application failed", get_type_str()); thd->query_error= 1; @@ -5969,7 +6063,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) row_start= row_end; } DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", - rli->abort_slave=1;); + const_cast<RELAY_LOG_INFO*>(rli)->abort_slave= 1;); error= do_after_row_operations(table, error); if (!cache_stmt) { @@ -5980,11 +6074,12 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) if (error) { /* error has occured during the transaction */ - slave_print_msg(ERROR_LEVEL, rli, error, + slave_print_msg(ERROR_LEVEL, rli, thd->net.last_errno, "Error in %s event: error during transaction execution " "on table %s.%s", get_type_str(), table->s->db.str, table->s->table_name.str); + /* If one day we honour --skip-slave-errors in row-based replication, and the error should be skipped, then we would clear mappings, rollback, @@ -5997,11 +6092,50 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) rollback at the caller along with sbr. */ thd->reset_current_stmt_binlog_row_based(); - rli->cleanup_context(thd, 0); /* rollback at caller in step with sbr */ + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, error); thd->query_error= 1; DBUG_RETURN(error); } + /* + This code would ideally be placed in do_update_pos() instead, but + since we have no access to table there, we do the setting of + last_event_start_time here instead. + */ + if (table && (table->s->primary_key == MAX_KEY) && + !cache_stmt && get_flags(STMT_END_F) == RLE_NO_FLAGS) + { + /* + ------------ Temporary fix until WL#2975 is implemented --------- + + This event is not the last one (no STMT_END_F). If we stop now + (in case of terminate_slave_thread()), how will we restart? We + have to restart from Table_map_log_event, but as this table is + not transactional, the rows already inserted will still be + present, and idempotency is not guaranteed (no PK) so we risk + that repeating leads to double insert. So we desperately try to + continue, hope we'll eventually leave this buggy situation (by + executing the final Rows_log_event). If we are in a hopeless + wait (reached end of last relay log and nothing gets appended + there), we timeout after one minute, and notify DBA about the + problem. When WL#2975 is implemented, just remove the member + st_relay_log_info::last_event_start_time and all its occurences. + */ + const_cast<RELAY_LOG_INFO*>(rli)->last_event_start_time= time(0); + } + + DBUG_RETURN(0); +} + +int +Rows_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + DBUG_ENTER("Rows_log_event::do_update_pos"); + int error= 0; + + DBUG_PRINT("info", ("flags: %s", + get_flags(STMT_END_F) ? "STMT_END_F " : "")); + if (get_flags(STMT_END_F)) { /* @@ -6020,6 +6154,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) replicate-ignore rules). */ thd->binlog_flush_pending_rows_event(true); + /* If this event is not in a transaction, the call below will, if some transactional storage engines are involved, commit the statement into @@ -6030,6 +6165,7 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) binlog. 
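The row loop above applies the packed images one at a time: do_prepare_row() unpacks the next image and reports where it ends, the type-specific handler writes, updates or deletes it, and the cursor advances with row_start= row_end until the end of the buffer or the first hard error. A standalone sketch of that cursor-over-a-packed-buffer loop; the "rows" here are just length-prefixed byte runs, not real row images:

#include <cstdint>
#include <cstdio>
#include <vector>

// Each fake row is a 1-byte length followed by that many payload bytes.
static int apply_one_row(const uint8_t *row_start, const uint8_t *buf_end,
                         const uint8_t **row_end)
{
  if (row_start >= buf_end)
    return 1;                                   // malformed event
  uint8_t len = row_start[0];
  if (row_start + 1 + len > buf_end)
    return 1;                                   // row runs past the buffer
  std::printf("applied a row of %u byte(s)\n", (unsigned) len);
  *row_end = row_start + 1 + len;               // tell the caller where we stopped
  return 0;
}

int main()
{
  std::vector<uint8_t> rows = {2, 0xAA, 0xBB, 1, 0xCC, 0};   // three rows
  const uint8_t *cur = rows.data();
  const uint8_t *end = rows.data() + rows.size();

  int error = 0;
  while (error == 0 && cur < end)               // mirrors the loop in the patch
  {
    const uint8_t *next = cur;
    error = apply_one_row(cur, end, &next);
    cur = next;                                 // row_start= row_end
  }
  return error;
}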
*/ error= ha_autocommit_or_rollback(thd, 0); + /* Now what if this is not a transactional engine? we still need to flush the pending event to the binlog; we did it with @@ -6042,11 +6178,16 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) thd->reset_current_stmt_binlog_row_based(); rli->cleanup_context(thd, 0); - rli->transaction_end(thd); - if (error == 0) { /* + Indicate that a statement is finished. + Step the group log position if we are not in a transaction, + otherwise increase the event log position. + */ + rli->stmt_done(log_pos, when); + + /* Clear any errors pushed in thd->net.last_err* if for example "no key found" (as this is allowed). This is a safety measure; apparently those errors (e.g. when executing a Delete_rows_log_event of a @@ -6055,44 +6196,20 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) do not become visible. We still prefer to wipe them out. */ thd->clear_error(); - error= Log_event::exec_event(rli); } else slave_print_msg(ERROR_LEVEL, rli, error, - "Error in %s event: commit of row events failed, " - "table `%s`.`%s`", - get_type_str(), table->s->db.str, - table->s->table_name.str); - DBUG_RETURN(error); + "Error in %s event: commit of row events failed", + get_type_str()); } - - if (table && (table->s->primary_key == MAX_KEY) && !cache_stmt) + else { - /* - ------------ Temporary fix until WL#2975 is implemented --------- - - This event is not the last one (no STMT_END_F). If we stop now - (in case of terminate_slave_thread()), how will we restart? We - have to restart from Table_map_log_event, but as this table is - not transactional, the rows already inserted will still be - present, and idempotency is not guaranteed (no PK) so we risk - that repeating leads to double insert. So we desperately try to - continue, hope we'll eventually leave this buggy situation (by - executing the final Rows_log_event). If we are in a hopeless - wait (reached end of last relay log and nothing gets appended - there), we timeout after one minute, and notify DBA about the - problem. When WL#2975 is implemented, just remove the member - st_relay_log_info::unsafe_to_stop_at and all its occurences. - */ - rli->unsafe_to_stop_at= time(0); + rli->inc_event_relay_log_pos(); } - DBUG_ASSERT(error == 0); - thd->clear_error(); - rli->inc_event_relay_log_pos(); - - DBUG_RETURN(0); + DBUG_RETURN(error); } + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifndef MYSQL_CLIENT @@ -6119,15 +6236,35 @@ bool Rows_log_event::write_data_body(IO_CACHE*file) */ char sbuf[sizeof(m_width)]; my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf; + bool res= false; char *const sbuf_end= net_store_length((char*) sbuf, (uint) m_width); DBUG_ASSERT(static_cast<my_size_t>(sbuf_end - sbuf) <= sizeof(sbuf)); - return (my_b_safe_write(file, reinterpret_cast<byte*>(sbuf), - sbuf_end - sbuf) || - my_b_safe_write(file, reinterpret_cast<byte*>(m_cols.bitmap), - no_bytes_in_map(&m_cols)) || - my_b_safe_write(file, m_rows_buf, (uint) data_size)); + DBUG_DUMP("m_width", sbuf, sbuf_end - sbuf); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(sbuf), + sbuf_end - sbuf); + + DBUG_DUMP("m_cols", (char*) m_cols.bitmap, no_bytes_in_map(&m_cols)); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(m_cols.bitmap), + no_bytes_in_map(&m_cols)); + /* + TODO[refactor write]: Remove the "down cast" here (and elsewhere). 
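Rows_log_event::do_update_pos(), completed above, is where the statement boundary matters: with STMT_END_F set the pending rows event has been flushed, the statement has been committed or rolled back, and rli->stmt_done() lets the group position advance (unless a surrounding transaction is still open); without the flag only the event relay log position is stepped. A compressed sketch of that decision, with invented names for the bookkeeping:

#include <cstdio>

// Invented miniature of the positions touched by stmt_done().
struct Positions {
  unsigned long event_pos = 0;
  unsigned long group_pos = 0;
  bool in_transaction = false;
};

void stmt_done(Positions &p, unsigned long master_log_pos)
{
  // Statement finished: outside a transaction the group position may move.
  if (!p.in_transaction)
    p.group_pos = master_log_pos;
  ++p.event_pos;
}

int rows_update_pos(Positions &p, bool stmt_end, unsigned long master_log_pos)
{
  if (stmt_end)
    stmt_done(p, master_log_pos);   // last rows event of the statement
  else
    ++p.event_pos;                  // intermediate event: step event pos only
  return 0;
}

int main()
{
  Positions p;
  rows_update_pos(p, false, 1500);  // intermediate rows event
  rows_update_pos(p, true, 1500);   // final rows event carrying STMT_END_F
  std::printf("event_pos=%lu group_pos=%lu\n", p.event_pos, p.group_pos);
  return 0;
}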
+ */ + if (get_type_code() == UPDATE_ROWS_EVENT) + { + DBUG_DUMP("m_cols_ai", (char*) m_cols_ai.bitmap, no_bytes_in_map(&m_cols_ai)); + res= res || my_b_safe_write(file, + reinterpret_cast<byte*>(m_cols_ai.bitmap), + no_bytes_in_map(&m_cols_ai)); + } + DBUG_DUMP("rows", m_rows_buf, data_size); + res= res || my_b_safe_write(file, m_rows_buf, (uint) data_size); + + return res; + } #endif @@ -6154,16 +6291,16 @@ void Rows_log_event::print_helper(FILE *file, { bool const last_stmt_event= get_flags(STMT_END_F); print_header(head, print_event_info, !last_stmt_event); - my_b_printf(head, "\t%s: table id %lu\n", name, m_table_id); + my_b_printf(head, "\t%s: table id %lu%s\n", + name, m_table_id, + last_stmt_event ? " flags: STMT_END_F" : ""); print_base64(body, print_event_info, !last_stmt_event); } if (get_flags(STMT_END_F)) { - my_b_copy_to_file(head, file); - my_b_copy_to_file(body, file); - reinit_io_cache(head, WRITE_CACHE, 0, FALSE, TRUE); - reinit_io_cache(body, WRITE_CACHE, 0, FALSE, TRUE); + copy_event_cache_to_file_and_reinit(head, file); + copy_event_cache_to_file_and_reinit(body, file); } } #endif @@ -6272,15 +6409,15 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, const char *const vpart= buf + common_header_len + post_header_len; /* Extract the length of the various parts from the buffer */ - byte const* const ptr_dblen= (byte const*)vpart + 0; + byte const *const ptr_dblen= (byte const*)vpart + 0; m_dblen= *(uchar*) ptr_dblen; /* Length of database name + counter + terminating null */ - byte const* const ptr_tbllen= ptr_dblen + m_dblen + 2; + byte const *const ptr_tbllen= ptr_dblen + m_dblen + 2; m_tbllen= *(uchar*) ptr_tbllen; /* Length of table name + counter + terminating null */ - byte const* const ptr_colcnt= ptr_tbllen + m_tbllen + 2; + byte const *const ptr_colcnt= ptr_tbllen + m_tbllen + 2; uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; m_colcnt= net_field_length(&ptr_after_colcnt); @@ -6325,9 +6462,9 @@ Table_map_log_event::~Table_map_log_event() */ #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int Table_map_log_event::exec_event(st_relay_log_info *rli) +int Table_map_log_event::do_apply_event(RELAY_LOG_INFO const *rli) { - DBUG_ENTER("Table_map_log_event::exec_event(st_relay_log_info*)"); + DBUG_ENTER("Table_map_log_event::do_apply_event(st_relay_log_info*)"); DBUG_ASSERT(rli->sql_thd == thd); @@ -6450,29 +6587,24 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) locked by linking the table into the list of tables to lock. */ table_list->next_global= table_list->next_local= rli->tables_to_lock; - rli->tables_to_lock= table_list; - rli->tables_to_lock_count++; + const_cast<RELAY_LOG_INFO*>(rli)->tables_to_lock= table_list; + const_cast<RELAY_LOG_INFO*>(rli)->tables_to_lock_count++; /* 'memory' is freed in clear_tables_to_lock */ } - /* - We explicitly do not call Log_event::exec_event() here since we do not - want the relay log position to be flushed to disk. The flushing will be - done by the last Rows_log_event that either ends a statement (outside a - transaction) or a transaction. - - A table map event can *never* end a transaction or a statement, so we - just step the relay log position. 
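Update events now travel with two column bitmaps: m_cols describes the before image used to locate the row and m_cols_ai describes the after image that gets written back, and write_data_body() above serializes the width, both bitmaps (the second one only for UPDATE_ROWS_EVENT) and then the packed rows, matching the new get_data_size(). A small sketch of how the extra bitmap changes the payload size, with std::vector<bool> standing in for MY_BITMAP:

#include <cstdio>
#include <vector>

// Toy payload description: which columns are present in each image.
struct RowsPayload {
  std::vector<bool> before_cols;   // m_cols:    columns used to find the row
  std::vector<bool> after_cols;    // m_cols_ai: columns actually written
};

static std::size_t bytes_in_map(const std::vector<bool> &b)
{
  return (b.size() + 7) / 8;       // same rounding as no_bytes_in_map()
}

// The serialized size grows by one bitmap for update events only.
std::size_t payload_size(const RowsPayload &p, std::size_t packed_rows_bytes,
                         bool is_update)
{
  std::size_t size = bytes_in_map(p.before_cols) + packed_rows_bytes;
  if (is_update)
    size += bytes_in_map(p.after_cols);
  return size;
}

int main()
{
  RowsPayload p{{true, true, false}, {false, true, false}};
  std::printf("write event payload:  %zu byte(s)\n", payload_size(p, 12, false));
  std::printf("update event payload: %zu byte(s)\n", payload_size(p, 24, true));
  return 0;
}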
- */ - - if (likely(!error)) - rli->inc_event_relay_log_pos(); DBUG_RETURN(error); err: my_free((gptr) memory, MYF(MY_WME)); DBUG_RETURN(error); } + +int Table_map_log_event::do_update_pos(RELAY_LOG_INFO *rli) +{ + rli->inc_event_relay_log_pos(); + return 0; +} + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifndef MYSQL_CLIENT @@ -6637,7 +6769,7 @@ int Write_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -6778,6 +6910,42 @@ copy_extra_record_fields(TABLE *table, return 0; // All OK } +#define DBUG_PRINT_BITSET(N,FRM,BS) \ + do { \ + char buf[256]; \ + for (uint i = 0 ; i < (BS)->n_bits ; ++i) \ + buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \ + buf[(BS)->n_bits] = '\0'; \ + DBUG_PRINT((N), ((FRM), buf)); \ + } while (0) + + +/** + Check if an error is a duplicate key error. + + This function is used to check if an error code is one of the + duplicate key error, i.e., and error code for which it is sensible + to do a <code>get_dup_key()</code> to retrieve the duplicate key. + + @param errcode The error code to check. + + @return <code>true</code> if the error code is such that + <code>get_dup_key()</code> will return true, <code>false</code> + otherwise. + */ +bool +is_duplicate_key_error(int errcode) +{ + switch (errcode) + { + case HA_ERR_FOUND_DUPP_KEY: + case HA_ERR_FOUND_DUPP_UNIQUE: + return true; + } + return false; +} + + /* Replace the provided record in the database. @@ -6810,6 +6978,12 @@ replace_record(THD *thd, TABLE *table, int keynum; auto_afree_ptr<char> key(NULL); +#ifndef DBUG_OFF + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set); + DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set); +#endif + while ((error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) @@ -6820,7 +6994,7 @@ replace_record(THD *thd, TABLE *table, if ((keynum= table->file->get_dup_key(error)) < 0) { /* We failed to retrieve the duplicate key */ - DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); + DBUG_RETURN(error); } /* @@ -6837,7 +7011,10 @@ replace_record(THD *thd, TABLE *table, { error= table->file->rnd_pos(table->record[1], table->file->dup_ref); if (error) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } else { @@ -6854,12 +7031,15 @@ replace_record(THD *thd, TABLE *table, } key_copy((byte*)key.get(), table->record[0], table->key_info + keynum, 0); - error= table->file->index_read_idx(table->record[1], keynum, + error= table->file->index_read_idx(table->record[1], keynum, (const byte*)key.get(), - table->key_info[keynum].key_length, + HA_WHOLE_KEY, HA_READ_KEY_EXACT); if (error) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } } /* @@ -6892,15 +7072,21 @@ replace_record(THD *thd, TABLE *table, { error=table->file->ha_update_row(table->record[1], table->record[0]); + if (error) + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } else { if ((error= table->file->ha_delete_row(table->record[1]))) + { + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } /* Will retry ha_write_row() with the offending row removed. 
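replace_record(), rewritten in the hunk above, gives the slave REPLACE-like behaviour: try ha_write_row(); on a duplicate-key error use is_duplicate_key_error()/get_dup_key() to identify the conflict, fetch the offending row either through dup_ref/rnd_pos() or by reading the duplicate key back with index_read_idx(), then update it in place when the conflict is on the last unique key, otherwise delete it and retry the write. A self-contained sketch of that retry loop on a toy table with a single unique key, where std::map stands in for the storage engine:

#include <cstdio>
#include <map>
#include <string>

// Toy table: one integer key, one value column.
using Table = std::map<int, std::string>;

struct Row { int key; std::string value; };

// REPLACE-style write: keep going until the row is in place, overwriting or
// removing whatever caused the duplicate-key conflict.
int replace_record_sketch(Table &t, const Row &row)
{
  for (;;)
  {
    auto res = t.emplace(row.key, row.value);   // plays the role of ha_write_row
    if (res.second)
      return 0;                                 // no conflict, done

    // Duplicate key. With a single unique key we can overwrite in place,
    // which is the "update the conflicting row" branch of the patch.
    res.first->second = row.value;              // plays the role of ha_update_row
    return 0;
    // With several unique keys the real code deletes the conflicting row
    // (ha_delete_row) instead and loops to retry the write.
  }
}

int main()
{
  Table t{{1, "old"}};
  replace_record_sketch(t, {1, "new"});
  replace_record_sketch(t, {2, "fresh"});
  for (const auto &p : t)
    std::printf("%d -> %s\n", p.first, p.second.c_str());
  return 0;
}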
*/ } } + DBUG_RETURN(error); } @@ -6931,20 +7117,75 @@ void Write_rows_log_event::print(FILE *file, PRINT_EVENT_INFO* print_event_info) */ static bool record_compare(TABLE *table) { + /* + Need to set the X bit and the filler bits in both records since + there are engines that do not set it correctly. + + In addition, since MyISAM checks that one hasn't tampered with the + record, it is necessary to restore the old bytes into the record + after doing the comparison. + + TODO[record format ndb]: Remove it once NDB returns correct + records. Check that the other engines also return correct records. + */ + + bool result= FALSE; + byte saved_x[2], saved_filler[2]; + + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + saved_x[i]= table->record[i][0]; + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][0]|= 1U; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + } + if (table->s->blob_fields + table->s->varchar_fields == 0) - return cmp_record(table,record[1]); + { + result= cmp_record(table,record[1]); + goto record_compare_exit; + } + /* Compare null bits */ if (memcmp(table->null_flags, table->null_flags+table->s->rec_buff_length, table->s->null_bytes)) - return TRUE; // Diff in NULL value + { + result= TRUE; // Diff in NULL value + goto record_compare_exit; + } + /* Compare updated fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) - return TRUE; + { + result= TRUE; + goto record_compare_exit; + } } - return FALSE; + +record_compare_exit: + /* + Restore the saved bytes. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. + */ + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + table->record[i][0]= saved_x[i]; + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + } + } + + return result; } @@ -6980,6 +7221,8 @@ static int find_and_fetch_row(TABLE *table, byte *key) DBUG_ASSERT(table->in_use != NULL); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && table->s->primary_key < MAX_KEY) { @@ -7040,8 +7283,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) my_ptrdiff_t const pos= table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; table->record[1][pos]= 0xFF; - if ((error= table->file->index_read(table->record[1], key, - table->key_info->key_length, + if ((error= table->file->index_read(table->record[1], key, HA_WHOLE_KEY, HA_READ_KEY_EXACT))) { table->file->print_error(error, MYF(0)); @@ -7080,15 +7322,22 @@ static int find_and_fetch_row(TABLE *table, byte *key) while (record_compare(table)) { int error; + /* We need to set the null bytes to ensure that the filler bit are all set when returning. There are storage engines that just set the necessary bits on the bytes and don't set the filler bits correctly. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. */ - my_ptrdiff_t const pos= - table->s->null_bytes > 0 ? 
table->s->null_bytes - 1 : 0; - table->record[1][pos]= 0xFF; + if (table->s->null_bytes > 0) + { + table->record[1][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + if ((error= table->file->index_next(table->record[1]))) { table->file->print_error(error, MYF(0)); @@ -7114,17 +7363,11 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* Continue until we find the right record or have made a full loop */ do { - /* - We need to set the null bytes to ensure that the filler bit - are all set when returning. There are storage engines that - just set the necessary bits on the bytes and don't set the - filler bits correctly. - */ - my_ptrdiff_t const pos= - table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; - table->record[1][pos]= 0xFF; error= table->file->rnd_next(table->record[1]); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("record[1]", table->record[1], table->s->reclength); + switch (error) { case 0: @@ -7138,6 +7381,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) default: table->file->print_error(error, MYF(0)); + DBUG_PRINT("info", ("Record not found")); table->file->ha_rnd_end(); DBUG_RETURN(error); } @@ -7147,6 +7391,7 @@ static int find_and_fetch_row(TABLE *table, byte *key) /* Have to restart the scan to be able to fetch the next row. */ + DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ? "not " : "")); table->file->ha_rnd_end(); DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0); @@ -7239,7 +7484,7 @@ int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -7305,16 +7550,55 @@ void Delete_rows_log_event::print(FILE *file, */ #if !defined(MYSQL_CLIENT) Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, - ulong tid, MY_BITMAP const *cols, + ulong tid, + MY_BITMAP const *cols_bi, + MY_BITMAP const *cols_ai, + bool is_transactional) +: Rows_log_event(thd_arg, tbl_arg, tid, cols_bi, is_transactional) +#ifdef HAVE_REPLICATION + , m_memory(NULL), m_key(NULL) + +#endif +{ + init(cols_ai); +} + +Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, + ulong tid, + MY_BITMAP const *cols, bool is_transactional) : Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional) #ifdef HAVE_REPLICATION , m_memory(NULL), m_key(NULL) #endif { + init(cols); +} + +void Update_rows_log_event::init(MY_BITMAP const *cols) +{ + /* if bitmap_init fails, catched in is_valid() */ + if (likely(!bitmap_init(&m_cols_ai, + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, + (m_width + 7) & ~7UL, + false))) + { + /* Cols can be zero if this is a dummy binrows event */ + if (likely(cols != NULL)) + memcpy(m_cols_ai.bitmap, cols->bitmap, no_bytes_in_map(cols)); + } } #endif /* !defined(MYSQL_CLIENT) */ + +Update_rows_log_event::~Update_rows_log_event() +{ + if (m_cols_ai.bitmap == m_bitbuf_ai) // no my_malloc happened + m_cols_ai.bitmap= 0; // so no my_free in bitmap_free + bitmap_free(&m_cols_ai); // To pair with bitmap_init(). +} + + /* Constructor used by slave to read the event from the binary log. 
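find_and_fetch_row(), visible in the two chunks above, locates the row an update or delete refers to by the cheapest means available: by position when the engine derives positions from the primary key (rnd_pos()), otherwise by an index read on the first key (index_read(), now passed HA_WHOLE_KEY), and as a last resort by a full scan that record_compare()s every row, forcing the X bit and the filler bits on before comparing because some engines leave them unset. A simplified illustration of the strategy choice only; the capability flags below are placeholders, not the handler API:

#include <cstdio>

enum class Access { ByPosition, ByIndex, FullScan };

// Placeholder capability flags for a storage engine.
struct EngineCaps {
  bool position_is_primary_key;   // roughly HA_PRIMARY_KEY_REQUIRED_FOR_POSITION
  bool has_usable_key;            // the table has at least one key
};

Access choose_access(const EngineCaps &caps)
{
  if (caps.position_is_primary_key)
    return Access::ByPosition;    // rnd_pos() on the stored position
  if (caps.has_usable_key)
    return Access::ByIndex;       // index_read() on key 0
  return Access::FullScan;        // rnd_next() + record_compare()
}

int main()
{
  EngineCaps pk_engine{true, true};
  EngineCaps keyless_engine{false, false};
  std::printf("pk engine: %d, keyless engine: %d\n",
              (int) choose_access(pk_engine), (int) choose_access(keyless_engine));
  return 0;
}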
*/ @@ -7374,7 +7658,7 @@ int Update_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, +int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO const *rli, TABLE *table, char const *const row_start, char const **const row_end) @@ -7399,7 +7683,7 @@ int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, store_record(table, record[1]); char const *next_start = *row_end; /* m_after_image is the after image for the update */ - error= unpack_row(rli, table, m_width, next_start, &m_cols, row_end, + error= unpack_row(rli, table, m_width, next_start, &m_cols_ai, row_end, &m_master_reclength, table->write_set, UPDATE_ROWS_EVENT); bmove_align(m_after_image, table->record[0], table->s->reclength); restore_record(table, record[1]); @@ -7471,3 +7755,113 @@ void Update_rows_log_event::print(FILE *file, } #endif + +Incident_log_event::Incident_log_event(const char *buf, uint event_len, + const Format_description_log_event *descr_event) + : Log_event(buf, descr_event) +{ + DBUG_ENTER("Incident_log_event::Incident_log_event"); + uint8 const common_header_len= + descr_event->common_header_len; + uint8 const post_header_len= + descr_event->post_header_len[INCIDENT_EVENT-1]; + + DBUG_PRINT("info",("event_len: %u; common_header_len: %d; post_header_len: %d", + event_len, common_header_len, post_header_len)); + + m_incident= static_cast<Incident>(uint2korr(buf + common_header_len)); + char const *ptr= buf + common_header_len + post_header_len; + char const *const str_end= buf + event_len; + uint8 len= 0; // Assignment to keep compiler happy + const char *str= NULL; // Assignment to keep compiler happy + read_str(&ptr, str_end, &str, &len); + m_message.str= const_cast<char*>(str); + m_message.length= len; + DBUG_PRINT("info", ("m_incident: %d", m_incident)); + DBUG_VOID_RETURN; +} + + +Incident_log_event::~Incident_log_event() +{ +} + + +const char * +Incident_log_event::description() const +{ + static const char *const description[]= { + "NOTHING", // Not used + "LOST_EVENTS" + }; + + DBUG_PRINT("info", ("m_incident: %d", m_incident)); + + DBUG_ASSERT(0 <= m_incident); + DBUG_ASSERT((my_size_t) m_incident <= sizeof(description)/sizeof(*description)); + + return description[m_incident]; +} + + +#ifndef MYSQL_CLIENT +void Incident_log_event::pack_info(Protocol *protocol) +{ + char buf[256]; + my_size_t bytes; + if (m_message.length > 0) + bytes= my_snprintf(buf, sizeof(buf), "#%d (%s)", + m_incident, description()); + else + bytes= my_snprintf(buf, sizeof(buf), "#%d (%s): %s", + m_incident, description(), m_message.str); + protocol->store(buf, bytes, &my_charset_bin); +} +#endif + + +#ifdef MYSQL_CLIENT +void +Incident_log_event::print(FILE *file, + PRINT_EVENT_INFO *print_event_info) +{ + if (print_event_info->short_form) + return; + + Write_on_release_cache cache(&print_event_info->head_cache, file); + print_header(&cache, print_event_info, FALSE); + my_b_printf(&cache, "\n# Incident: %s", description()); +} +#endif + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int +Incident_log_event::do_apply_event(RELAY_LOG_INFO const *rli) +{ + DBUG_ENTER("Incident_log_event::do_apply_event"); + slave_print_msg(ERROR_LEVEL, rli, ER_SLAVE_INCIDENT, + ER(ER_SLAVE_INCIDENT), + description(), + m_message.length > 0 ? 
m_message.str : "<none>"); + DBUG_RETURN(1); +} +#endif + +bool +Incident_log_event::write_data_header(IO_CACHE *file) +{ + DBUG_ENTER("Incident_log_event::write_data_header"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + byte buf[sizeof(int16)]; + int2store(buf, (int16) m_incident); + DBUG_RETURN(my_b_safe_write(file, buf, sizeof(buf))); +} + +bool +Incident_log_event::write_data_body(IO_CACHE *file) +{ + DBUG_ENTER("Incident_log_event::write_data_body"); + DBUG_RETURN(write_str(file, m_message.str, m_message.length)); +} + + diff --git a/sql/log_event.h b/sql/log_event.h index 7cbe8925d9a..4e43822cb38 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -22,6 +22,7 @@ #endif #include <my_bitmap.h> +#include "rpl_constants.h" #define LOG_READ_EOF -1 #define LOG_READ_BOGUS -2 @@ -198,7 +199,7 @@ struct sql_ex_info #define TABLE_MAP_HEADER_LEN 8 #define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1) #define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN) - +#define INCIDENT_HEADER_LEN 2 /* Max number of possible extra bytes in a replication event compared to a packet (i.e. a query) sent from client to master; @@ -468,10 +469,28 @@ enum Log_event_type XID_EVENT= 16, BEGIN_LOAD_QUERY_EVENT= 17, EXECUTE_LOAD_QUERY_EVENT= 18, + TABLE_MAP_EVENT = 19, - WRITE_ROWS_EVENT = 20, - UPDATE_ROWS_EVENT = 21, - DELETE_ROWS_EVENT = 22, + + /* + These event numbers were used for 5.1.0 to 5.1.15 and are + therefore obsolete. + */ + PRE_GA_WRITE_ROWS_EVENT = 20, + PRE_GA_UPDATE_ROWS_EVENT = 21, + PRE_GA_DELETE_ROWS_EVENT = 22, + + /* + These event numbers are used from 5.1.16 and forward + */ + WRITE_ROWS_EVENT = 23, + UPDATE_ROWS_EVENT = 24, + DELETE_ROWS_EVENT = 25, + + /* + Something out of the ordinary happened on the master + */ + INCIDENT_EVENT= 26, /* Add new events here - right above this comment! @@ -503,6 +522,7 @@ class THD; class Format_description_log_event; struct st_relay_log_info; +typedef st_relay_log_info RELAY_LOG_INFO; #ifdef MYSQL_CLIENT /* @@ -591,6 +611,33 @@ typedef struct st_print_event_info class Log_event { public: + /** + Enumeration of what kinds of skipping (and non-skipping) that can + occur when the slave executes an event. + + @see shall_skip + @see do_shall_skip + */ + enum enum_skip_reason { + /** + Don't skip event. + */ + EVENT_SKIP_NOT, + + /** + Skip event by ignoring it. + + This means that the slave skip counter will not be changed. + */ + EVENT_SKIP_IGNORE, + + /** + Skip event and decrease skip counter. + */ + EVENT_SKIP_COUNT + }; + + /* The following type definition is to be used whenever data is placed and manipulated in a common buffer. Use this typedef for buffers @@ -672,16 +719,14 @@ public: static void init_show_field_list(List<Item>* field_list); #ifdef HAVE_REPLICATION int net_send(Protocol *protocol, const char* log_name, my_off_t pos); + /* pack_info() is used by SHOW BINLOG EVENTS; as print() it prepares and sends a string to display to the user, so it resembles print(). */ + virtual void pack_info(Protocol *protocol); - /* - The SQL slave thread calls exec_event() to execute the event; this is where - the slave's data is modified. - */ - virtual int exec_event(struct st_relay_log_info* rli); + #endif /* HAVE_REPLICATION */ virtual const char* get_db() { @@ -754,6 +799,128 @@ public: *description_event); /* returns the human readable name of the event's type */ const char* get_type_str(); + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +public: + + /** + Apply the event to the database. 
+ + This function represents the public interface for applying an + event. + + @see do_apply_event + */ + int apply_event(RELAY_LOG_INFO const *rli) { + return do_apply_event(rli); + } + + + /** + Update the relay log position. + + This function represents the public interface for "stepping over" + the event and will update the relay log information. + + @see do_update_pos + */ + int update_pos(RELAY_LOG_INFO *rli) + { + return do_update_pos(rli); + } + + /** + Decide if the event shall be skipped, and the reason for skipping + it. + + @see do_shall_skip + */ + enum_skip_reason shall_skip(RELAY_LOG_INFO *rli) + { + return do_shall_skip(rli); + } + +protected: + + /** + Primitive to apply an event to the database. + + This is where the change to the database is made. + + @note The primitive is protected instead of private, since there + is a hierarchy of actions to be performed in some cases. + + @see Format_description_log_event::do_apply_event() + + @param rli Pointer to relay log info structure + + @retval 0 Event applied successfully + @retval errno Error code if event application failed + */ + virtual int do_apply_event(RELAY_LOG_INFO const *rli) + { + return 0; /* Default implementation does nothing */ + } + + + /** + Advance relay log coordinates. + + This function is called to advance the relay log coordinates to + just after the event. It is essential that both the relay log + coordinate and the group log position is updated correctly, since + this function is used also for skipping events. + + Normally, each implementation of do_update_pos() shall: + + - Update the event position to refer to the position just after + the event. + + - Update the group log position to refer to the position just + after the event <em>if the event is last in a group</em> + + @param rli Pointer to relay log info structure + + @retval 0 Coordinates changed successfully + @retval errno Error code if advancing failed (usually just + 1). Observe that handler errors are returned by the + do_apply_event() function, and not by this one. + */ + virtual int do_update_pos(RELAY_LOG_INFO *rli); + + + /** + Decide if this event shall be skipped or not and the reason for + skipping it. + + The default implementation decide that the event shall be skipped + if either: + + - the server id of the event is the same as the server id of the + server and <code>rli->replicate_same_server_id</code> is true, + or + + - if <code>rli->slave_skip_counter</code> is greater than zero. + + @see do_apply_event + @see do_update_pos + + @retval Log_event::EVENT_SKIP_NOT + The event shall not be skipped and should be applied. + + @retval Log_event::EVENT_SKIP_IGNORE + The event shall be skipped by just ignoring it, i.e., the slave + skip counter shall not be changed. This happends if, for example, + the originating server id of the event is the same as the server + id of the slave. + + @retval Log_event::EVENT_SKIP_COUNT + The event shall be skipped because the slave skip counter was + non-zero. The caller shall decrease the counter by one. + */ + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); + +#endif }; /* @@ -794,10 +961,10 @@ public: uint16 error_code; ulong thread_id; /* - For events created by Query_log_event::exec_event (and - Load_log_event::exec_event()) we need the *original* thread id, to be able - to log the event with the original (=master's) thread id (fix for - BUG#1686). 
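The apply/skip interface documented above replaces the old exec_event() entry point with three public wrappers over protected virtuals. Purely as an illustration (this is not code from the patch, and the real caller is the slave SQL thread elsewhere in the tree; the function name below is invented), an applier built on this interface would look roughly like:

static int example_apply_one_event(Log_event *ev, RELAY_LOG_INFO *rli)
{
  /*
    Sketch only: drive the shall_skip()/apply_event()/update_pos()
    protocol declared above.  Like the interface itself, this only
    exists when built with !MYSQL_CLIENT && HAVE_REPLICATION.
  */
  int error= 0;
  Log_event::enum_skip_reason reason= ev->shall_skip(rli);
  if (reason == Log_event::EVENT_SKIP_NOT)
    error= ev->apply_event(rli);          /* invokes do_apply_event() */
  /*
    Per the documentation above, EVENT_SKIP_IGNORE leaves the slave
    skip counter untouched, while EVENT_SKIP_COUNT tells the caller to
    decrease it by one; in both cases the position is still advanced.
  */
  if (!error)
    error= ev->update_pos(rli);           /* invokes do_update_pos() */
  return error;
}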
+ For events created by Query_log_event::do_apply_event (and + Load_log_event::do_apply_event()) we need the *original* thread + id, to be able to log the event with the original (=master's) + thread id (fix for BUG#1686). */ ulong slave_proxy_id; @@ -860,9 +1027,6 @@ public: const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); - int exec_event(struct st_relay_log_info* rli, const char *query_arg, - uint32 q_len_arg); #endif /* HAVE_REPLICATION */ #else void print_query_header(IO_CACHE* file, PRINT_EVENT_INFO* print_event_info); @@ -891,6 +1055,16 @@ public: */ virtual ulong get_post_header_size_for_derived() { return 0; } /* Writes derived event-specific part of post header. */ + +public: /* !!! Public in this patch to allow old usage */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + + int do_apply_event(RELAY_LOG_INFO const *rli, + const char *query_arg, + uint32 q_len_arg); +#endif /* HAVE_REPLICATION */ }; @@ -939,9 +1113,8 @@ public: uint16 master_port; #ifndef MYSQL_CLIENT - Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); + Slave_log_event(THD* thd_arg, RELAY_LOG_INFO* rli); void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -954,6 +1127,11 @@ public: #ifndef MYSQL_CLIENT bool write(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const* rli); +#endif }; #endif /* HAVE_REPLICATION */ @@ -1023,12 +1201,6 @@ public: const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli) - { - return exec_event(thd->slave_net,rli,0); - } - int exec_event(NET* net, struct st_relay_log_info* rli, - bool use_rli_only_for_errors); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1060,6 +1232,17 @@ public: + LOAD_HEADER_LEN + sql_ex.data_size() + field_block_len + num_fields); } + +public: /* !!! Public in this patch to allow old usage */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const* rli) + { + return do_apply_event(thd->slave_net,rli,0); + } + + int do_apply_event(NET *net, RELAY_LOG_INFO const *rli, + bool use_rli_only_for_errors); +#endif }; extern char server_version[SERVER_VERSION_LENGTH]; @@ -1117,7 +1300,6 @@ public: Start_log_event_v3(); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else Start_log_event_v3() {} @@ -1137,6 +1319,22 @@ public: return START_V3_HEADER_LEN; //no variable-sized part } virtual bool is_artificial_event() { return artificial_event; } + +protected: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO*) + { + /* + Events from ourself should be skipped, but they should not + decrease the slave skip counter. 
+ */ + if (this->server_id == ::server_id) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::EVENT_SKIP_NOT; + } +#endif }; @@ -1162,13 +1360,6 @@ public: uchar server_version_split[3]; Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); - -#ifndef MYSQL_CLIENT -#ifdef HAVE_REPLICATION - int exec_event(struct st_relay_log_info* rli); -#endif /* HAVE_REPLICATION */ -#endif - Format_description_log_event(const char* buf, uint event_len, const Format_description_log_event* description_event); ~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); } @@ -1191,7 +1382,15 @@ public: */ return FORMAT_DESCRIPTION_HEADER_LEN; } + void calc_server_version_split(); + +protected: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1215,7 +1414,6 @@ public: {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1230,6 +1428,13 @@ public: bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1256,7 +1461,6 @@ class Rand_log_event: public Log_event {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1270,6 +1474,13 @@ class Rand_log_event: public Log_event bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; /***************************************************************************** @@ -1293,7 +1504,6 @@ class Xid_log_event: public Log_event Xid_log_event(THD* thd_arg, my_xid x): Log_event(thd_arg,0,0), xid(x) {} #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1307,6 +1517,11 @@ class Xid_log_event: public Log_event bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; /***************************************************************************** @@ -1336,7 +1551,6 @@ public: val_len(val_len_arg), type(type_arg), charset_number(charset_number_arg) { is_null= !val; } void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -1348,6 +1562,13 @@ public: bool write(IO_CACHE* file); #endif bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual 
enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1362,7 +1583,6 @@ public: #ifndef MYSQL_CLIENT Stop_log_event() :Log_event() {} - int exec_event(struct st_relay_log_info* rli); #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif @@ -1373,6 +1593,22 @@ public: ~Stop_log_event() {} Log_event_type get_type_code() { return STOP_EVENT;} bool is_valid() const { return 1; } + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli) + { + /* + Events from ourself should be skipped, but they should not + decrease the slave skip counter. + */ + if (this->server_id == ::server_id) + return Log_event::EVENT_SKIP_IGNORE; + else + return Log_event::EVENT_SKIP_NOT; + } +#endif }; /***************************************************************************** @@ -1399,7 +1635,6 @@ public: ulonglong pos_arg, uint flags); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1418,6 +1653,12 @@ public: #ifndef MYSQL_CLIENT bool write(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_update_pos(RELAY_LOG_INFO *rli); + virtual enum_skip_reason do_shall_skip(RELAY_LOG_INFO *rli); +#endif }; @@ -1452,7 +1693,6 @@ public: bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1486,6 +1726,11 @@ public: */ bool write_base(IO_CACHE* file); #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1518,7 +1763,6 @@ public: Append_block_log_event(THD* thd, const char* db_arg, char* block_arg, uint block_len_arg, bool using_trans); #ifdef HAVE_REPLICATION - int exec_event(struct st_relay_log_info* rli); void pack_info(Protocol* protocol); virtual int get_create_or_append() const; #endif /* HAVE_REPLICATION */ @@ -1536,6 +1780,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1555,7 +1804,6 @@ public: Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1572,6 +1820,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif }; @@ -1591,7 +1844,6 @@ public: Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1607,6 +1859,11 @@ public: bool write(IO_CACHE* file); const char* get_db() { return db; } #endif + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO 
const *rli); +#endif }; @@ -1676,7 +1933,6 @@ public: bool using_trans, bool suppress_use); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); - int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); @@ -1695,7 +1951,12 @@ public: #ifndef MYSQL_CLIENT bool write_post_header_for_derived(IO_CACHE* file); #endif - }; + +private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif +}; #ifdef MYSQL_CLIENT @@ -1793,7 +2054,6 @@ public: #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int exec_event(struct st_relay_log_info *rli); virtual void pack_info(Protocol *protocol); #endif @@ -1803,6 +2063,11 @@ public: private: +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); +#endif + #ifndef MYSQL_CLIENT TABLE *m_table; #endif @@ -1881,12 +2146,11 @@ public: virtual ~Rows_log_event(); - void set_flags(flag_set flags) { m_flags |= flags; } - void clear_flags(flag_set flags) { m_flags &= ~flags; } - flag_set get_flags(flag_set flags) const { return m_flags & flags; } + void set_flags(flag_set flags_arg) { m_flags |= flags_arg; } + void clear_flags(flag_set flags_arg) { m_flags &= ~flags_arg; } + flag_set get_flags(flag_set flags_arg) const { return m_flags & flags_arg; } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int exec_event(struct st_relay_log_info *rli); virtual void pack_info(Protocol *protocol); #endif @@ -1903,14 +2167,7 @@ public: #endif /* Member functions to implement superclass interface */ - virtual int get_data_size() - { - DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", - return 6 + 1 + no_bytes_in_map(&m_cols) + - (m_rows_cur - m_rows_buf);); - return ROWS_HEADER_LEN + 1 + no_bytes_in_map(&m_cols) + - (m_rows_cur - m_rows_buf); - } + virtual int get_data_size(); MY_BITMAP const *get_cols() const { return &m_cols; } my_size_t get_width() const { return m_width; } @@ -1921,9 +2178,14 @@ public: virtual bool write_data_body(IO_CACHE *file); virtual const char *get_db() { return m_table->s->db.str; } #endif + /* + Check that malloc() succeeded in allocating memory for the rows + buffer and the COLS vector. Checking that an Update_rows_log_event + is valid is done in the Update_rows_log_event::is_valid() + function. + */ virtual bool is_valid() const { - /* that's how we check malloc() succeeded */ return m_rows_buf && m_cols.bitmap; } @@ -1956,10 +2218,20 @@ protected: ulong m_table_id; /* Table ID */ MY_BITMAP m_cols; /* Bitmap denoting columns available */ ulong m_width; /* The width of the columns bitmap */ + /* + Bitmap for columns available in the after image, if present. These + fields are only available for Update_rows events. Observe that the + width of both the before image COLS vector and the after image + COLS vector is the same: the number of columns of the table on the + master. 
+ */ + MY_BITMAP m_cols_ai; + ulong m_master_reclength; /* Length of record on master side */ - /* Bit buffer in the same memory as the class */ + /* Bit buffers in the same memory as the class */ uint32 m_bitbuf[128/(sizeof(uint32)*8)]; + uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)]; byte *m_rows_buf; /* The rows in packed format */ byte *m_rows_cur; /* One-after the end of the data */ @@ -1970,6 +2242,9 @@ protected: private: #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); + virtual int do_update_pos(RELAY_LOG_INFO *rli); + /* Primitive to prepare for a sequence of row executions. @@ -2017,7 +2292,7 @@ private: RETURN VALUE Error code, if something went wrong, 0 otherwise. */ - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end) = 0; /* @@ -2088,7 +2363,7 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif @@ -2117,10 +2392,20 @@ public: }; #ifndef MYSQL_CLIENT - Update_rows_log_event(THD*, TABLE*, ulong table_id, - MY_BITMAP const *cols, bool is_transactional); + Update_rows_log_event(THD*, TABLE*, ulong table_id, + MY_BITMAP const *cols_bi, + MY_BITMAP const *cols_ai, + bool is_transactional); + + Update_rows_log_event(THD*, TABLE*, ulong table_id, + MY_BITMAP const *cols, + bool is_transactional); + + void init(MY_BITMAP const *cols); #endif + virtual ~Update_rows_log_event(); + #ifdef HAVE_REPLICATION Update_rows_log_event(const char *buf, uint event_len, const Format_description_log_event *description_event); @@ -2139,7 +2424,12 @@ public: } #endif -private: + virtual bool is_valid() const + { + return Rows_log_event::is_valid() && m_cols_ai.bitmap; + } + +protected: virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } #ifdef MYSQL_CLIENT @@ -2153,7 +2443,7 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ @@ -2210,7 +2500,7 @@ public: } #endif -private: +protected: virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } #ifdef MYSQL_CLIENT @@ -2224,10 +2514,110 @@ private: virtual int do_before_row_operations(TABLE *table); virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, char const *row_start, char const **row_end); virtual int do_exec_row(TABLE *table); #endif }; + +#include "log_event_old.h" + +/** + Class representing an incident, an occurance out of the ordinary, + that happened on the master. + + The event is used to inform the slave that something out of the + ordinary happened on the master that might cause the database to be + in an inconsistent state. 
+ + <table id="IncidentFormat"> + <caption>Incident event format</caption> + <tr> + <th>Symbol</th> + <th>Size<br/>(bytes)</th> + <th>Description</th> + </tr> + <tr> + <td>INCIDENT</td> + <td align="right">2</td> + <td>Incident number as an unsigned integer</td> + </tr> + <tr> + <td>MSGLEN</td> + <td align="right">1</td> + <td>Message length as an unsigned integer</td> + </tr> + <tr> + <td>MESSAGE</td> + <td align="right">MSGLEN</td> + <td>The message, if present. Not null terminated.</td> + </tr> + </table> + */ +class Incident_log_event : public Log_event { +public: +#ifndef MYSQL_CLIENT + Incident_log_event(THD *thd_arg, Incident incident) + : Log_event(thd_arg, 0, FALSE), m_incident(incident) + { + DBUG_ENTER("Incident_log_event::Incident_log_event"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + m_message.str= NULL; /* Just as a precaution */ + m_message.length= 0; + DBUG_VOID_RETURN; + } + + Incident_log_event(THD *thd_arg, Incident incident, LEX_STRING const msg) + : Log_event(thd_arg, 0, FALSE), m_incident(incident) + { + DBUG_ENTER("Incident_log_event::Incident_log_event"); + DBUG_PRINT("enter", ("m_incident: %d", m_incident)); + m_message= msg; + DBUG_VOID_RETURN; + } +#endif + +#ifndef MYSQL_CLIENT + void pack_info(Protocol*); +#endif + + Incident_log_event(const char *buf, uint event_len, + const Format_description_log_event *descr_event); + + virtual ~Incident_log_event(); + +#ifdef MYSQL_CLIENT + virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info); +#endif + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_apply_event(RELAY_LOG_INFO const *rli); +#endif + + virtual bool write_data_header(IO_CACHE *file); + virtual bool write_data_body(IO_CACHE *file); + + virtual Log_event_type get_type_code() { return INCIDENT_EVENT; } + + virtual bool is_valid() const { return 1; } + virtual int get_data_size() { + return INCIDENT_HEADER_LEN + 1 + m_message.length; + } + +private: + const char *description() const; + + Incident m_incident; + LEX_STRING m_message; +}; + +static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache, + FILE *file) +{ + return + my_b_copy_to_file(cache, file) || + reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE); +} + #endif /* _log_event_h */ diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc new file mode 100644 index 00000000000..500feab0542 --- /dev/null +++ b/sql/log_event_old.cc @@ -0,0 +1,105 @@ + +#include "mysql_priv.h" +#include "log_event_old.h" +#include "rpl_record_old.h" + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +int +Write_rows_log_event_old::do_prepare_row(THD *thd, + RELAY_LOG_INFO const *rli, + TABLE *table, + char const *row_start, + char const **row_end) +{ + DBUG_ASSERT(table != NULL); + DBUG_ASSERT(row_start && row_end); + + int error; + error= unpack_row_old(const_cast<RELAY_LOG_INFO*>(rli), + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->write_set, PRE_GA_WRITE_ROWS_EVENT); + bitmap_copy(table->read_set, table->write_set); + return error; +} + + +int +Delete_rows_log_event_old::do_prepare_row(THD *thd, + RELAY_LOG_INFO const *rli, + TABLE *table, + char const *row_start, + char const **row_end) +{ + int error; + DBUG_ASSERT(row_start && row_end); + /* + This assertion actually checks that there is at least as many + columns on the slave as on the master. 
+ */ + DBUG_ASSERT(table->s->fields >= m_width); + + error= unpack_row_old(const_cast<RELAY_LOG_INFO*>(rli), + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->read_set, PRE_GA_DELETE_ROWS_EVENT); + /* + If we will access rows using the random access method, m_key will + be set to NULL, so we do not need to make a key copy in that case. + */ + if (m_key) + { + KEY *const key_info= table->key_info; + + key_copy(m_key, table->record[0], key_info, 0); + } + + return error; +} + + +int Update_rows_log_event_old::do_prepare_row(THD *thd, + RELAY_LOG_INFO const *rli, + TABLE *table, + char const *row_start, + char const **row_end) +{ + int error; + DBUG_ASSERT(row_start && row_end); + /* + This assertion actually checks that there is at least as many + columns on the slave as on the master. + */ + DBUG_ASSERT(table->s->fields >= m_width); + + /* record[0] is the before image for the update */ + error= unpack_row_old(const_cast<RELAY_LOG_INFO*>(rli), + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->read_set, PRE_GA_UPDATE_ROWS_EVENT); + row_start = *row_end; + /* m_after_image is the after image for the update */ + error= unpack_row_old(const_cast<RELAY_LOG_INFO*>(rli), + table, m_width, m_after_image, + row_start, &m_cols, row_end, &m_master_reclength, + table->write_set, PRE_GA_UPDATE_ROWS_EVENT); + + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("m_after_image", m_after_image, table->s->reclength); + + + /* + If we will access rows using the random access method, m_key will + be set to NULL, so we do not need to make a key copy in that case. + */ + if (m_key) + { + KEY *const key_info= table->key_info; + + key_copy(m_key, table->record[0], key_info, 0); + } + + return error; +} + +#endif diff --git a/sql/log_event_old.h b/sql/log_event_old.h new file mode 100644 index 00000000000..87b11aea3a3 --- /dev/null +++ b/sql/log_event_old.h @@ -0,0 +1,127 @@ +/* Copyright 2007 MySQL AB. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef LOG_EVENT_OLD_H +#define LOG_EVENT_OLD_H + +/* + Need to include this file at the proper position of log_event.h + */ + + +class Write_rows_log_event_old : public Write_rows_log_event +{ +public: + enum + { + /* Support interface to THD::binlog_prepare_pending_rows_event */ + TYPE_CODE = PRE_GA_WRITE_ROWS_EVENT + }; + +#if !defined(MYSQL_CLIENT) + Write_rows_log_event_old(THD *thd, TABLE *table, ulong table_id, + MY_BITMAP const *cols, bool is_transactional) + : Write_rows_log_event(thd, table, table_id, cols, is_transactional) + { + } +#endif +#if defined(HAVE_REPLICATION) + Write_rows_log_event_old(const char *buf, uint event_len, + const Format_description_log_event *descr) + : Write_rows_log_event(buf, event_len, descr) + { + } +#endif + +private: + virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, + char const *row_start, char const **row_end); +#endif +}; + + +class Update_rows_log_event_old : public Update_rows_log_event +{ +public: + enum + { + /* Support interface to THD::binlog_prepare_pending_rows_event */ + TYPE_CODE = PRE_GA_UPDATE_ROWS_EVENT + }; + +#if !defined(MYSQL_CLIENT) + Update_rows_log_event_old(THD *thd, TABLE *table, ulong table_id, + MY_BITMAP const *cols, bool is_transactional) + : Update_rows_log_event(thd, table, table_id, cols, is_transactional) + { + } +#endif +#if defined(HAVE_REPLICATION) + Update_rows_log_event_old(const char *buf, uint event_len, + const Format_description_log_event *descr) + : Update_rows_log_event(buf, event_len, descr) + { + } +#endif + +private: + virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, + char const *row_start, char const **row_end); +#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ +}; + + +class Delete_rows_log_event_old : public Delete_rows_log_event +{ +public: + enum + { + /* Support interface to THD::binlog_prepare_pending_rows_event */ + TYPE_CODE = PRE_GA_DELETE_ROWS_EVENT + }; + +#if !defined(MYSQL_CLIENT) + Delete_rows_log_event_old(THD *thd, TABLE *table, ulong table_id, + MY_BITMAP const *cols, bool is_transactional) + : Delete_rows_log_event(thd, table, table_id, cols, is_transactional) + { + } +#endif +#if defined(HAVE_REPLICATION) + Delete_rows_log_event_old(const char *buf, uint event_len, + const Format_description_log_event *descr) + : Delete_rows_log_event(buf, event_len, descr) + { + } +#endif + +private: + virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, + char const *row_start, char const **row_end); +#endif +}; + + +#endif + diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index 511942e9a5d..7b2d271639f 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -191,7 +191,7 @@ int str2my_decimal(uint mask, const char *from, uint length, } -my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec) +my_decimal *date2my_decimal(MYSQL_TIME *ltime, my_decimal *dec) { longlong date; date = (ltime->year*100L + ltime->month)*100L 
+ ltime->day; diff --git a/sql/my_decimal.h b/sql/my_decimal.h index cefc5ee00fd..eade029677f 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -296,7 +296,7 @@ int string2my_decimal(uint mask, const String *str, my_decimal *d) } -my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec); +my_decimal *date2my_decimal(MYSQL_TIME *ltime, my_decimal *dec); #endif /*defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY) */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index a1a7c20fc50..68f5ff022a4 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -47,7 +47,6 @@ typedef Bitmap<64> key_map; /* Used for finding keys */ #else typedef Bitmap<((MAX_INDEXES+7)/8*8)> key_map; /* Used for finding keys */ #endif -typedef ulong key_part_map; /* Used for finding key parts */ typedef ulong nesting_map; /* Used for flags of nesting constructs */ /* Used to identify NESTED_JOIN structures within a join (applicable only to @@ -148,6 +147,7 @@ typedef struct my_locale_st extern MY_LOCALE my_locale_en_US; extern MY_LOCALE *my_locales[]; +extern MY_LOCALE *my_default_lc_time_names; MY_LOCALE *my_locale_by_name(const char *name); MY_LOCALE *my_locale_by_number(uint number); @@ -264,7 +264,6 @@ MY_LOCALE *my_locale_by_number(uint number); #endif #if defined(__WIN__) -#define IF_WIN(A,B) (A) #undef FLUSH_TIME #define FLUSH_TIME 1800 /* Flush every half hour */ @@ -273,7 +272,6 @@ MY_LOCALE *my_locale_by_number(uint number); #define WAIT_PRIOR 0 #define QUERY_PRIOR 2 #else -#define IF_WIN(A,B) (B) #define INTERRUPT_PRIOR 10 #define CONNECT_PRIOR 9 #define WAIT_PRIOR 8 @@ -341,9 +339,6 @@ MY_LOCALE *my_locale_by_number(uint number); /* The following is used to detect a conflict with DISTINCT */ #define SELECT_ALL (ULL(1) << 24) // SELECT, user, parser -/* Set if we are updating a non-transaction safe table */ -#define OPTION_STATUS_NO_TRANS_UPDATE (ULL(1) << 25) // THD, intern - /* The following can be set when importing tables in a 'wrong order' to suppress foreign key checks */ #define OPTION_NO_FOREIGN_KEY_CHECKS (ULL(1) << 26) // THD, user, binlog @@ -367,7 +362,7 @@ MY_LOCALE *my_locale_by_number(uint number); Maximum length of time zone name that we support (Time zone name is char(64) in db). mysqlbinlog needs it. */ -#define MAX_TIME_ZONE_NAME_LENGTH 72 +#define MAX_TIME_ZONE_NAME_LENGTH (NAME_LEN + 1) /* The rest of the file is included in the server only */ #ifndef MYSQL_CLIENT @@ -411,8 +406,9 @@ MY_LOCALE *my_locale_by_number(uint number); updated (to store more bytes on disk). 
NOTE: When adding new SQL_MODE types, make sure to also add them to - ../scripts/mysql_create_system_tables.sh and - ../scripts/mysql_fix_privilege_tables.sql + the scripts used for creating the MySQL system tables + in scripts/mysql_system_tables.sql and scripts/mysql_system_tables_fix.sql + */ #define RAID_BLOCK_SIZE 1024 @@ -599,7 +595,7 @@ class THD; void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables); bool check_single_table_access(THD *thd, ulong privilege, - TABLE_LIST *tables); + TABLE_LIST *tables, bool no_errors); bool check_routine_access(THD *thd,ulong want_access,char *db,char *name, bool is_proc, bool no_errors); bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table); @@ -622,8 +618,11 @@ void get_default_definer(THD *thd, LEX_USER *definer); LEX_USER *create_default_definer(THD *thd); LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name); LEX_USER *get_current_user(THD *thd, LEX_USER *user); -bool check_string_length(LEX_STRING *str, - const char *err_msg, uint max_length); +bool check_string_byte_length(LEX_STRING *str, const char *err_msg, + uint max_byte_length); +bool check_string_char_length(LEX_STRING *str, const char *err_msg, + uint max_char_length, CHARSET_INFO *cs, + bool no_error); enum enum_mysql_completiontype { ROLLBACK_RELEASE=-2, ROLLBACK=1, ROLLBACK_AND_CHAIN=7, @@ -647,6 +646,7 @@ struct Query_cache_query_flags { unsigned int client_long_flag:1; unsigned int client_protocol_41:1; + unsigned int result_in_binary_protocol:1; unsigned int more_results_exists:1; unsigned int pkt_nr; uint character_set_client_num; @@ -673,6 +673,11 @@ struct Query_cache_query_flags query_cache.send_result_to_client(A, B, C) #define query_cache_invalidate_by_MyISAM_filename_ref \ &query_cache_invalidate_by_MyISAM_filename +/* note the "maybe": it's a read without mutex */ +#define query_cache_maybe_disabled(T) \ + (T->variables.query_cache_type == 0 || query_cache.query_cache_size == 0) +#define query_cache_is_cacheable_query(L) \ + (((L)->sql_command == SQLCOM_SELECT) && (L)->safe_to_cache_query) #else #define QUERY_CACHE_FLAGS_SIZE 0 #define query_cache_store_query(A, B) @@ -689,6 +694,8 @@ struct Query_cache_query_flags #define query_cache_abort(A) #define query_cache_end_of_result(A) #define query_cache_invalidate_by_MyISAM_filename_ref NULL +#define query_cache_maybe_disabled(T) 1 +#define query_cache_is_cacheable_query(L) 0 #endif /*HAVE_QUERY_CACHE*/ /* @@ -827,14 +834,15 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent); bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name, char *new_table_alias, bool skip_error); -bool mysql_change_db(THD *thd,const char *name,bool no_access_check); +bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, + bool force_switch); void mysql_parse(THD *thd,char *inBuf,uint length); bool mysql_test_parse_for_slave(THD *thd,char *inBuf,uint length); bool is_update_query(enum enum_sql_command command); bool alloc_query(THD *thd, const char *packet, uint packet_length); void mysql_init_select(LEX *lex); void mysql_reset_thd_for_next_command(THD *thd); -void mysql_init_query(THD *thd, uchar *buf, uint length); +void mysql_init_query(THD *thd, const char *buf, uint length); bool mysql_new_select(LEX *lex, bool move_down); void create_select_for_variable(const char *var_name); void mysql_init_multi_delete(LEX *lex); @@ -991,13 +999,15 @@ bool 
mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List<Item> &fields, List_item *values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates duplic, - COND **where, bool select_insert); + COND **where, bool select_insert, + bool check_fields, bool abort_on_warning); bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, List<List_item> &values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates flag, bool ignore); int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, TABLE_LIST *table_list); +void prepare_triggers_for_insert_stmt(TABLE *table); bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, ha_rows rows, ulonglong options, @@ -1105,7 +1115,7 @@ void init_status_vars(); void free_status_vars(); /* information schema */ -extern LEX_STRING information_schema_name; +extern LEX_STRING INFORMATION_SCHEMA_NAME; extern const LEX_STRING partition_keywords[]; LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, const char* str, uint length, @@ -1123,8 +1133,10 @@ int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond); bool get_schema_tables_result(JOIN *join, enum enum_schema_table_state executed_place); +enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table); + #define is_schema_db(X) \ - !my_strcasecmp(system_charset_info, information_schema_name.str, (X)) + !my_strcasecmp(system_charset_info, INFORMATION_SCHEMA_NAME.str, (X)) /* sql_prepare.cc */ @@ -1155,7 +1167,7 @@ void mysql_ha_mark_tables_for_reopen(THD *thd, TABLE *table); /* sql_base.cc */ #define TMP_TABLE_KEY_EXTRA 8 void set_item_name(Item *item,char *pos,uint length); -bool add_field_to_list(THD *thd, char *field_name, enum enum_field_types type, +bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum enum_field_types type, char *length, char *decimal, uint type_modifier, Item *default_value, Item *on_update_value, @@ -1188,9 +1200,29 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables, table_map read_tables, COND *conds, bool allow_null_cond, int *error); extern Item **not_found_item; + +/* + This enumeration type is used only by the function find_item_in_list + to return the info on how an item has been resolved against a list + of possibly aliased items. + The item can be resolved: + - against an alias name of the list's element (RESOLVED_AGAINST_ALIAS) + - against non-aliased field name of the list (RESOLVED_WITH_NO_ALIAS) + - against an aliased field name of the list (RESOLVED_BEHIND_ALIAS) + - ignoring the alias name in cases when SQL requires to ignore aliases + (e.g. 
when the resolved field reference contains a table name or + when the resolved item is an expression) (RESOLVED_IGNORING_ALIAS) +*/ +enum enum_resolution_type { + NOT_RESOLVED=0, + RESOLVED_IGNORING_ALIAS, + RESOLVED_BEHIND_ALIAS, + RESOLVED_WITH_NO_ALIAS, + RESOLVED_AGAINST_ALIAS +}; Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, find_item_error_report_type report_error, - bool *unaliased); + enum_resolution_type *resolution); bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list); bool insert_fields(THD *thd, Name_resolution_context *context, @@ -1248,7 +1280,8 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, st_table_list *TABLE_LIST::*link, const char *db_name, const char *table_name); -TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list); +TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, + bool check_alias); TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name); TABLE *find_temporary_table(THD *thd, TABLE_LIST *table_list); bool close_temporary_table(THD *thd, TABLE_LIST *table_list); @@ -1408,7 +1441,16 @@ int abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt); void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt); void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table); +/* Functions to work with system tables. */ +bool open_system_tables_for_read(THD *thd, TABLE_LIST *table_list, + Open_tables_state *backup); +void close_system_tables(THD *thd, Open_tables_state *backup); +TABLE *open_system_table_for_update(THD *thd, TABLE_LIST *one_table); + bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE); +bool close_cached_connection_tables(THD *thd, bool wait_for_refresh, + LEX_STRING *connect_string, + bool have_lock = FALSE); void copy_field_from_tmp_record(Field *field,int offset); bool fill_record(THD *thd, Field **field, List<Item> &values, bool ignore_errors); @@ -1471,7 +1513,7 @@ void print_plan(JOIN* join,uint idx, double record_count, double read_time, void mysql_print_status(); /* key.cc */ int find_ref_key(KEY *key, uint key_count, byte *record, Field *field, - uint *key_length); + uint *key_length, uint *keypart); void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length); void key_restore(byte *to_record, byte *from_key, KEY *key_info, uint key_length); @@ -1526,8 +1568,10 @@ extern bool check_reserved_words(LEX_STRING *name); /* strfunc.cc */ ulonglong find_set(TYPELIB *lib, const char *x, uint length, CHARSET_INFO *cs, char **err_pos, uint *err_len, bool *set_warning); -uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match); -uint find_type2(TYPELIB *lib, const char *find, uint length, CHARSET_INFO *cs); +uint find_type(const TYPELIB *lib, const char *find, uint length, + bool part_match); +uint find_type2(const TYPELIB *lib, const char *find, uint length, + CHARSET_INFO *cs); void unhex_type2(TYPELIB *lib); uint check_word(TYPELIB *lib, const char *val, const char *end, const char **end_of_word); @@ -1797,19 +1841,20 @@ ulong convert_period_to_month(ulong period); ulong convert_month_to_period(ulong month); void get_date_from_daynr(long daynr,uint *year, uint *month, uint *day); -my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *not_exist); -bool str_to_time_with_warn(const char *str,uint length,TIME *l_time); +my_time_t TIME_to_timestamp(THD *thd, const MYSQL_TIME *t, my_bool *not_exist); 
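The enum_resolution_type comment above describes what replaces the old bool *unaliased out-parameter of find_item_in_list(). As a sketch only (the wrapper name is invented, and the mapping back to the old boolean is approximate, not taken from this patch), a caller migrating to the new signature might look like:

static Item **example_resolve_in_list(Item *item, List<Item> &fields,
                                      find_item_error_report_type report)
{
  uint counter;
  enum_resolution_type resolution;
  Item **found= find_item_in_list(item, fields, &counter, report, &resolution);
  if (found && found != not_found_item)
  {
    /*
      The old "unaliased" flag corresponds roughly to a match that did
      not go through an alias name; RESOLVED_IGNORING_ALIAS may also
      qualify, depending on how the caller used the flag.
    */
    bool unaliased= (resolution == RESOLVED_WITH_NO_ALIAS);
    (void) unaliased;
  }
  return found;
}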
+bool str_to_time_with_warn(const char *str,uint length,MYSQL_TIME *l_time); timestamp_type str_to_datetime_with_warn(const char *str, uint length, - TIME *l_time, uint flags); -void localtime_to_TIME(TIME *to, struct tm *from); -void calc_time_from_sec(TIME *to, long seconds, long microseconds); + MYSQL_TIME *l_time, uint flags); +void localtime_to_TIME(MYSQL_TIME *to, struct tm *from); +void calc_time_from_sec(MYSQL_TIME *to, long seconds, long microseconds); -void make_truncated_value_warning(THD *thd, const char *str_val, +void make_truncated_value_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, + const char *str_val, uint str_length, timestamp_type time_type, const char *field_name); -bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval); -bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, +bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type, INTERVAL interval); +bool calc_time_diff(MYSQL_TIME *l_time1, MYSQL_TIME *l_time2, int l_sign, longlong *seconds_out, long *microseconds_out); extern LEX_STRING interval_type_to_name[]; @@ -1821,15 +1866,15 @@ extern DATE_TIME_FORMAT *date_time_format_copy(THD *thd, DATE_TIME_FORMAT *format); const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format, timestamp_type type); -extern bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, +extern bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, timestamp_type type, String *str); -void make_datetime(const DATE_TIME_FORMAT *format, const TIME *l_time, +void make_datetime(const DATE_TIME_FORMAT *format, const MYSQL_TIME *l_time, String *str); -void make_date(const DATE_TIME_FORMAT *format, const TIME *l_time, +void make_date(const DATE_TIME_FORMAT *format, const MYSQL_TIME *l_time, String *str); -void make_time(const DATE_TIME_FORMAT *format, const TIME *l_time, +void make_time(const DATE_TIME_FORMAT *format, const MYSQL_TIME *l_time, String *str); -int my_time_compare(TIME *a, TIME *b); +int my_time_compare(MYSQL_TIME *a, MYSQL_TIME *b); int test_if_number(char *str,int *res,bool allow_wildcards); void change_byte(byte *,uint,char,char); @@ -1849,7 +1894,7 @@ double my_double_round(double value, int dec, bool truncate); int get_quick_record(SQL_SELECT *select); int calc_weekday(long daynr,bool sunday_first_day_of_week); -uint calc_week(TIME *l_time, uint week_behaviour, uint *year); +uint calc_week(MYSQL_TIME *l_time, uint week_behaviour, uint *year); void find_date(char *pos,uint *vek,uint flag); TYPELIB *convert_strings_to_array_type(my_string *typelibs, my_string *end); TYPELIB *typelib(MEM_ROOT *mem_root, List<String> &strings); @@ -2000,7 +2045,6 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->const_table= 0; table->null_row= 0; table->status= STATUS_NO_RECORD; - table->keys_in_use_for_query= table->s->keys_in_use; table->maybe_null= table_list->outer_join; TABLE_LIST *embedding= table_list->embedding; while (!table->maybe_null && embedding) @@ -2011,6 +2055,8 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->tablenr= tablenr; table->map= (table_map) 1 << tablenr; table->force_index= table_list->force_index; + table->covering_keys= table->s->keys_for_keyread; + table->merge_keys.clear_all(); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 758dc54316e..6f7b8d53e05 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -17,6 +17,7 @@ #include <m_ctype.h> #include <my_dir.h> #include "slave.h" +#include "rpl_mi.h" #include 
"sql_repl.h" #include "rpl_filter.h" #include "repl_failsafe.h" @@ -195,12 +196,6 @@ inline void reset_floating_point_exceptions() } /* cplusplus */ - -#if defined(HAVE_LINUXTHREADS) -#define THR_KILL_SIGNAL SIGINT -#else -#define THR_KILL_SIGNAL SIGUSR2 // Can't use this with LinuxThreads -#endif #define MYSQL_KILL_SIGNAL SIGTERM #ifdef HAVE_GLIBC2_STYLE_GETHOSTBYNAME_R @@ -280,7 +275,11 @@ static TYPELIB tc_heuristic_recover_typelib= }; static const char *thread_handling_names[]= -{ "one-thread-per-connection", "no-threads", "pool-of-threads", NullS}; +{ "one-thread-per-connection", "no-threads", +#if HAVE_POOL_OF_THREADS == 1 + "pool-of-threads", +#endif + NullS}; TYPELIB thread_handling_typelib= { @@ -335,6 +334,7 @@ static char *mysqld_user, *mysqld_chroot, *log_error_file_ptr; static char *opt_init_slave, *language_ptr, *opt_init_connect; static char *default_character_set_name; static char *character_set_filesystem_name; +static char *lc_time_names_name; static char *my_bind_addr_str; static char *default_collation_name; static char *default_storage_engine_str; @@ -571,6 +571,8 @@ CHARSET_INFO *system_charset_info, *files_charset_info ; CHARSET_INFO *national_charset_info, *table_alias_charset; CHARSET_INFO *character_set_filesystem; +MY_LOCALE *my_default_lc_time_names; + SHOW_COMP_OPTION have_ssl, have_symlink, have_dlopen, have_query_cache; SHOW_COMP_OPTION have_geometry, have_rtree_keys; SHOW_COMP_OPTION have_crypt, have_compress; @@ -638,6 +640,7 @@ struct rand_struct sql_rand; // used by sql_class.cc:THD::THD() #ifndef EMBEDDED_LIBRARY struct passwd *user_info; static pthread_t select_thread; +static uint thr_kill_signal; #endif /* OS specific variables */ @@ -737,6 +740,8 @@ pthread_handler_t handle_connections_shared_memory(void *arg); #endif pthread_handler_t handle_slave(void *arg); static ulong find_bit_type(const char *x, TYPELIB *bit_lib); +static ulong find_bit_type_or_exit(const char *x, TYPELIB *bit_lib, + const char *option); static void clean_up(bool print_message); static int test_if_case_insensitive(const char *dir_name); @@ -788,7 +793,6 @@ static void close_connections(void) DBUG_PRINT("info",("Waiting for select thread")); #ifndef DONT_USE_THR_ALARM - if (pthread_kill(select_thread,THR_CLIENT_ALARM)) break; // allready dead #endif set_timespec(abstime, 2); @@ -890,7 +894,7 @@ static void close_connections(void) } (void) pthread_mutex_unlock(&LOCK_thread_count); // For unlink from list - Events::get_instance()->deinit(); + Events::deinit(); end_slave(); if (thread_count) @@ -1335,7 +1339,7 @@ static void clean_up_mutexes() (void) pthread_mutex_destroy(&LOCK_bytes_sent); (void) pthread_mutex_destroy(&LOCK_bytes_received); (void) pthread_mutex_destroy(&LOCK_user_conn); - Events::get_instance()->destroy_mutexes(); + Events::destroy_mutexes(); #ifdef HAVE_OPENSSL (void) pthread_mutex_destroy(&LOCK_des_key_file); #ifndef HAVE_YASSL @@ -1804,6 +1808,12 @@ static bool cache_thread() thd= thread_cache.get(); thd->thread_stack= (char*) &thd; // For store_globals (void) thd->store_globals(); + /* + THD::mysys_var::abort is associated with physical thread rather + than with THD object. So we need to reset this flag before using + this thread for handling of new THD object/connection. 
+ */ + thd->mysys_var->abort= 0; thd->thr_create_time= time(NULL); threads.append(thd); return(1); @@ -2288,7 +2298,9 @@ static void init_signals(void) DBUG_ENTER("init_signals"); if (test_flags & TEST_SIGINT) - my_sigset(THR_KILL_SIGNAL,end_thread_signal); + { + my_sigset(thr_kill_signal, end_thread_signal); + } my_sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called! if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL)) @@ -2343,10 +2355,13 @@ static void init_signals(void) #ifdef SIGTSTP sigaddset(&set,SIGTSTP); #endif - sigaddset(&set,THR_SERVER_ALARM); + if (thd_lib_detected != THD_LIB_LT) + sigaddset(&set,THR_SERVER_ALARM); if (test_flags & TEST_SIGINT) - sigdelset(&set,THR_KILL_SIGNAL); // May be SIGINT - sigdelset(&set,THR_CLIENT_ALARM); // For alarms + { + // May be SIGINT + sigdelset(&set, thr_kill_signal); + } sigprocmask(SIG_SETMASK,&set,NULL); pthread_sigmask(SIG_SETMASK,&set,NULL); DBUG_VOID_RETURN; @@ -2409,24 +2424,20 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) */ init_thr_alarm(thread_scheduler.max_threads + global_system_variables.max_insert_delayed_threads + 10); -#if SIGINT != THR_KILL_SIGNAL - if (test_flags & TEST_SIGINT) + if (thd_lib_detected != THD_LIB_LT && (test_flags & TEST_SIGINT)) { (void) sigemptyset(&set); // Setup up SIGINT for debug (void) sigaddset(&set,SIGINT); // For debugging (void) pthread_sigmask(SIG_UNBLOCK,&set,NULL); } -#endif (void) sigemptyset(&set); // Setup up SIGINT for debug #ifdef USE_ONE_SIGNAL_HAND (void) sigaddset(&set,THR_SERVER_ALARM); // For alarms #endif #ifndef IGNORE_SIGHUP_SIGQUIT (void) sigaddset(&set,SIGQUIT); -#if THR_CLIENT_ALARM != SIGHUP (void) sigaddset(&set,SIGHUP); #endif -#endif (void) sigaddset(&set,SIGTERM); (void) sigaddset(&set,SIGTSTP); @@ -2911,6 +2922,14 @@ static int init_common_variables(const char *conf_file_name, int argc, return 1; global_system_variables.character_set_filesystem= character_set_filesystem; + if (!(my_default_lc_time_names= + my_locale_by_name(lc_time_names_name))) + { + sql_print_error("Unknown locale: '%s'", lc_time_names_name); + return 1; + } + global_system_variables.lc_time_names= my_default_lc_time_names; + sys_init_connect.value_length= 0; if ((sys_init_connect.value= opt_init_connect)) sys_init_connect.value_length= strlen(opt_init_connect); @@ -3056,7 +3075,7 @@ static int init_thread_environment() (void) pthread_mutex_init(&LOCK_server_started, MY_MUTEX_INIT_FAST); (void) pthread_cond_init(&COND_server_started,NULL); sp_cache_init(); - Events::get_instance()->init_mutexes(); + Events::init_mutexes(); /* Parameter for threads created for connections */ (void) pthread_attr_init(&connection_attrib); (void) pthread_attr_setdetachstate(&connection_attrib, @@ -3158,6 +3177,7 @@ static void init_ssl() DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd)); if (!ssl_acceptor_fd) { + sql_print_warning("Failed to setup SSL"); opt_use_ssl = 0; have_ssl= SHOW_OPTION_DISABLED; } @@ -3620,6 +3640,13 @@ int main(int argc, char **argv) MY_INIT(argv[0]); // init my_sys library & pthreads /* nothing should come before this line ^^^ */ + /* Set signal used to kill MySQL */ +#if defined(SIGUSR2) + thr_kill_signal= thd_lib_detected == THD_LIB_LT ? SIGINT : SIGUSR2; +#else + thr_kill_signal= SIGINT; +#endif + /* Perform basic logger initialization logger. Should be called after MY_INIT, as it initializes mutexes. Log tables are inited later. 
@@ -3799,6 +3826,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); udf_init(); #endif } + init_status_vars(); if (opt_bootstrap) /* If running with bootstrap, do not start replication. */ opt_skip_slave_start= 1; @@ -3834,17 +3862,15 @@ we force server id to 2, but this MySQL server will not act as a slave."); create_shutdown_thread(); create_maintenance_thread(); + if (Events::init(opt_noacl)) + unireg_abort(1); + sql_print_information(ER(ER_STARTUP),my_progname,server_version, ((unix_sock == INVALID_SOCKET) ? (char*) "" : mysqld_unix_port), mysqld_port, MYSQL_COMPILATION_COMMENT); - if (!opt_noacl) - { - if (Events::get_instance()->init()) - unireg_abort(1); - } /* Signal threads waiting for server to be started */ pthread_mutex_lock(&LOCK_server_started); @@ -4997,6 +5023,7 @@ enum options_mysqld OPT_DEFAULT_COLLATION, OPT_CHARACTER_SET_CLIENT_HANDSHAKE, OPT_CHARACTER_SET_FILESYSTEM, + OPT_LC_TIME_NAMES, OPT_INIT_CONNECT, OPT_INIT_SLAVE, OPT_SECURE_AUTH, @@ -5025,7 +5052,8 @@ enum options_mysqld OPT_MERGE, OPT_THREAD_HANDLING, OPT_INNODB_ROLLBACK_ON_TIMEOUT, - OPT_SECURE_FILE_PRIV + OPT_SECURE_FILE_PRIV, + OPT_OLD_MODE }; @@ -5339,6 +5367,11 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, "Client error messages in given language. May be given as a full path.", (gptr*) &language_ptr, (gptr*) &language_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"lc-time-names", OPT_LC_TIME_NAMES, + "Set the language used for the month names and the days of the week.", + (gptr*) &lc_time_names_name, + (gptr*) &lc_time_names_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"local-infile", OPT_LOCAL_INFILE, "Enable/disable LOAD DATA LOCAL INFILE (takes values 1|0).", (gptr*) &opt_local_infile, @@ -6256,6 +6289,10 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.net_write_timeout, (gptr*) &max_system_variables.net_write_timeout, 0, GET_ULONG, REQUIRED_ARG, NET_WRITE_TIMEOUT, 1, LONG_TIMEOUT, 0, 1, 0}, + { "old", OPT_OLD_MODE, "Use compatible behavior.", + (gptr*) &global_system_variables.old_mode, + (gptr*) &max_system_variables.old_mode, 0, GET_BOOL, NO_ARG, + 0, 0, 0, 0, 0, 0}, {"open_files_limit", OPT_OPEN_FILES_LIMIT, "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). 
If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", (gptr*) &open_files_limit, (gptr*) &open_files_limit, 0, GET_ULONG, @@ -6435,12 +6472,12 @@ The minimum value for this variable is 4096.", (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL, REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0}, {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE, - "Allocation block size for transactions to be stored in binary log", + "Allocation block size for various transaction-related structures", (gptr*) &global_system_variables.trans_alloc_block_size, (gptr*) &max_system_variables.trans_alloc_block_size, 0, GET_ULONG, REQUIRED_ARG, QUERY_ALLOC_BLOCK_SIZE, 1024, ~0L, 0, 1024, 0}, {"transaction_prealloc_size", OPT_TRANS_PREALLOC_SIZE, - "Persistent buffer for transactions to be stored in binary log", + "Persistent buffer for various transaction-related structures", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, @@ -6494,10 +6531,11 @@ static int show_rpl_status(THD *thd, SHOW_VAR *var, char *buff) static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff) { - var->type= SHOW_CHAR; + var->type= SHOW_MY_BOOL; pthread_mutex_lock(&LOCK_active_mi); - var->value= const_cast<char*>((active_mi && active_mi->slave_running && - active_mi->rli.slave_running) ? "ON" : "OFF"); + var->value= buff; + *((my_bool *)buff)= (my_bool) (active_mi && active_mi->slave_running && + active_mi->rli.slave_running); pthread_mutex_unlock(&LOCK_active_mi); return 0; } @@ -6713,12 +6751,20 @@ static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var, char *bu return 0; } -/* Functions relying on SSL */ +/* + Functions relying on SSL + Note: In the show_ssl_* functions, we need to check if we have a + valid vio-object since this isn't always true, specifically + when session_status or global_status is requested from + inside an Event. + */ static int show_ssl_get_version(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; - var->value= const_cast<char*>(thd->net.vio->ssl_arg ? - SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + var->value= const_cast<char*>(SSL_get_version((SSL*) thd->net.vio->ssl_arg)); + else + var->value= (char *)""; return 0; } @@ -6726,9 +6772,10 @@ static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_session_reused((SSL*) thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_session_reused((SSL*) thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6736,9 +6783,10 @@ static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_default_timeout((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6746,9 +6794,10 @@ static int show_ssl_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? 
- SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->net.vio && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } @@ -6756,17 +6805,20 @@ static int show_ssl_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_LONG; var->value= buff; - *((long *)buff)= (long)thd->net.vio->ssl_arg ? - SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg) : - 0; + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + *((long *)buff)= (long)SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg); + else + *((long *)buff)= 0; return 0; } static int show_ssl_get_cipher(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; - var->value= const_cast<char*>(thd->net.vio->ssl_arg ? - SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : ""); + if( thd->vio_ok() && thd->net.vio->ssl_arg ) + var->value= const_cast<char*>(SSL_get_cipher((SSL*) thd->net.vio->ssl_arg)); + else + var->value= (char *)""; return 0; } @@ -6774,7 +6826,7 @@ static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff) { var->type= SHOW_CHAR; var->value= buff; - if (thd->net.vio->ssl_arg) + if (thd->vio_ok() && thd->net.vio->ssl_arg) { int i; const char *p; @@ -7201,7 +7253,7 @@ static void mysql_init_variables(void) default_collation_name= compiled_default_collation_name; sys_charset_system.value= (char*) system_charset_info->csname; character_set_filesystem_name= (char*) "binary"; - + lc_time_names_name= (char*) "en_US"; /* Set default values for some option variables */ default_storage_engine_str= (char*) "MyISAM"; global_system_variables.table_type= myisam_hton; @@ -7314,6 +7366,18 @@ static void mysql_init_variables(void) /* Allow Win32 and NetWare users to move MySQL anywhere */ { char prg_dev[LIBLEN]; +#if defined __WIN__ + char executing_path_name[LIBLEN]; + if (!test_if_hard_path(my_progname)) + { + // we don't want to use GetModuleFileName inside of my_path since + // my_path is a generic path dereferencing function and here we care + // only about the executing binary. + GetModuleFileName(NULL, executing_path_name, sizeof(executing_path_name)); + my_path(prg_dev, executing_path_name, NULL); + } + else +#endif my_path(prg_dev,my_progname,"mysql/bin"); strcat(prg_dev,"/../"); // Remove 'bin' to get base dir cleanup_dirname(mysql_home,prg_dev); @@ -7411,11 +7475,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int) OPT_INIT_RPL_ROLE: { int role; - if ((role=find_type(argument, &rpl_role_typelib, 2)) <= 0) - { - fprintf(stderr, "Unknown replication role: %s\n", argument); - exit(1); - } + role= find_type_or_exit(argument, &rpl_role_typelib, opt->name); rpl_status = (role == 1) ? 
RPL_AUTH_MASTER : RPL_IDLE_SLAVE; break; } @@ -7471,17 +7531,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case OPT_BINLOG_FORMAT: { int id; - if ((id= find_type(argument, &binlog_format_typelib, 2)) <= 0) - { - fprintf(stderr, - "Unknown binary log format: '%s' " - "(should be one of '%s', '%s', '%s')\n", - argument, - binlog_format_names[BINLOG_FORMAT_STMT], - binlog_format_names[BINLOG_FORMAT_ROW], - binlog_format_names[BINLOG_FORMAT_MIXED]); - exit(1); - } + id= find_type_or_exit(argument, &binlog_format_typelib, opt->name); global_system_variables.binlog_format= opt_binlog_format_id= id - 1; break; } @@ -7541,43 +7591,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), else { log_output_str= argument; - if ((log_output_options= - find_bit_type(argument, &log_output_typelib)) == ~(ulong) 0) - { - fprintf(stderr, "Unknown option to log-output: %s\n", argument); - exit(1); - } - } + log_output_options= + find_bit_type_or_exit(argument, &log_output_typelib, opt->name); + } break; } #endif case OPT_EVENT_SCHEDULER: - if (!argument) - Events::opt_event_scheduler= Events::EVENTS_DISABLED; - else - { - int type; - /* - type= 5 1 2 3 4 - (DISABLE ) - (OFF | ON) - (0 | 1) - */ - switch ((type=find_type(argument, &Events::opt_typelib, 1))) { - case 0: - fprintf(stderr, "Unknown option to event-scheduler: %s\n",argument); - exit(1); - case 5: /* OPT_DISABLED */ - Events::opt_event_scheduler= Events::EVENTS_DISABLED; - break; - case 2: /* OPT_ON */ - case 4: /* 1 */ - Events::opt_event_scheduler= Events::EVENTS_ON; - break; - case 1: /* OPT_OFF */ - case 3: /* 0 */ - Events::opt_event_scheduler= Events::EVENTS_OFF; - break; - } - } + if (Events::set_opt_event_scheduler(argument)) + exit(1); break; case (int) OPT_SKIP_NEW: opt_specialflag|= SPECIAL_NO_NEW_FUNC; @@ -7714,11 +7736,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), else { int type; - if ((type=find_type(argument, &delay_key_write_typelib, 2)) <= 0) - { - fprintf(stderr,"Unknown delay_key_write type: %s\n",argument); - exit(1); - } + type= find_type_or_exit(argument, &delay_key_write_typelib, opt->name); delay_key_write_options= (uint) type-1; } break; @@ -7729,11 +7747,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case OPT_TX_ISOLATION: { int type; - if ((type=find_type(argument, &tx_isolation_typelib, 2)) <= 0) - { - fprintf(stderr,"Unknown transaction isolation type: %s\n",argument); - exit(1); - } + type= find_type_or_exit(argument, &tx_isolation_typelib, opt->name); global_system_variables.tx_isolation= (type-1); break; } @@ -7774,16 +7788,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), break; case OPT_NDB_DISTRIBUTION: int id; - if ((id= find_type(argument, &ndb_distribution_typelib, 2)) <= 0) - { - fprintf(stderr, - "Unknown ndb distribution type: '%s' " - "(should be '%s' or '%s')\n", - argument, - ndb_distribution_names[ND_KEYHASH], - ndb_distribution_names[ND_LINHASH]); - exit(1); - } + id= find_type_or_exit(argument, &ndb_distribution_typelib, opt->name); opt_ndb_distribution_id= (enum ndb_distribution)(id-1); break; case OPT_NDB_EXTRA_LOGGING: @@ -7823,12 +7828,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), else { myisam_recover_options_str=argument; - if ((myisam_recover_options= - find_bit_type(argument, &myisam_recover_typelib)) == ~(ulong) 0) - { - fprintf(stderr, "Unknown option to myisam-recover: 
%s\n",argument); - exit(1); - } + myisam_recover_options= + find_bit_type_or_exit(argument, &myisam_recover_typelib, opt->name); } ha_open_options|=HA_OPEN_ABORT_IF_CRASHED; break; @@ -7841,14 +7842,10 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), myisam_concurrent_insert= 0; /* --skip-concurrent-insert */ break; case OPT_TC_HEURISTIC_RECOVER: - { - if ((tc_heuristic_recover=find_type(argument, - &tc_heuristic_recover_typelib, 2)) <=0) - { - fprintf(stderr, "Unknown option to tc-heuristic-recover: %s\n",argument); - exit(1); - } - } + tc_heuristic_recover= find_type_or_exit(argument, + &tc_heuristic_recover_typelib, + opt->name); + break; case OPT_MYISAM_STATS_METHOD: { ulong method_conv; @@ -7856,11 +7853,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), LINT_INIT(method_conv); myisam_stats_method_str= argument; - if ((method=find_type(argument, &myisam_stats_method_typelib, 2)) <= 0) - { - fprintf(stderr, "Invalid value of myisam_stats_method: %s.\n", argument); - exit(1); - } + method= find_type_or_exit(argument, &myisam_stats_method_typelib, + opt->name); switch (method-1) { case 2: method_conv= MI_STATS_METHOD_IGNORE_NULLS; @@ -7879,12 +7873,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case OPT_SQL_MODE: { sql_mode_str= argument; - if ((global_system_variables.sql_mode= - find_bit_type(argument, &sql_mode_typelib)) == ~(ulong) 0) - { - fprintf(stderr, "Unknown option to sql-mode: %s\n", argument); - exit(1); - } + global_system_variables.sql_mode= + find_bit_type_or_exit(argument, &sql_mode_typelib, opt->name); global_system_variables.sql_mode= fix_sql_mode(global_system_variables. sql_mode); break; @@ -7894,16 +7884,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), break; case OPT_THREAD_HANDLING: { - if ((global_system_variables.thread_handling= - find_type(argument, &thread_handling_typelib, 2)) <= 0 || - (global_system_variables.thread_handling == SCHEDULER_POOL_OF_THREADS - && !HAVE_POOL_OF_THREADS)) - { - /* purecov: begin tested */ - fprintf(stderr,"Unknown/unsupported thread-handling: %s\n",argument); - exit(1); - /* purecov: end */ - } + global_system_variables.thread_handling= + find_type_or_exit(argument, &thread_handling_typelib, opt->name); break; } case OPT_FT_BOOLEAN_SYNTAX: @@ -8192,6 +8174,30 @@ static void fix_paths(void) } +static ulong find_bit_type_or_exit(const char *x, TYPELIB *bit_lib, + const char *option) +{ + ulong res; + + const char **ptr; + + if ((res= find_bit_type(x, bit_lib)) == ~(ulong) 0) + { + ptr= bit_lib->type_names; + if (!*x) + fprintf(stderr, "No option given to %s\n", option); + else + fprintf(stderr, "Wrong option to %s. Option(s) given: %s\n", option, x); + fprintf(stderr, "Alternatives are: '%s'", *ptr); + while (*++ptr) + fprintf(stderr, ",'%s'", *ptr); + fprintf(stderr, "\n"); + exit(1); + } + return res; +} + + /* Return a bitfield from a string of substrings separated by ',' returns ~(ulong) 0 on error. 
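The repeated "Unknown option to X" blocks in get_one_option() are replaced here by find_type_or_exit()/find_bit_type_or_exit(), which print the offending value together with the full list of alternatives from the TYPELIB before exiting. The following self-contained sketch reproduces only that reporting shape; the simplified name table and the toy comma-separated parser are stand-ins, not the real TYPELIB/find_bit_type().

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  static const char *log_output_names[]= { "NONE", "FILE", "TABLE", NULL };

  // Toy, case-sensitive counterpart of find_bit_type(); ~0UL marks an error.
  static unsigned long find_bit_type_sketch(const char *x, const char **names)
  {
    if (!*x)
      return ~0UL;                           // empty value, like "No option given"
    unsigned long res= 0;
    char buf[128];
    strncpy(buf, x, sizeof(buf) - 1);
    buf[sizeof(buf) - 1]= '\0';
    for (char *tok= strtok(buf, ","); tok; tok= strtok(NULL, ","))
    {
      unsigned long i;
      for (i= 0; names[i] && strcmp(names[i], tok); i++) ;
      if (!names[i])
        return ~0UL;                         // unknown element
      res|= 1UL << i;
    }
    return res;
  }

  // Same error-reporting pattern as the find_bit_type_or_exit() added above.
  static unsigned long find_bit_type_or_exit_sketch(const char *x,
                                                    const char **names,
                                                    const char *option)
  {
    unsigned long res= find_bit_type_sketch(x, names);
    if (res == ~0UL)
    {
      const char **ptr= names;
      if (!*x)
        fprintf(stderr, "No option given to %s\n", option);
      else
        fprintf(stderr, "Wrong option to %s. Option(s) given: %s\n", option, x);
      fprintf(stderr, "Alternatives are: '%s'", *ptr);
      while (*++ptr)
        fprintf(stderr, ",'%s'", *ptr);
      fprintf(stderr, "\n");
      exit(1);
    }
    return res;
  }

  int main()
  {
    // e.g. --log-output=FILE,TABLE parses cleanly; a typo prints the alternatives.
    printf("mask: %lu\n", find_bit_type_or_exit_sketch("FILE,TABLE",
                                                       log_output_names,
                                                       "log-output"));
    return 0;
  }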
diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 2156888b8cf..719e2ee0d3c 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -298,8 +298,8 @@ void net_clear(NET *net, my_bool clear_buffer) { DBUG_PRINT("info",("skipped %d bytes from file: %s", count, vio_description(net->vio))); -#if defined(EXTRA_DEBUG) && (MYSQL_VERSION_ID < 51000) - fprintf(stderr,"Error: net_clear() skipped %d bytes from file: %s\n", +#if defined(EXTRA_DEBUG) + fprintf(stderr,"Note: net_clear() skipped %d bytes from file: %s\n", count, vio_description(net->vio)); #endif } @@ -818,7 +818,7 @@ my_real_read(NET *net, ulong *complen) { my_bool interrupted = vio_should_retry(net->vio); - DBUG_PRINT("info",("vio_read returned %ld, errno: %d", + DBUG_PRINT("info",("vio_read returned %ld errno: %d", length, vio_errno(net->vio))); #if !defined(__WIN__) || defined(MYSQL_SERVER) /* @@ -903,9 +903,12 @@ my_real_read(NET *net, ulong *complen) (int) net->buff[net->where_b + 3], net->pkt_nr)); #ifdef EXTRA_DEBUG + fflush(stdout); fprintf(stderr,"Error: Packets out of order (Found: %d, expected %d)\n", (int) net->buff[net->where_b + 3], (uint) (uchar) net->pkt_nr); + fflush(stderr); + DBUG_ASSERT(0); #endif } len= packet_error; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 3dc780e1b5f..f5cf79bd609 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -84,7 +84,7 @@ static int sel_cmp(Field *f,char *a,char *b,uint8 a_flag,uint8 b_flag); static char is_null_string[2]= {1,0}; - +class RANGE_OPT_PARAM; /* A construction block of the SEL_ARG-graph. @@ -170,6 +170,89 @@ static char is_null_string[2]= {1,0}; - get_quick_select() - Walk the SEL_ARG, materialize the key intervals, and create QUICK_RANGE_SELECT object that will read records within these intervals. + + 4. SPACE COMPLEXITY NOTES + + SEL_ARG graph is a representation of an ordered disjoint sequence of + intervals over the ordered set of index tuple values. + + For multi-part keys, one can construct a WHERE expression such that its + list of intervals will be of combinatorial size. Here is an example: + + (keypart1 IN (1,2, ..., n1)) AND + (keypart2 IN (1,2, ..., n2)) AND + (keypart3 IN (1,2, ..., n3)) + + For this WHERE clause the list of intervals will have n1*n2*n3 intervals + of form + + (keypart1, keypart2, keypart3) = (k1, k2, k3), where 1 <= k{i} <= n{i} + + SEL_ARG graph structure aims to reduce the amount of required space by + "sharing" the elementary intervals when possible (the pic at the + beginning of this comment has examples of such sharing). The sharing may + prevent combinatorial blowup: + + There are WHERE clauses that have combinatorial-size interval lists but + will be represented by a compact SEL_ARG graph. + Example: + (keypartN IN (1,2, ..., n1)) AND + ... + (keypart2 IN (1,2, ..., n2)) AND + (keypart1 IN (1,2, ..., n3)) + + but not in all cases: + + - There are WHERE clauses that do have a compact SEL_ARG-graph + representation but get_mm_tree() and its callees will construct a + graph of combinatorial size. + Example: + (keypart1 IN (1,2, ..., n1)) AND + (keypart2 IN (1,2, ..., n2)) AND + ... + (keypartN IN (1,2, ..., n3)) + + - There are WHERE clauses for which the minimal possible SEL_ARG graph + representation will have combinatorial size. 
+ Example: + By induction: Let's take any interval on some keypart in the middle: + + kp15=c0 + + Then let's AND it with this interval 'structure' from preceding and + following keyparts: + + (kp14=c1 AND kp16=c3) OR keypart14=c2) (*) + + We will obtain this SEL_ARG graph: + + kp14 $ kp15 $ kp16 + $ $ + +---------+ $ +---------+ $ +---------+ + | kp14=c1 |--$-->| kp15=c0 |--$-->| kp16=c3 | + +---------+ $ +---------+ $ +---------+ + | $ $ + +---------+ $ +---------+ $ + | kp14=c2 |--$-->| kp15=c0 | $ + +---------+ $ +---------+ $ + $ $ + + Note that we had to duplicate "kp15=c0" and there was no way to avoid + that. + The induction step: AND the obtained expression with another "wrapping" + expression like (*). + When the process ends because of the limit on max. number of keyparts + we'll have: + + WHERE clause length is O(3*#max_keyparts) + SEL_ARG graph size is O(2^(#max_keyparts/2)) + + (it is also possible to construct a case where instead of 2 in 2^n we + have a bigger constant, e.g. 4, and get a graph with 4^(31/2)= 2^31 + nodes) + + We avoid consuming too much memory by setting a limit on the number of + SEL_ARG object we can construct during one range analysis invocation. */ class SEL_ARG :public Sql_alloc @@ -200,6 +283,8 @@ public: enum leaf_color { BLACK,RED } color; enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type; + enum { MAX_SEL_ARGS = 16000 }; + SEL_ARG() {} SEL_ARG(SEL_ARG &); SEL_ARG(Field *,const char *,const char *); @@ -271,7 +356,7 @@ public: return new SEL_ARG(field, part, min_value, arg->max_value, min_flag, arg->max_flag, maybe_flag | arg->maybe_flag); } - SEL_ARG *clone(SEL_ARG *new_parent,SEL_ARG **next); + SEL_ARG *clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, SEL_ARG **next); bool copy_min(SEL_ARG* arg) { // Get overlapping range @@ -312,7 +397,8 @@ public: min_value=arg->max_value; min_flag=arg->max_flag & NEAR_MAX ? 
0 : NEAR_MIN; } - void store_min(uint length,char **min_key,uint min_key_flag) + /* returns a number of keypart values (0 or 1) appended to the key buffer */ + int store_min(uint length,char **min_key,uint min_key_flag) { if ((min_flag & GEOM_FLAG) || (!(min_flag & NO_MIN_RANGE) && @@ -326,12 +412,13 @@ public: else memcpy(*min_key,min_value,length); (*min_key)+= length; + return 1; } + return 0; } - void store(uint length,char **min_key,uint min_key_flag, - char **max_key, uint max_key_flag) + /* returns a number of keypart values (0 or 1) appended to the key buffer */ + int store_max(uint length,char **max_key, uint max_key_flag) { - store_min(length, min_key, min_key_flag); if (!(max_flag & NO_MAX_RANGE) && !(max_key_flag & (NO_MAX_RANGE | NEAR_MAX))) { @@ -343,33 +430,41 @@ public: else memcpy(*max_key,max_value,length); (*max_key)+= length; + return 1; } + return 0; } - void store_min_key(KEY_PART *key,char **range_key, uint *range_key_flag) + /* returns a number of keypart values appended to the key buffer */ + int store_min_key(KEY_PART *key,char **range_key, uint *range_key_flag) { SEL_ARG *key_tree= first(); - key_tree->store(key[key_tree->part].store_length, - range_key,*range_key_flag,range_key,NO_MAX_RANGE); + uint res= key_tree->store_min(key[key_tree->part].store_length, + range_key, *range_key_flag); *range_key_flag|= key_tree->min_flag; if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && !(*range_key_flag & (NO_MIN_RANGE | NEAR_MIN)) && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) - key_tree->next_key_part->store_min_key(key,range_key, range_key_flag); + res+= key_tree->next_key_part->store_min_key(key, range_key, + range_key_flag); + return res; } - void store_max_key(KEY_PART *key,char **range_key, uint *range_key_flag) + /* returns a number of keypart values appended to the key buffer */ + int store_max_key(KEY_PART *key,char **range_key, uint *range_key_flag) { SEL_ARG *key_tree= last(); - key_tree->store(key[key_tree->part].store_length, - range_key, NO_MIN_RANGE, range_key,*range_key_flag); + uint res=key_tree->store_max(key[key_tree->part].store_length, + range_key, *range_key_flag); (*range_key_flag)|= key_tree->max_flag; if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && !(*range_key_flag & (NO_MAX_RANGE | NEAR_MAX)) && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) - key_tree->next_key_part->store_max_key(key,range_key, range_key_flag); + res+= key_tree->next_key_part->store_max_key(key, range_key, + range_key_flag); + return res; } SEL_ARG *insert(SEL_ARG *key); @@ -413,7 +508,6 @@ public: { return parent->left == this ? 
&parent->left : &parent->right; } - SEL_ARG *clone_tree(); /* @@ -456,6 +550,7 @@ public: } return !field->key_cmp(min_val, max_val); } + SEL_ARG *clone_tree(RANGE_OPT_PARAM *param); }; class SEL_IMERGE; @@ -535,6 +630,8 @@ public: using_real_indexes==TRUE */ uint real_keynr[MAX_KEY]; + /* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */ + uint alloced_sel_args; }; class PARAM : public RANGE_OPT_PARAM @@ -583,11 +680,11 @@ static SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param,COND *cond_func,Field *field, static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond); static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts); -static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree, +static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree, bool update_tbl_stats); static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, - char *min_key,uint min_key_flag, - char *max_key, uint max_key_flag); + char *min_key, uint min_key_flag, int, + char *max_key, uint max_key_flag, int); QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index, SEL_ARG *key_tree, @@ -624,8 +721,9 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg); static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2); -static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2); -static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag); +static SEL_ARG *key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2); +static SEL_ARG *key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, + uint clone_flag); static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1); bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, @@ -850,6 +948,7 @@ int imerge_list_or_tree(RANGE_OPT_PARAM *param, return im1->is_empty(); } + /*************************************************************************** ** Basic functions for SQL_SELECT and QUICK_RANGE_SELECT ***************************************************************************/ @@ -1453,6 +1552,7 @@ QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT() QUICK_RANGE::QUICK_RANGE() :min_key(0),max_key(0),min_length(0),max_length(0), + min_keypart_map(0), max_keypart_map(0), flag(NO_MIN_RANGE | NO_MAX_RANGE) {} @@ -1499,12 +1599,18 @@ SEL_ARG::SEL_ARG(Field *field_,uint8 part_,char *min_value_,char *max_value_, left=right= &null_element; } -SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) +SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, + SEL_ARG **next_arg) { SEL_ARG *tmp; + + /* Bail out if we have already generated too many SEL_ARGs */ + if (++param->alloced_sel_args > MAX_SEL_ARGS) + return 0; + if (type != KEY_RANGE) { - if (!(tmp= new SEL_ARG(type))) + if (!(tmp= new (param->mem_root) SEL_ARG(type))) return 0; // out of memory tmp->prev= *next_arg; // Link into next/prev chain (*next_arg)->next=tmp; @@ -1512,20 +1618,21 @@ SEL_ARG *SEL_ARG::clone(SEL_ARG *new_parent,SEL_ARG **next_arg) } else { - if (!(tmp= new SEL_ARG(field,part, min_value,max_value, - min_flag, max_flag, maybe_flag))) + if (!(tmp= new (param->mem_root) SEL_ARG(field,part, min_value,max_value, + min_flag, max_flag, maybe_flag))) return 0; // OOM tmp->parent=new_parent; tmp->next_key_part=next_key_part; if (left != &null_element) - 
tmp->left=left->clone(tmp,next_arg); + if (!(tmp->left=left->clone(param, tmp, next_arg))) + return 0; // OOM tmp->prev= *next_arg; // Link into next/prev chain (*next_arg)->next=tmp; (*next_arg)= tmp; if (right != &null_element) - if (!(tmp->right= right->clone(tmp,next_arg))) + if (!(tmp->right= right->clone(param, tmp, next_arg))) return 0; // OOM } increment_use_count(1); @@ -1603,11 +1710,12 @@ static int sel_cmp(Field *field, char *a,char *b,uint8 a_flag,uint8 b_flag) } -SEL_ARG *SEL_ARG::clone_tree() +SEL_ARG *SEL_ARG::clone_tree(RANGE_OPT_PARAM *param) { SEL_ARG tmp_link,*next_arg,*root; next_arg= &tmp_link; - root= clone((SEL_ARG *) 0, &next_arg); + if (!(root= clone(param, (SEL_ARG *) 0, &next_arg))) + return 0; next_arg->next=0; // Fix last link tmp_link.next->prev=0; // Fix first link if (root) // If not OOM @@ -2109,11 +2217,12 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.real_keynr[param.keys++]=idx; } param.key_parts_end=key_parts; + param.alloced_sel_args= 0; /* Calculate cost of full index read for the shortest covering index */ - if (!head->used_keys.is_clear_all()) + if (!head->covering_keys.is_clear_all()) { - int key_for_use= find_shortest_key(head, &head->used_keys); + int key_for_use= find_shortest_key(head, &head->covering_keys); double key_read_time= (get_index_only_read_time(¶m, records, key_for_use) + (double) records / TIME_FOR_COMPARE); @@ -2501,6 +2610,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) range_par->using_real_indexes= FALSE; range_par->remove_jump_scans= FALSE; range_par->real_keynr[0]= 0; + range_par->alloced_sel_args= 0; thd->no_errors=1; // Don't warn about NULL thd->mem_root=&alloc; @@ -4031,9 +4141,9 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) The calculation is conducted as follows: Lets denote #records(keypart1, ... keypartK) as n_k. We need to calculate - n_{k1} n_{k_2} + n_{k1} n_{k2} --------- * --------- * .... (3) - n_{k1-1} n_{k2_1} + n_{k1-1} n_{k2-1} where k1,k2,... are key parts which fields were not yet marked as fixed ( this is result of application of option b) of the recursion step for @@ -4041,9 +4151,9 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) Since it is reasonable to expect that most of the fields are not marked as fixed, we calculate (3) as - n_{i1} n_{i_2} + n_{i1} n_{i2} (3) = n_{max_key_part} / ( --------- * --------- * .... ) - n_{i1-1} n_{i2_1} + n_{i1-1} n_{i2-1} where i1,i2, .. are key parts that were already marked as fixed. @@ -4052,7 +4162,6 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) RETURN Selectivity of given ROR scan. 
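In the ror_scan_selectivity() body that follows (and later in check_quick_keys()), the patch tracks which key parts have actually been stored into the probe tuple and passes that to records_in_range() as a key_part_map, built incrementally with keypart_map= (keypart_map << 1) | 1 as each part is appended. The short sketch below only illustrates that this construction yields the "first N key parts" mask, i.e. the same value as (1 << N) - 1; the typedef name is a stand-in.

  #include <cassert>
  #include <cstdio>

  typedef unsigned long key_part_map_sketch;    // stand-in for key_part_map

  int main()
  {
    key_part_map_sketch map= 0;
    // Append key part values one by one, exactly as the selectivity loop does:
    // each appended part shifts the map left and sets the low bit.
    for (unsigned n= 1; n <= 5; n++)
    {
      map= (map << 1) | 1;
      // After n parts the map covers key parts 0..n-1, i.e. (1 << n) - 1.
      assert(map == (key_part_map_sketch(1) << n) - 1);
      printf("parts stored: %u  keypart_map: 0x%lx\n", n, map);
    }
    return 0;
  }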
- */ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, @@ -4063,6 +4172,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, byte key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */ char *key_ptr= (char*) key_val; SEL_ARG *sel_arg, *tuple_arg= NULL; + key_part_map keypart_map= 0; bool cur_covered; bool prev_covered= test(bitmap_is_set(&info->covered_fields, key_part->fieldnr-1)); @@ -4073,7 +4183,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, max_range.key= (byte*) key_val; max_range.flag= HA_READ_AFTER_KEY; ha_rows prev_records= info->param->table->file->stats.records; - DBUG_ENTER("ror_intersect_selectivity"); + DBUG_ENTER("ror_scan_selectivity"); for (sel_arg= scan->sel_arg; sel_arg; sel_arg= sel_arg->next_key_part) @@ -4090,13 +4200,17 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, tuple_arg= scan->sel_arg; /* Here we use the length of the first key part */ tuple_arg->store_min(key_part->store_length, &key_ptr, 0); + keypart_map= 1; } while (tuple_arg->next_key_part != sel_arg) { tuple_arg= tuple_arg->next_key_part; - tuple_arg->store_min(key_part[tuple_arg->part].store_length, &key_ptr, 0); + tuple_arg->store_min(key_part[tuple_arg->part].store_length, + &key_ptr, 0); + keypart_map= (keypart_map << 1) | 1; } min_range.length= max_range.length= ((char*) key_ptr - (char*) key_val); + min_range.keypart_map= max_range.keypart_map= keypart_map; records= (info->param->table->file-> records_in_range(scan->keynr, &min_range, &max_range)); if (cur_covered) @@ -4646,7 +4760,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, param->needed_reg->set_bit(keynr); bool read_index_only= index_read_must_be_used ? TRUE : - (bool) param->table->used_keys.is_set(keynr); + (bool) param->table->covering_keys.is_set(keynr); found_records= check_quick_select(param, idx, *key, update_tbl_stats); if (param->is_ror_scan) @@ -5227,7 +5341,8 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond) while ((item=li++)) { SEL_TREE *new_tree=get_mm_tree(param,item); - if (param->thd->is_fatal_error) + if (param->thd->is_fatal_error || + param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS) DBUG_RETURN(0); // out of memory tree=tree_and(param,tree,new_tree); if (tree && tree->type == SEL_TREE::IMPOSSIBLE) @@ -5305,12 +5420,11 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond) */ for (uint i= 1 ; i < cond_func->arg_count ; i++) { - if (cond_func->arguments()[i]->real_item()->type() == Item::FIELD_ITEM) { field_item= (Item_field*) (cond_func->arguments()[i]->real_item()); SEL_TREE *tmp= get_full_func_mm_tree(param, cond_func, - field_item, (Item*) i, inv); + field_item, (Item*)(intptr)i, inv); if (inv) tree= !tree ? 
tmp : tree_or(param, tree, tmp); else @@ -5804,9 +5918,9 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) tree1->type=SEL_TREE::KEY_SMALLER; DBUG_RETURN(tree1); } - key_map result_keys; result_keys.clear_all(); + /* Join the trees key per key */ SEL_ARG **key1,**key2,**end; for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; @@ -5819,7 +5933,7 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) flag|=CLONE_KEY1_MAYBE; if (*key2 && !(*key2)->simple_key()) flag|=CLONE_KEY2_MAYBE; - *key1=key_and(*key1,*key2,flag); + *key1=key_and(param, *key1, *key2, flag); if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; @@ -5827,8 +5941,8 @@ tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) } result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - if (*key1) - (*key1)->test_use_count(*key1); + if (*key1 && param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif } } @@ -5980,13 +6094,14 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ; key1 != end ; key1++,key2++) { - *key1=key_or(*key1,*key2); + *key1=key_or(param, *key1, *key2); if (*key1) { result=tree1; // Added to tree1 result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - (*key1)->test_use_count(*key1); + if (param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif } } @@ -6044,14 +6159,15 @@ tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) /* And key trees where key1->part < key2 -> part */ static SEL_ARG * -and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) +and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, + uint clone_flag) { SEL_ARG *next; ulong use_count=key1->use_count; if (key1->elements != 1) { - key2->use_count+=key1->elements-1; + key2->use_count+=key1->elements-1; //psergey: why we don't count that key1 has n-k-p? key2->increment_use_count((int) key1->elements-1); } if (key1->type == SEL_ARG::MAYBE_KEY) @@ -6063,7 +6179,7 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) { if (next->next_key_part) { - SEL_ARG *tmp=key_and(next->next_key_part,key2,clone_flag); + SEL_ARG *tmp= key_and(param, next->next_key_part, key2, clone_flag); if (tmp && tmp->type == SEL_ARG::IMPOSSIBLE) { key1=key1->tree_delete(next); @@ -6072,6 +6188,8 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) next->next_key_part=tmp; if (use_count) next->increment_use_count(use_count); + if (param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS) + break; } else next->next_key_part=key2; @@ -6088,8 +6206,10 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) SYNOPSIS key_and() - key1 First argument, root of its RB-tree - key2 Second argument, root of its RB-tree + param Range analysis context (needed to track if we have allocated + too many SEL_ARGs) + key1 First argument, root of its RB-tree + key2 Second argument, root of its RB-tree RETURN RB-tree root of the resulting SEL_ARG graph. 
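As the updated key_and()/key_or() signatures show, the RANGE_OPT_PARAM is threaded through so that SEL_ARG::clone() can count every node it creates in param->alloced_sel_args and bail out once SEL_ARG::MAX_SEL_ARGS (16000) is exceeded, turning the combinatorial blow-up described in the space-complexity notes into a graceful "don't use range access" fallback. A stripped-down sketch of that pattern on a plain binary tree follows; Node, CloneBudget and the numbers are illustrative, not the real SEL_ARG machinery, and unlike the server (which allocates from a MEM_ROOT that is freed wholesale) this sketch simply leaks the partial copy when it gives up.

  #include <cstdio>
  #include <new>

  struct Node
  {
    int value;
    Node *left, *right;
    Node(int v) : value(v), left(NULL), right(NULL) {}
  };

  struct CloneBudget
  {
    unsigned alloced;       // counterpart of RANGE_OPT_PARAM::alloced_sel_args
    unsigned max_nodes;     // counterpart of SEL_ARG::MAX_SEL_ARGS
  };

  // Returns NULL either on OOM or when the budget is exhausted; the caller then
  // abandons the transformation instead of building an arbitrarily large copy.
  static Node *clone_bounded(const Node *src, CloneBudget *budget)
  {
    if (!src)
      return NULL;
    if (++budget->alloced > budget->max_nodes)
      return NULL;                              // too many nodes: bail out
    Node *copy= new (std::nothrow) Node(src->value);
    if (!copy)
      return NULL;                              // out of memory
    if (src->left && !(copy->left= clone_bounded(src->left, budget)))
      return NULL;
    if (src->right && !(copy->right= clone_bounded(src->right, budget)))
      return NULL;
    return copy;
  }

  int main()
  {
    Node a(1), b(2), c(3);
    a.left= &b; a.right= &c;
    CloneBudget budget= { 0, 2 };               // allow only 2 nodes: clone refuses
    printf("clone %s\n", clone_bounded(&a, &budget) ? "succeeded" : "refused");
    return 0;
  }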
@@ -6097,7 +6217,7 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) */ static SEL_ARG * -key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) +key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) { if (!key1) return key2; @@ -6113,9 +6233,9 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) // key1->part < key2->part key1->use_count--; if (key1->use_count > 0) - if (!(key1= key1->clone_tree())) + if (!(key1= key1->clone_tree(param))) return 0; // OOM - return and_all_keys(key1,key2,clone_flag); + return and_all_keys(param, key1, key2, clone_flag); } if (((clone_flag & CLONE_KEY2_MAYBE) && @@ -6133,14 +6253,14 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) if (key1->use_count > 1) { key1->use_count--; - if (!(key1=key1->clone_tree())) + if (!(key1=key1->clone_tree(param))) return 0; // OOM key1->use_count++; } if (key1->type == SEL_ARG::MAYBE_KEY) { // Both are maybe key - key1->next_key_part=key_and(key1->next_key_part,key2->next_key_part, - clone_flag); + key1->next_key_part=key_and(param, key1->next_key_part, + key2->next_key_part, clone_flag); if (key1->next_key_part && key1->next_key_part->type == SEL_ARG::IMPOSSIBLE) return key1; @@ -6151,7 +6271,7 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) if (key2->next_key_part) { key1->use_count--; // Incremented in and_all_keys - return and_all_keys(key1,key2,clone_flag); + return and_all_keys(param, key1, key2, clone_flag); } key2->use_count--; // Key2 doesn't have a tree } @@ -6187,7 +6307,8 @@ key_and(SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) } else if (get_range(&e2,&e1,key2)) continue; - SEL_ARG *next=key_and(e1->next_key_part,e2->next_key_part,clone_flag); + SEL_ARG *next=key_and(param, e1->next_key_part, e2->next_key_part, + clone_flag); e1->increment_use_count(1); e2->increment_use_count(1); if (!next || next->type != SEL_ARG::IMPOSSIBLE) @@ -6235,7 +6356,7 @@ get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1) static SEL_ARG * -key_or(SEL_ARG *key1,SEL_ARG *key2) +key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) { if (!key1) { @@ -6283,7 +6404,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { swap_variables(SEL_ARG *,key1,key2); } - if (key1->use_count > 0 || !(key1=key1->clone_tree())) + if (key1->use_count > 0 || !(key1=key1->clone_tree(param))) return 0; // OOM } @@ -6427,7 +6548,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) { // tmp.min. 
<= x <= tmp.max tmp->maybe_flag|= key.maybe_flag; key.increment_use_count(key1->use_count+1); - tmp->next_key_part=key_or(tmp->next_key_part,key.next_key_part); + tmp->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part); if (!cmp) // Key2 is ready break; key.copy_max_to_min(tmp); @@ -6458,7 +6579,7 @@ key_or(SEL_ARG *key1,SEL_ARG *key2) tmp->increment_use_count(key1->use_count+1); /* Increment key count as it may be used for next loop */ key.increment_use_count(1); - new_arg->next_key_part=key_or(tmp->next_key_part,key.next_key_part); + new_arg->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part); key1=key1->insert(new_arg); break; } @@ -7057,7 +7178,9 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats) } param->n_ranges= 0; - records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0); + records= check_quick_keys(param, idx, tree, + param->min_key, 0, -1, + param->max_key, 0, -1); if (records != HA_POS_ERROR) { if (update_tbl_stats) @@ -7120,12 +7243,13 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats) */ static ha_rows -check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, - char *min_key,uint min_key_flag, char *max_key, - uint max_key_flag) +check_quick_keys(PARAM *param, uint idx, SEL_ARG *key_tree, + char *min_key, uint min_key_flag, int min_keypart, + char *max_key, uint max_key_flag, int max_keypart) { ha_rows records=0, tmp; uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length; + uint tmp_min_keypart= min_keypart, tmp_max_keypart= max_keypart; char *tmp_min_key, *tmp_max_key; uint8 save_first_null_comp= param->first_null_comp; @@ -7139,18 +7263,21 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, This is not a ROR scan if the key is not Clustered Primary Key. 
*/ param->is_ror_scan= FALSE; - records=check_quick_keys(param,idx,key_tree->left,min_key,min_key_flag, - max_key,max_key_flag); + records=check_quick_keys(param, idx, key_tree->left, + min_key, min_key_flag, min_keypart, + max_key, max_key_flag, max_keypart); if (records == HA_POS_ERROR) // Impossible return records; } tmp_min_key= min_key; tmp_max_key= max_key; - key_tree->store(param->key[idx][key_tree->part].store_length, - &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); - min_key_length= (uint) (tmp_min_key- param->min_key); - max_key_length= (uint) (tmp_max_key- param->max_key); + tmp_min_keypart+= key_tree->store_min(param->key[idx][key_tree->part].store_length, + &tmp_min_key, min_key_flag); + tmp_max_keypart+= key_tree->store_max(param->key[idx][key_tree->part].store_length, + &tmp_max_key, max_key_flag); + min_key_length= (uint) (tmp_min_key - param->min_key); + max_key_length= (uint) (tmp_max_key - param->max_key); if (param->is_ror_scan) { @@ -7173,12 +7300,13 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) { // const key as prefix if (min_key_length == max_key_length && - !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !memcmp(min_key, max_key, (uint) (tmp_max_key - max_key)) && !key_tree->min_flag && !key_tree->max_flag) { - tmp=check_quick_keys(param,idx,key_tree->next_key_part, - tmp_min_key, min_key_flag | key_tree->min_flag, - tmp_max_key, max_key_flag | key_tree->max_flag); + tmp=check_quick_keys(param,idx,key_tree->next_key_part, tmp_min_key, + min_key_flag | key_tree->min_flag, tmp_min_keypart, + tmp_max_key, max_key_flag | key_tree->max_flag, + tmp_max_keypart); goto end; // Ugly, but efficient } else @@ -7190,18 +7318,20 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp_min_flag=key_tree->min_flag; tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) + tmp_min_keypart+= key_tree->next_key_part->store_min_key(param->key[idx], &tmp_min_key, &tmp_min_flag); if (!tmp_max_flag) + tmp_max_keypart+= key_tree->next_key_part->store_max_key(param->key[idx], &tmp_max_key, &tmp_max_flag); - min_key_length= (uint) (tmp_min_key- param->min_key); - max_key_length= (uint) (tmp_max_key- param->max_key); + min_key_length= (uint) (tmp_min_key - param->min_key); + max_key_length= (uint) (tmp_max_key - param->max_key); } else { - tmp_min_flag=min_key_flag | key_tree->min_flag; - tmp_max_flag=max_key_flag | key_tree->max_flag; + tmp_min_flag= min_key_flag | key_tree->min_flag; + tmp_max_flag= max_key_flag | key_tree->max_flag; } keynr=param->real_keynr[idx]; @@ -7209,9 +7339,8 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, if (!tmp_min_flag && ! tmp_max_flag && (uint) key_tree->part+1 == param->table->key_info[keynr].key_parts && (param->table->key_info[keynr].flags & (HA_NOSAME | HA_END_SPACE_KEY)) == - HA_NOSAME && - min_key_length == max_key_length && - !memcmp(param->min_key,param->max_key,min_key_length) && + HA_NOSAME && min_key_length == max_key_length && + !memcmp(param->min_key, param->max_key, min_key_length) && !param->first_null_comp) { tmp=1; // Max one record @@ -7231,7 +7360,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, first members of clustered primary key. 
*/ if (!(min_key_length == max_key_length && - !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !memcmp(min_key, max_key, (uint) (tmp_max_key - max_key)) && !key_tree->min_flag && !key_tree->max_flag && is_key_scan_ror(param, keynr, key_tree->part + 1))) param->is_ror_scan= FALSE; @@ -7243,11 +7372,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, key_range min_range; min_range.key= (byte*) param->min_key; min_range.length= min_key_length; + min_range.keypart_map= make_keypart_map(tmp_min_keypart); /* In this case tmp_min_flag contains the handler-read-function */ min_range.flag= (ha_rkey_function) (tmp_min_flag ^ GEOM_FLAG); - tmp= param->table->file->records_in_range(keynr, &min_range, - (key_range*) 0); + tmp= param->table->file->records_in_range(keynr, + &min_range, (key_range*) 0); } else { @@ -7257,10 +7387,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, min_range.length= min_key_length; min_range.flag= (tmp_min_flag & NEAR_MIN ? HA_READ_AFTER_KEY : HA_READ_KEY_EXACT); + min_range.keypart_map= make_keypart_map(tmp_min_keypart); max_range.key= (byte*) param->max_key; max_range.length= max_key_length; max_range.flag= (tmp_max_flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); + max_range.keypart_map= make_keypart_map(tmp_max_keypart); tmp=param->table->file->records_in_range(keynr, (min_key_length ? &min_range : (key_range*) 0), @@ -7281,8 +7413,9 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, This is not a ROR scan if the key is not Clustered Primary Key. */ param->is_ror_scan= FALSE; - tmp=check_quick_keys(param,idx,key_tree->right,min_key,min_key_flag, - max_key,max_key_flag); + tmp=check_quick_keys(param, idx, key_tree->right, + min_key, min_key_flag, min_keypart, + max_key, max_key_flag, max_keypart); if (tmp == HA_POS_ERROR) return tmp; records+=tmp; @@ -7429,6 +7562,8 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { QUICK_RANGE *range; uint flag; + int min_part= key_tree->part-1, // # of keypart values in min_key buffer + max_part= key_tree->part-1; // # of keypart values in max_key buffer if (key_tree->left != &null_element) { @@ -7437,16 +7572,18 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, return 1; } char *tmp_min_key=min_key,*tmp_max_key=max_key; - key_tree->store(key[key_tree->part].store_length, - &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); + min_part+= key_tree->store_min(key[key_tree->part].store_length, + &tmp_min_key,min_key_flag); + max_part+= key_tree->store_max(key[key_tree->part].store_length, + &tmp_max_key,max_key_flag); if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && key_tree->next_key_part->type == SEL_ARG::KEY_RANGE) { // const key as prefix - if (!((tmp_min_key - min_key) != (tmp_max_key - max_key) || - memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) || - key_tree->min_flag || key_tree->max_flag)) + if ((tmp_min_key - min_key) == (tmp_max_key - max_key) && + memcmp(min_key, max_key, (uint)(tmp_max_key - max_key))==0 && + key_tree->min_flag==0 && key_tree->max_flag==0) { if (get_quick_keys(param,quick,key,key_tree->next_key_part, tmp_min_key, min_key_flag | key_tree->min_flag, @@ -7457,10 +7594,10 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { uint tmp_min_flag=key_tree->min_flag,tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) - key_tree->next_key_part->store_min_key(key, &tmp_min_key, + min_part+= key_tree->next_key_part->store_min_key(key, &tmp_min_key, 
&tmp_min_flag); if (!tmp_max_flag) - key_tree->next_key_part->store_max_key(key, &tmp_max_key, + max_part+= key_tree->next_key_part->store_max_key(key, &tmp_max_key, &tmp_max_flag); flag=tmp_min_flag | tmp_max_flag; } @@ -7511,13 +7648,15 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, /* Get range for retrieving rows in QUICK_SELECT::get_next */ if (!(range= new QUICK_RANGE((const char *) param->min_key, (uint) (tmp_min_key - param->min_key), + min_part >=0 ? make_keypart_map(min_part) : 0, (const char *) param->max_key, (uint) (tmp_max_key - param->max_key), + max_part >=0 ? make_keypart_map(max_part) : 0, flag))) return 1; // out of memory - set_if_bigger(quick->max_used_key_length,range->min_length); - set_if_bigger(quick->max_used_key_length,range->max_length); + set_if_bigger(quick->max_used_key_length, range->min_length); + set_if_bigger(quick->max_used_key_length, range->max_length); set_if_bigger(quick->used_key_parts, (uint) key_tree->part+1); if (insert_dynamic(&quick->ranges, (gptr)&range)) return 1; @@ -7615,13 +7754,13 @@ bool QUICK_ROR_UNION_SELECT::is_keys_used(const MY_BITMAP *fields) thd Thread handle table Table to access ref ref[_or_null] scan parameters - records Estimate of number of records (needed only to construct + records Estimate of number of records (needed only to construct quick select) NOTES This allocates things in a new memory root, as this may be called many times during a query. - - RETURN + + RETURN Quick select that retrieves the same rows as passed ref scan NULL on error. */ @@ -7657,8 +7796,10 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, !(range= new(alloc) QUICK_RANGE())) goto err; // out of memory - range->min_key=range->max_key=(char*) ref->key_buff; - range->min_length=range->max_length=ref->key_length; + range->min_key= range->max_key= (char*) ref->key_buff; + range->min_length= range->max_length= ref->key_length; + range->min_keypart_map= range->max_keypart_map= + make_prev_keypart_map(ref->key_parts); range->flag= ((ref->key_length == key_info->key_length && (key_info->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME) ? EQ_RANGE : 0); @@ -7671,7 +7812,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, { key_part->part=part; key_part->field= key_info->key_part[part].field; - key_part->length= key_info->key_part[part].length; + key_part->length= key_info->key_part[part].length; key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; key_part->flag= (uint8) key_info->key_part[part].key_part_flag; @@ -7690,11 +7831,11 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, QUICK_RANGE *null_range; *ref->null_ref_key= 1; // Set null byte then create a range - if (!(null_range= new (alloc) QUICK_RANGE((char*)ref->key_buff, - ref->key_length, - (char*)ref->key_buff, - ref->key_length, - EQ_RANGE))) + if (!(null_range= new (alloc) + QUICK_RANGE((char*)ref->key_buff, ref->key_length, + make_prev_keypart_map(ref->key_parts), + (char*)ref->key_buff, ref->key_length, + make_prev_keypart_map(ref->key_parts), EQ_RANGE))) goto err; *ref->null_ref_key= 0; // Clear null byte if (insert_dynamic(&quick->ranges,(gptr)&null_range)) @@ -8146,6 +8287,7 @@ int QUICK_RANGE_SELECT::get_next() start_key->flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : (last_range->flag & EQ_RANGE) ? 
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + start_key->keypart_map= last_range->min_keypart_map; end_key->key= (const byte*) last_range->max_key; end_key->length= last_range->max_length; /* @@ -8154,6 +8296,7 @@ int QUICK_RANGE_SELECT::get_next() */ end_key->flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); + end_key->keypart_map= last_range->max_keypart_map; mrange_slot->range_flag= last_range->flag; } @@ -8203,7 +8346,9 @@ end: other if some error occurred */ -int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) +int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, + key_part_map keypart_map, + byte *cur_prefix) { DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix"); @@ -8215,8 +8360,7 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) { /* Read the next record in the same range with prefix after cur_prefix. */ DBUG_ASSERT(cur_prefix != 0); - result= file->index_read(record, cur_prefix, prefix_length, - HA_READ_AFTER_KEY); + result= file->index_read(record, cur_prefix, keypart_map, HA_READ_AFTER_KEY); if (result || (file->compare_key(file->end_range) <= 0)) DBUG_RETURN(result); } @@ -8232,11 +8376,13 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) start_key.key= (const byte*) last_range->min_key; start_key.length= min(last_range->min_length, prefix_length); + start_key.keypart_map= last_range->min_keypart_map & keypart_map; start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : (last_range->flag & EQ_RANGE) ? HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); end_key.key= (const byte*) last_range->max_key; end_key.length= min(last_range->max_length, prefix_length); + end_key.keypart_map= last_range->max_keypart_map & keypart_map; /* We use READ_AFTER_KEY here because if we are reading on a key prefix we want to find all keys with this prefix @@ -8244,8 +8390,8 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) end_key.flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); - result= file->read_range_first(last_range->min_length ? &start_key : 0, - last_range->max_length ? &end_key : 0, + result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0, + last_range->max_keypart_map ? &end_key : 0, test(last_range->flag & EQ_RANGE), sorted); if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE)) @@ -8285,9 +8431,8 @@ int QUICK_RANGE_SELECT_GEOM::get_next() } last_range= *(cur_range++); - result= file->index_read(record, - (byte*) last_range->min_key, - last_range->min_length, + result= file->index_read(record, (byte*) last_range->min_key, + last_range->min_keypart_map, (ha_rkey_function)(last_range->flag ^ GEOM_FLAG)); if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE) DBUG_RETURN(result); @@ -8419,15 +8564,15 @@ int QUICK_SELECT_DESC::get_next() if (last_range->flag & EQ_RANGE) { - result= file->index_read(record, (byte*) last_range->max_key, - last_range->max_length, HA_READ_KEY_EXACT); + result = file->index_read(record, (byte*) last_range->max_key, + last_range->max_keypart_map, HA_READ_KEY_EXACT); } else { DBUG_ASSERT(last_range->flag & NEAR_MAX || range_reads_after_key(last_range)); result=file->index_read(record, (byte*) last_range->max_key, - last_range->max_length, + last_range->max_keypart_map, ((last_range->flag & NEAR_MAX) ? 
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV)); @@ -8747,8 +8892,7 @@ void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, static inline uint get_field_keypart(KEY *index, Field *field); static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, uint *param_idx); -static bool -get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, +static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, KEY_PART_INFO *first_non_group_part, KEY_PART_INFO *min_max_arg_part, KEY_PART_INFO *last_part, THD *thd, @@ -8924,7 +9068,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) if ((join->tables != 1) || /* The query must reference one table. */ ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */ (!join->select_distinct)) || - (thd->lex->select_lex.olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */ + (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */ DBUG_RETURN(NULL); if (table->s->keys == 0) /* There are no indexes to use. */ DBUG_RETURN(NULL); @@ -8948,7 +9092,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) else DBUG_RETURN(NULL); - Item *expr= min_max_item->args[0]; /* The argument of MIN/MAX. */ + /* The argument of MIN/MAX. */ + Item *expr= min_max_item->args[0]->real_item(); if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */ { if (! min_max_arg_item) @@ -9012,7 +9157,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) cur_index_info++, cur_index++) { /* Check (B1) - if current index is covering. */ - if (!table->used_keys.is_set(cur_index)) + if (!table->covering_keys.is_set(cur_index)) goto next_index; /* @@ -9146,7 +9291,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) NULL; first_non_infix_part= min_max_arg_part ? (min_max_arg_part < last_part) ? - min_max_arg_part + 1 : + min_max_arg_part : NULL : NULL; if (first_non_group_part && @@ -9203,7 +9348,9 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) */ if (first_non_infix_part) { - for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++) + cur_part= first_non_infix_part + + (min_max_arg_part && (min_max_arg_part < last_part)); + for (; cur_part != last_part; cur_part++) { if (bitmap_is_set(table->read_set, cur_part->field->field_index)) goto next_index; @@ -9317,6 +9464,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, DBUG_ENTER("check_group_min_max_predicates"); DBUG_ASSERT(cond && min_max_arg_item); + cond= cond->real_item(); Item::Type cond_type= cond->type(); if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */ { @@ -9354,7 +9502,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) { - cur_arg= arguments[arg_idx]; + cur_arg= arguments[arg_idx]->real_item(); DBUG_PRINT("info", ("cur_arg: %s", cur_arg->full_name())); if (cur_arg->type() == Item::FIELD_ITEM) { @@ -9749,7 +9897,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, RETURN New QUICK_GROUP_MIN_MAX_SELECT object if successfully created, - NULL o/w. + NULL otherwise. 
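Throughout these hunks, handler calls such as file->index_read() and records_in_range() stop receiving a raw key length in bytes and instead get a key_part_map built with make_prev_keypart_map() or make_keypart_map(). The macro bodies are not part of this diff; the sketch below encodes one plausible reading, consistent with uses such as make_prev_keypart_map(ref->key_parts) (a key-part count) in get_quick_select_for_ref() and make_keypart_map(sel_range->part) (a zero-based part index) in add_range(). Treat the definitions as an assumption, not as the authoritative helpers.

  #include <cassert>

  typedef unsigned long key_part_map_sketch;    // stand-in for key_part_map

  // Assumed semantics: bits 0..count-1 set ("the previous `count` key parts").
  static key_part_map_sketch prev_keypart_map_sketch(unsigned count)
  {
    return (key_part_map_sketch(1) << count) - 1;
  }

  // Assumed semantics: bits 0..part set ("key parts up to and including `part`").
  static key_part_map_sketch keypart_map_sketch(unsigned part)
  {
    return (key_part_map_sketch(2) << part) - 1;
  }

  int main()
  {
    // For a 3-part key prefix, a count of 3 and a last-part index of 2 should
    // describe the same set of key parts, which is why both helpers exist.
    assert(prev_keypart_map_sketch(3) == 0x7);
    assert(keypart_map_sketch(2) == 0x7);
    // The single MIN/MAX argument keypart case from add_range(): part index 0.
    assert(keypart_map_sketch(0) == 0x1);
    return 0;
  }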
*/ QUICK_SELECT_I * @@ -9762,10 +9910,10 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows, quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table, param->thd->lex->current_select->join, have_min, have_max, min_max_arg_part, - group_prefix_len, used_key_parts, - index_info, index, read_cost, records, - key_infix_len, key_infix, - parent_alloc); + group_prefix_len, group_key_parts, + used_key_parts, index_info, index, + read_cost, records, key_infix_len, + key_infix, parent_alloc); if (!quick) DBUG_RETURN(NULL); @@ -9854,7 +10002,7 @@ QUICK_GROUP_MIN_MAX_SELECT:: QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, bool have_max_arg, KEY_PART_INFO *min_max_arg_part_arg, - uint group_prefix_len_arg, + uint group_prefix_len_arg, uint group_key_parts_arg, uint used_key_parts_arg, KEY *index_info_arg, uint use_index, double read_cost_arg, ha_rows records_arg, uint key_infix_len_arg, @@ -9864,7 +10012,7 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, have_max(have_max_arg), seen_first_key(FALSE), min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg), key_infix_len(key_infix_len_arg), min_functions_it(NULL), - max_functions_it(NULL) + max_functions_it(NULL), group_key_parts(group_key_parts_arg) { head= table; file= head->file; @@ -9874,6 +10022,7 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, read_time= read_cost_arg; records= records_arg; used_key_parts= used_key_parts_arg; + real_key_parts= used_key_parts_arg; real_prefix_len= group_prefix_len + key_infix_len; group_prefix= NULL; min_max_arg_len= min_max_arg_part ? min_max_arg_part->store_length : 0; @@ -10040,7 +10189,9 @@ bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range) range_flag|= EQ_RANGE; /* equality condition */ } range= new QUICK_RANGE(sel_range->min_value, min_max_arg_len, + make_keypart_map(sel_range->part), sel_range->max_value, min_max_arg_len, + make_keypart_map(sel_range->part), range_flag); if (!range) return TRUE; @@ -10279,7 +10430,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next() first sub-group with the extended prefix. */ if (!have_min && !have_max && key_infix_len > 0) - result= file->index_read(record, group_prefix, real_prefix_len, + result= file->index_read(record, group_prefix, + make_prev_keypart_map(real_key_parts), HA_READ_KEY_EXACT); result= have_min ? min_res : have_max ? max_res : result; @@ -10342,7 +10494,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min() /* Apply the constant equality conditions to the non-group select fields */ if (key_infix_len > 0) { - if ((result= file->index_read(record, group_prefix, real_prefix_len, + if ((result= file->index_read(record, group_prefix, + make_prev_keypart_map(real_key_parts), HA_READ_KEY_EXACT))) DBUG_RETURN(result); } @@ -10359,7 +10512,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min() /* Find the first subsequent record without NULL in the MIN/MAX field. 
*/ key_copy(tmp_record, record, index_info, 0); result= file->index_read(record, tmp_record, - real_prefix_len + min_max_arg_len, + make_keypart_map(real_key_parts), HA_READ_AFTER_KEY); /* Check if the new record belongs to the current group by comparing its @@ -10415,7 +10568,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max() if (min_max_ranges.elements > 0) result= next_max_in_range(); else - result= file->index_read(record, group_prefix, real_prefix_len, + result= file->index_read(record, group_prefix, + make_prev_keypart_map(real_key_parts), HA_READ_PREFIX_LAST); DBUG_RETURN(result); } @@ -10451,7 +10605,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() { byte *cur_prefix= seen_first_key ? group_prefix : NULL; if ((result= quick_prefix_select->get_next_prefix(group_prefix_len, - cur_prefix))) + make_prev_keypart_map(group_key_parts), cur_prefix))) DBUG_RETURN(result); seen_first_key= TRUE; } @@ -10467,7 +10621,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() else { /* Load the first key in this group into record. */ - result= file->index_read(record, group_prefix, group_prefix_len, + result= file->index_read(record, group_prefix, + make_prev_keypart_map(group_key_parts), HA_READ_AFTER_KEY); if (result) DBUG_RETURN(result); @@ -10509,7 +10664,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() { ha_rkey_function find_flag; - uint search_prefix_len; + key_part_map keypart_map; QUICK_RANGE *cur_range; bool found_null= FALSE; int result= HA_ERR_KEY_NOT_FOUND; @@ -10531,22 +10686,21 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() if (cur_range->flag & NO_MIN_RANGE) { + keypart_map= make_prev_keypart_map(real_key_parts); find_flag= HA_READ_KEY_EXACT; - search_prefix_len= real_prefix_len; } else { /* Extend the search key with the lower boundary for this range. */ memcpy(group_prefix + real_prefix_len, cur_range->min_key, cur_range->min_length); - search_prefix_len= real_prefix_len + min_max_arg_len; + keypart_map= make_keypart_map(real_key_parts); find_flag= (cur_range->flag & (EQ_RANGE | NULL_RANGE)) ? HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT; } - result= file->index_read(record, group_prefix, search_prefix_len, - find_flag); + result= file->index_read(record, group_prefix, keypart_map, find_flag); if (result) { if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) && @@ -10643,7 +10797,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() { ha_rkey_function find_flag; - uint search_prefix_len; + key_part_map keypart_map; QUICK_RANGE *cur_range; int result; @@ -10665,22 +10819,21 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() if (cur_range->flag & NO_MAX_RANGE) { + keypart_map= make_prev_keypart_map(real_key_parts); find_flag= HA_READ_PREFIX_LAST; - search_prefix_len= real_prefix_len; } else { /* Extend the search key with the upper boundary for this range. */ memcpy(group_prefix + real_prefix_len, cur_range->max_key, cur_range->max_length); - search_prefix_len= real_prefix_len + min_max_arg_len; + keypart_map= make_keypart_map(real_key_parts); find_flag= (cur_range->flag & EQ_RANGE) ? HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MAX) ? 
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV; } - result= file->index_read(record, group_prefix, search_prefix_len, - find_flag); + result= file->index_read(record, group_prefix, keypart_map, find_flag); if (result) { diff --git a/sql/opt_range.h b/sql/opt_range.h index d82e1dc459e..1ad9567cddd 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -37,17 +37,23 @@ class QUICK_RANGE :public Sql_alloc { public: char *min_key,*max_key; uint16 min_length,max_length,flag; + key_part_map min_keypart_map, // bitmap of used keyparts in min_key + max_keypart_map; // bitmap of used keyparts in max_key #ifdef HAVE_purify uint16 dummy; /* Avoid warnings on 'flag' */ #endif QUICK_RANGE(); /* Full range */ - QUICK_RANGE(const char *min_key_arg,uint min_length_arg, - const char *max_key_arg,uint max_length_arg, + QUICK_RANGE(const char *min_key_arg, uint min_length_arg, + key_part_map min_keypart_map_arg, + const char *max_key_arg, uint max_length_arg, + key_part_map max_keypart_map_arg, uint flag_arg) : min_key((char*) sql_memdup(min_key_arg,min_length_arg+1)), max_key((char*) sql_memdup(max_key_arg,max_length_arg+1)), min_length((uint16) min_length_arg), max_length((uint16) max_length_arg), + min_keypart_map(min_keypart_map_arg), + max_keypart_map(max_keypart_map_arg), flag((uint16) flag_arg) { #ifdef HAVE_purify @@ -60,11 +66,11 @@ class QUICK_RANGE :public Sql_alloc { /* Quick select interface. This class is a parent for all QUICK_*_SELECT and FT_SELECT classes. - + The usage scenario is as follows: 1. Create quick select quick= new QUICK_XXX_SELECT(...); - + 2. Perform lightweight initialization. This can be done in 2 ways: 2.a: Regular initialization if (quick->init()) @@ -75,29 +81,29 @@ class QUICK_RANGE :public Sql_alloc { 2.b: Special initialization for quick selects merged by QUICK_ROR_*_SELECT if (quick->init_ror_merged_scan()) delete quick; - + 3. Perform zero, one, or more scans. while (...) { // initialize quick select for scan. This may allocate - // buffers and/or prefetch rows. + // buffers and/or prefetch rows. if (quick->reset()) { //the only valid action after failed reset() call is delete delete quick; //abort query } - + // perform the scan do { res= quick->get_next(); } while (res && ...) } - + 4. Delete the select: delete quick; - + */ class QUICK_SELECT_I @@ -123,6 +129,8 @@ public: Max. number of (first) key parts this quick select uses for retrieval. eg. for "(key1p1=c1 AND key1p2=c2) OR key1p1=c2" used_key_parts == 2. Applicable if index!= MAX_KEY. + + For QUICK_GROUP_MIN_MAX_SELECT it includes MIN/MAX argument keyparts. */ uint used_key_parts; @@ -318,7 +326,8 @@ public: int reset(void); int get_next(); void range_end(); - int get_next_prefix(uint prefix_length, byte *cur_prefix); + int get_next_prefix(uint prefix_length, key_part_map keypart_map, + byte *cur_prefix); bool reverse_sorted() { return 0; } bool unique_key_range(); int init_ror_merged_scan(bool reuse_handler); @@ -605,6 +614,7 @@ private: byte *tmp_record; /* Temporary storage for next_min(), next_max(). */ byte *group_prefix; /* Key prefix consisting of the GROUP fields. */ uint group_prefix_len; /* Length of the group prefix. */ + uint group_key_parts; /* A number of keyparts in the group prefix */ byte *last_prefix; /* Prefix of the last group for detecting EOF. */ bool have_min; /* Specify whether we are computing */ bool have_max; /* a MIN, a MAX, or both. */ @@ -616,6 +626,7 @@ private: uint key_infix_len; DYNAMIC_ARRAY min_max_ranges; /* Array of range ptrs for the MIN/MAX field. 
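The opt_range.h hunk above retouches, without changing, the documented lifecycle of a quick select: construct it, do a lightweight init(), then for each scan call reset() followed by a get_next() loop, and finally delete it. The sketch below mirrors that driver loop against a deliberately simplified interface; the QuickScan class and its error conventions are invented for illustration, not the server's QUICK_SELECT_I.

#include <cstdio>
#include <memory>
#include <vector>

// Simplified stand-in for a quick-select-style interface (not the real class).
class QuickScan {
public:
  virtual ~QuickScan() = default;
  virtual int init() = 0;              // lightweight initialization
  virtual int reset() = 0;             // prepare one scan (buffers, prefetch)
  virtual int get_next(int *row) = 0;  // 0 = row produced, non-zero = end/error
};

// Toy implementation scanning an in-memory "index".
class VectorScan : public QuickScan {
  std::vector<int> rows_;
  size_t pos_ = 0;
public:
  explicit VectorScan(std::vector<int> rows) : rows_(std::move(rows)) {}
  int init() override { return 0; }
  int reset() override { pos_ = 0; return 0; }
  int get_next(int *row) override {
    if (pos_ >= rows_.size()) return -1;   // end of scan
    *row = rows_[pos_++];
    return 0;
  }
};

int main() {
  std::unique_ptr<QuickScan> quick(new VectorScan({1, 2, 3}));
  if (quick->init()) return 1;             // failed init: drop the select

  for (int scan = 0; scan < 2; ++scan) {   // zero, one, or more scans
    if (quick->reset()) return 1;          // after a failed reset(), only delete
    int row;
    while (quick->get_next(&row) == 0)     // perform the scan
      std::printf("scan %d row %d\n", scan, row);
  }
  // unique_ptr deletes the select when it goes out of scope
}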
*/ uint real_prefix_len; /* Length of key prefix extended with key_infix. */ + uint real_key_parts; /* A number of keyparts in the above value. */ List<Item_sum> *min_functions; List<Item_sum> *max_functions; List_iterator<Item_sum> *min_functions_it; @@ -638,10 +649,11 @@ private: public: QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min, bool have_max, KEY_PART_INFO *min_max_arg_part, - uint group_prefix_len, uint used_key_parts, - KEY *index_info, uint use_index, double read_cost, - ha_rows records, uint key_infix_len, - byte *key_infix, MEM_ROOT *parent_alloc); + uint group_prefix_len, uint group_key_parts, + uint used_key_parts, KEY *index_info, uint + use_index, double read_cost, ha_rows records, uint + key_infix_len, byte *key_infix, MEM_ROOT + *parent_alloc); ~QUICK_GROUP_MIN_MAX_SELECT(); bool add_range(SEL_ARG *sel_range); void update_key_stat(); diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 5bd5ec4b42d..f9a06f3fb6e 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -251,7 +251,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) error= table->file->index_first(table->record[0]); else error= table->file->index_read(table->record[0],key_buff, - ref.key_length, + make_prev_keypart_map(ref.key_parts), range_fl & NEAR_MIN ? HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT); @@ -338,11 +338,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) error= table->file->index_last(table->record[0]); else error= table->file->index_read(table->record[0], key_buff, - ref.key_length, + make_prev_keypart_map(ref.key_parts), range_fl & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV); - if (!error && reckey_in_range(1, &ref, item_field->field, + if (!error && reckey_in_range(1, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; if (table->key_read) @@ -605,15 +605,13 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, /* Check if field is part of the tested partial key */ byte *key_ptr= ref->key_buff; KEY_PART_INFO *part; - for (part= keyinfo->key_part; - ; - key_ptr+= part++->store_length) + for (part= keyinfo->key_part; ; key_ptr+= part++->store_length) { if (part > field_part) return 0; // Field is beyond the tested parts if (part->field->eq(((Item_field*) args[0])->field)) - break; // Found a part od the key for the field + break; // Found a part of the key for the field } bool is_field_part= part == field_part; @@ -625,8 +623,11 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, { uint length= (key_ptr-ref->key_buff)+part->store_length; if (ref->key_length < length) + { /* Ultimately ref->key_length will contain the length of the search key */ ref->key_length= length; + ref->key_parts= (part - keyinfo->key_part) + 1; + } if (!*prefix_len && part+1 == field_part) *prefix_len= length; if (is_field_part && eq_type) @@ -773,6 +774,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, { ref->key= idx; ref->key_length= 0; + ref->key_parts= 0; key_part_map key_part_used= 0; *range_fl= NO_MIN_RANGE | NO_MAX_RANGE; if (matching_cond(max_fl, ref, keyinfo, part, cond, @@ -788,6 +790,8 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ ref->key_buff[ref->key_length]= 1; ref->key_length+= part->store_length; + ref->key_parts++; + DBUG_ASSERT(ref->key_parts == jdx+1); *range_fl&= ~NO_MIN_RANGE; *range_fl|= NEAR_MIN; // > NULL } diff --git a/sql/parse_file.cc b/sql/parse_file.cc index f5b62e3afe2..f06c7c15202 100644 --- 
a/sql/parse_file.cc +++ b/sql/parse_file.cc @@ -733,14 +733,18 @@ nlist_err: /* parse parameters - + SYNOPSIS File_parser::parse() base base address for parameter writing (structure like TABLE) mem_root MEM_ROOT for parameters allocation parameters parameters description - required number of required parameters in above list + required number of parameters in the above list. If the file + contains more parameters than "required", they will + be ignored. If the file contains less parameters + then "required", non-existing parameters will + remain their values. hook hook called for unknown keys hook_data some data specific for the hook @@ -923,6 +927,13 @@ list_err: } } } + + /* + NOTE: if we read less than "required" parameters, it is still Ok. + Probably, we've just read the file of the previous version, which + contains less parameters. + */ + DBUG_RETURN(FALSE); } diff --git a/sql/partition_info.cc b/sql/partition_info.cc index a7f9bd413c6..3e0257f5b1d 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -187,8 +187,14 @@ bool partition_info::set_up_default_partitions(handler *file, my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string); goto end; } - if (no_parts == 0) - no_parts= file->get_default_no_partitions(info); + + if ((no_parts == 0) && + ((no_parts= file->get_default_no_partitions(info)) == 0)) + { + my_error(ER_PARTITION_NOT_DEFINED_ERROR, MYF(0), "partitions"); + goto end; + } + if (unlikely(no_parts > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -753,7 +759,11 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, } if (unlikely(set_up_defaults_for_partitioning(file, info, (uint)0))) goto end; - tot_partitions= get_tot_partitions(); + if (!(tot_partitions= get_tot_partitions())) + { + my_error(ER_PARTITION_NOT_DEFINED_ERROR, MYF(0), "partitions"); + goto end; + } if (unlikely(tot_partitions > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -776,6 +786,8 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, partition_element *part_elem= part_it++; if (part_elem->engine_type == NULL) part_elem->engine_type= default_engine_type; + if (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE) + part_elem->data_file_name= part_elem->index_file_name= 0; if (!is_sub_partitioned()) { if (check_table_name(part_elem->partition_name, @@ -849,15 +861,27 @@ void partition_info::print_no_partition_found(TABLE *table) { char buf[100]; char *buf_ptr= (char*)&buf; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + TABLE_LIST table_list; + + bzero(&table_list, sizeof(table_list)); + table_list.db= table->s->db.str; + table_list.table_name= table->s->table_name.str; - if (part_expr->null_value) - buf_ptr= (char*)"NULL"; + if (check_single_table_access(current_thd, + SELECT_ACL, &table_list, TRUE)) + my_message(ER_NO_PARTITION_FOR_GIVEN_VALUE, + ER(ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT), MYF(0)); else - longlong2str(err_value, buf, - part_expr->unsigned_flag ? 10 : -10); - my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), buf_ptr); - dbug_tmp_restore_column_map(table->read_set, old_map); + { + my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + if (part_expr->null_value) + buf_ptr= (char*)"NULL"; + else + longlong2str(err_value, buf, + part_expr->unsigned_flag ? 
10 : -10); + my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), buf_ptr); + dbug_tmp_restore_column_map(table->read_set, old_map); + } } /* Set up buffers and arrays for fields requiring preparation diff --git a/sql/partition_info.h b/sql/partition_info.h index 8bcc769054f..6c21002c184 100644 --- a/sql/partition_info.h +++ b/sql/partition_info.h @@ -156,7 +156,7 @@ public: char *part_func_string; char *subpart_func_string; - uchar *part_state; + const char *part_state; partition_element *curr_part_elem; partition_element *current_partition; diff --git a/sql/protocol.cc b/sql/protocol.cc index 05e98c68e4e..d537fd346f9 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -35,7 +35,7 @@ static void write_eof_packet(THD *thd, NET *net); #ifndef EMBEDDED_LIBRARY bool Protocol::net_store_data(const char *from, uint length) #else -bool Protocol_prep::net_store_data(const char *from, uint length) +bool Protocol_binary::net_store_data(const char *from, uint length) #endif { ulong packet_length=packet->length(); @@ -557,7 +557,7 @@ bool Protocol::send_fields(List<Item> *list, uint flags) Item *item; char buff[80]; String tmp((char*) buff,sizeof(buff),&my_charset_bin); - Protocol_simple prot(thd); + Protocol_text prot(thd); String *local_packet= prot.storage_packet(); CHARSET_INFO *thd_charset= thd->variables.character_set_results; DBUG_ENTER("send_fields"); @@ -760,7 +760,7 @@ bool Protocol::store(I_List<i_string>* str_list) ****************************************************************************/ #ifndef EMBEDDED_LIBRARY -void Protocol_simple::prepare_for_resend() +void Protocol_text::prepare_for_resend() { packet->length(0); #ifndef DBUG_OFF @@ -768,7 +768,7 @@ void Protocol_simple::prepare_for_resend() #endif } -bool Protocol_simple::store_null() +bool Protocol_text::store_null() { #ifndef DBUG_OFF field_pos++; @@ -801,7 +801,7 @@ bool Protocol::store_string_aux(const char *from, uint length, } -bool Protocol_simple::store(const char *from, uint length, +bool Protocol_text::store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { #ifndef DBUG_OFF @@ -817,8 +817,8 @@ bool Protocol_simple::store(const char *from, uint length, } -bool Protocol_simple::store(const char *from, uint length, - CHARSET_INFO *fromcs) +bool Protocol_text::store(const char *from, uint length, + CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= this->thd->variables.character_set_results; #ifndef DBUG_OFF @@ -834,7 +834,7 @@ bool Protocol_simple::store(const char *from, uint length, } -bool Protocol_simple::store_tiny(longlong from) +bool Protocol_text::store_tiny(longlong from) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TINY); @@ -846,7 +846,7 @@ bool Protocol_simple::store_tiny(longlong from) } -bool Protocol_simple::store_short(longlong from) +bool Protocol_text::store_short(longlong from) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -860,7 +860,7 @@ bool Protocol_simple::store_short(longlong from) } -bool Protocol_simple::store_long(longlong from) +bool Protocol_text::store_long(longlong from) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -874,7 +874,7 @@ bool Protocol_simple::store_long(longlong from) } -bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) +bool Protocol_text::store_longlong(longlong from, bool unsigned_flag) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -889,7 +889,7 @@ bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) } -bool Protocol_simple::store_decimal(const 
my_decimal *d) +bool Protocol_text::store_decimal(const my_decimal *d) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -903,7 +903,7 @@ bool Protocol_simple::store_decimal(const my_decimal *d) } -bool Protocol_simple::store(float from, uint32 decimals, String *buffer) +bool Protocol_text::store(float from, uint32 decimals, String *buffer) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -915,7 +915,7 @@ bool Protocol_simple::store(float from, uint32 decimals, String *buffer) } -bool Protocol_simple::store(double from, uint32 decimals, String *buffer) +bool Protocol_text::store(double from, uint32 decimals, String *buffer) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -927,7 +927,7 @@ bool Protocol_simple::store(double from, uint32 decimals, String *buffer) } -bool Protocol_simple::store(Field *field) +bool Protocol_text::store(Field *field) { if (field->is_null()) return store_null(); @@ -961,7 +961,7 @@ bool Protocol_simple::store(Field *field) */ -bool Protocol_simple::store(TIME *tm) +bool Protocol_text::store(MYSQL_TIME *tm) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -984,7 +984,7 @@ bool Protocol_simple::store(TIME *tm) } -bool Protocol_simple::store_date(TIME *tm) +bool Protocol_text::store_date(MYSQL_TIME *tm) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -1003,7 +1003,7 @@ bool Protocol_simple::store_date(TIME *tm) we support 0-6 decimals for time. */ -bool Protocol_simple::store_time(TIME *tm) +bool Protocol_text::store_time(MYSQL_TIME *tm) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -1043,7 +1043,7 @@ bool Protocol_simple::store_time(TIME *tm) [..]..[[length]data] data ****************************************************************************/ -bool Protocol_prep::prepare_for_send(List<Item> *item_list) +bool Protocol_binary::prepare_for_send(List<Item> *item_list) { Protocol::prepare_for_send(item_list); bit_fields= (field_count+9)/8; @@ -1054,7 +1054,7 @@ bool Protocol_prep::prepare_for_send(List<Item> *item_list) } -void Protocol_prep::prepare_for_resend() +void Protocol_binary::prepare_for_resend() { packet->length(bit_fields+1); bzero((char*) packet->ptr(), 1+bit_fields); @@ -1062,21 +1062,22 @@ void Protocol_prep::prepare_for_resend() } -bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) +bool Protocol_binary::store(const char *from, uint length, + CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= thd->variables.character_set_results; field_pos++; return store_string_aux(from, length, fromcs, tocs); } -bool Protocol_prep::store(const char *from,uint length, - CHARSET_INFO *fromcs, CHARSET_INFO *tocs) +bool Protocol_binary::store(const char *from,uint length, + CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { field_pos++; return store_string_aux(from, length, fromcs, tocs); } -bool Protocol_prep::store_null() +bool Protocol_binary::store_null() { uint offset= (field_pos+2)/8+1, bit= (1 << ((field_pos+2) & 7)); /* Room for this as it's allocated in prepare_for_send */ @@ -1087,7 +1088,7 @@ bool Protocol_prep::store_null() } -bool Protocol_prep::store_tiny(longlong from) +bool Protocol_binary::store_tiny(longlong from) { char buff[1]; field_pos++; @@ -1096,7 +1097,7 @@ bool Protocol_prep::store_tiny(longlong from) } -bool Protocol_prep::store_short(longlong from) +bool Protocol_binary::store_short(longlong from) { field_pos++; char *to= packet->prep_append(2, PACKET_BUFFER_EXTRA_ALLOC); @@ -1107,7 +1108,7 @@ bool Protocol_prep::store_short(longlong from) } -bool Protocol_prep::store_long(longlong from) +bool 
Protocol_binary::store_long(longlong from) { field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); @@ -1118,7 +1119,7 @@ bool Protocol_prep::store_long(longlong from) } -bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) +bool Protocol_binary::store_longlong(longlong from, bool unsigned_flag) { field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); @@ -1128,7 +1129,7 @@ bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) return 0; } -bool Protocol_prep::store_decimal(const my_decimal *d) +bool Protocol_binary::store_decimal(const my_decimal *d) { #ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || @@ -1141,7 +1142,7 @@ bool Protocol_prep::store_decimal(const my_decimal *d) return store(str.ptr(), str.length(), str.charset()); } -bool Protocol_prep::store(float from, uint32 decimals, String *buffer) +bool Protocol_binary::store(float from, uint32 decimals, String *buffer) { field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); @@ -1152,7 +1153,7 @@ bool Protocol_prep::store(float from, uint32 decimals, String *buffer) } -bool Protocol_prep::store(double from, uint32 decimals, String *buffer) +bool Protocol_binary::store(double from, uint32 decimals, String *buffer) { field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); @@ -1163,7 +1164,7 @@ bool Protocol_prep::store(double from, uint32 decimals, String *buffer) } -bool Protocol_prep::store(Field *field) +bool Protocol_binary::store(Field *field) { /* We should not increment field_pos here as send_binary() will call another @@ -1175,7 +1176,7 @@ bool Protocol_prep::store(Field *field) } -bool Protocol_prep::store(TIME *tm) +bool Protocol_binary::store(MYSQL_TIME *tm) { char buff[12],*pos; uint length; @@ -1201,15 +1202,15 @@ bool Protocol_prep::store(TIME *tm) return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); } -bool Protocol_prep::store_date(TIME *tm) +bool Protocol_binary::store_date(MYSQL_TIME *tm) { tm->hour= tm->minute= tm->second=0; tm->second_part= 0; - return Protocol_prep::store(tm); + return Protocol_binary::store(tm); } -bool Protocol_prep::store_time(TIME *tm) +bool Protocol_binary::store_time(MYSQL_TIME *tm) { char buff[13], *pos; uint length; diff --git a/sql/protocol.h b/sql/protocol.h index 6c4c7414ea5..e0672240e0e 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -88,9 +88,9 @@ public: CHARSET_INFO *fromcs, CHARSET_INFO *tocs)=0; virtual bool store(float from, uint32 decimals, String *buffer)=0; virtual bool store(double from, uint32 decimals, String *buffer)=0; - virtual bool store(TIME *time)=0; - virtual bool store_date(TIME *time)=0; - virtual bool store_time(TIME *time)=0; + virtual bool store(MYSQL_TIME *time)=0; + virtual bool store_date(MYSQL_TIME *time)=0; + virtual bool store_time(MYSQL_TIME *time)=0; virtual bool store(Field *field)=0; #ifdef EMBEDDED_LIBRARY int begin_dataset(); @@ -98,16 +98,25 @@ public: #else void remove_last_row() {} #endif + enum enum_protocol_type + { + PROTOCOL_TEXT= 0, PROTOCOL_BINARY= 1 + /* + before adding here or change the values, consider that it is cast to a + bit in sql_cache.cc. 
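The new enum_protocol_type added to protocol.h distinguishes PROTOCOL_TEXT (0) from PROTOCOL_BINARY (1), and its comment warns that the value is cast to a single bit in sql_cache.cc, which is why the enumerators must stay 0 and 1. A minimal sketch of keeping such a tag in one bit of a flags word follows; the flag layout is invented for illustration and does not reproduce the query cache's actual packing.

#include <cstdint>
#include <cstdio>

enum enum_protocol_type { PROTOCOL_TEXT = 0, PROTOCOL_BINARY = 1 };

// Illustrative flags word: bit 0 holds the protocol type, other bits are free
// for unrelated flags (the real query-cache layout is not reproduced here).
constexpr std::uint32_t PROTO_BIT  = 1u << 0;
constexpr std::uint32_t SOME_OTHER = 1u << 1;

static std::uint32_t pack_flags(enum_protocol_type t, bool other) {
  // Casting the enum to a bit only works while its values stay 0 and 1.
  return (static_cast<std::uint32_t>(t) & PROTO_BIT) | (other ? SOME_OTHER : 0);
}

static enum_protocol_type unpack_type(std::uint32_t flags) {
  return (flags & PROTO_BIT) ? PROTOCOL_BINARY : PROTOCOL_TEXT;
}

int main() {
  std::uint32_t f = pack_flags(PROTOCOL_BINARY, true);
  std::printf("type=%d other=%d\n", unpack_type(f), (f & SOME_OTHER) != 0);
}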
+ */ + }; + virtual enum enum_protocol_type type()= 0; }; /* Class used for the old (MySQL 4.0 protocol) */ -class Protocol_simple :public Protocol +class Protocol_text :public Protocol { public: - Protocol_simple() {} - Protocol_simple(THD *thd_arg) :Protocol(thd_arg) {} + Protocol_text() {} + Protocol_text(THD *thd_arg) :Protocol(thd_arg) {} virtual void prepare_for_resend(); virtual bool store_null(); virtual bool store_tiny(longlong from); @@ -118,25 +127,26 @@ public: virtual bool store(const char *from, uint length, CHARSET_INFO *cs); virtual bool store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs); - virtual bool store(TIME *time); - virtual bool store_date(TIME *time); - virtual bool store_time(TIME *time); + virtual bool store(MYSQL_TIME *time); + virtual bool store_date(MYSQL_TIME *time); + virtual bool store_time(MYSQL_TIME *time); virtual bool store(float nr, uint32 decimals, String *buffer); virtual bool store(double from, uint32 decimals, String *buffer); virtual bool store(Field *field); #ifdef EMBEDDED_LIBRARY void remove_last_row(); #endif + virtual enum enum_protocol_type type() { return PROTOCOL_TEXT; }; }; -class Protocol_prep :public Protocol +class Protocol_binary :public Protocol { private: uint bit_fields; public: - Protocol_prep() {} - Protocol_prep(THD *thd_arg) :Protocol(thd_arg) {} + Protocol_binary() {} + Protocol_binary(THD *thd_arg) :Protocol(thd_arg) {} virtual bool prepare_for_send(List<Item> *item_list); virtual void prepare_for_resend(); #ifdef EMBEDDED_LIBRARY @@ -152,12 +162,13 @@ public: virtual bool store(const char *from,uint length, CHARSET_INFO *cs); virtual bool store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs); - virtual bool store(TIME *time); - virtual bool store_date(TIME *time); - virtual bool store_time(TIME *time); + virtual bool store(MYSQL_TIME *time); + virtual bool store_date(MYSQL_TIME *time); + virtual bool store_time(MYSQL_TIME *time); virtual bool store(float nr, uint32 decimals, String *buffer); virtual bool store(double from, uint32 decimals, String *buffer); virtual bool store(Field *field); + virtual enum enum_protocol_type type() { return PROTOCOL_BINARY; }; }; void send_warning(THD *thd, uint sql_errno, const char *err=0); diff --git a/sql/records.cc b/sql/records.cc index 0923ab1d75e..0fb9f4f9650 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -150,7 +150,8 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, info->file= table->file; info->forms= &info->table; /* Only one table */ - if (table->s->tmp_table == TMP_TABLE && !table->sort.addon_field) + if (table->s->tmp_table == NON_TRANSACTIONAL_TMP_TABLE && + !table->sort.addon_field) VOID(table->file->extra(HA_EXTRA_MMAP)); if (table->sort.addon_field) diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 934a6821514..6470f15e458 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -19,6 +19,7 @@ #include "repl_failsafe.h" #include "sql_repl.h" #include "slave.h" +#include "rpl_mi.h" #include "rpl_filter.h" #include "log_event.h" #include <mysql.h> @@ -692,12 +693,16 @@ int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi) #ifdef HAVE_OPENSSL if (mi->ssl) + { mysql_ssl_set(mysql, mi->ssl_key[0]?mi->ssl_key:0, mi->ssl_cert[0]?mi->ssl_cert:0, mi->ssl_ca[0]?mi->ssl_ca:0, mi->ssl_capath[0]?mi->ssl_capath:0, mi->ssl_cipher[0]?mi->ssl_cipher:0); + mysql_options(mysql, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, + &mi->ssl_verify_server_cert); + } #endif mysql_options(mysql, 
MYSQL_SET_CHARSET_NAME, default_charset_info->csname); diff --git a/sql/rpl_constants.h b/sql/rpl_constants.h new file mode 100644 index 00000000000..426e80a328d --- /dev/null +++ b/sql/rpl_constants.h @@ -0,0 +1,18 @@ +#ifndef RPL_CONSTANTS_H +#define RPL_CONSTANTS_H + +/** + Enumeration of the incidents that can occur for the server. + */ +enum Incident { + /** No incident */ + INCIDENT_NONE, + + /** There are possibly lost events in the replication stream */ + INCIDENT_LOST_EVENTS, + + /** Shall be last event of the enumeration */ + INCIDENT_COUNT +}; + +#endif /* RPL_CONSTANTS_H */ diff --git a/sql/rpl_injector.cc b/sql/rpl_injector.cc index 95b5ecba895..aa3020c42be 100644 --- a/sql/rpl_injector.cc +++ b/sql/rpl_injector.cc @@ -75,9 +75,11 @@ int injector::transaction::use_table(server_id_type sid, table tbl) if ((error= check_state(TABLE_STATE))) DBUG_RETURN(error); + server_id_type save_id= m_thd->server_id; m_thd->set_server_id(sid); error= m_thd->binlog_write_table_map(tbl.get_table(), tbl.is_transactional()); + m_thd->set_server_id(save_id); DBUG_RETURN(error); } @@ -91,9 +93,11 @@ int injector::transaction::write_row (server_id_type sid, table tbl, if (int error= check_state(ROW_STATE)) DBUG_RETURN(error); + server_id_type save_id= m_thd->server_id; m_thd->set_server_id(sid); m_thd->binlog_write_row(tbl.get_table(), tbl.is_transactional(), cols, colcnt, record); + m_thd->set_server_id(save_id); DBUG_RETURN(0); } @@ -107,9 +111,11 @@ int injector::transaction::delete_row(server_id_type sid, table tbl, if (int error= check_state(ROW_STATE)) DBUG_RETURN(error); + server_id_type save_id= m_thd->server_id; m_thd->set_server_id(sid); m_thd->binlog_delete_row(tbl.get_table(), tbl.is_transactional(), cols, colcnt, record); + m_thd->set_server_id(save_id); DBUG_RETURN(0); } @@ -123,9 +129,11 @@ int injector::transaction::update_row(server_id_type sid, table tbl, if (int error= check_state(ROW_STATE)) DBUG_RETURN(error); + server_id_type save_id= m_thd->server_id; m_thd->set_server_id(sid); m_thd->binlog_update_row(tbl.get_table(), tbl.is_transactional(), cols, colcnt, before, after); + m_thd->set_server_id(save_id); DBUG_RETURN(0); } @@ -188,3 +196,21 @@ void injector::new_trans(THD *thd, injector::transaction *ptr) DBUG_VOID_RETURN; } + +int injector::record_incident(THD *thd, Incident incident) +{ + Incident_log_event ev(thd, incident); + if (int error= mysql_bin_log.write(&ev)) + return error; + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + return 0; +} + +int injector::record_incident(THD *thd, Incident incident, LEX_STRING const message) +{ + Incident_log_event ev(thd, incident, message); + if (int error= mysql_bin_log.write(&ev)) + return error; + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + return 0; +} diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h index 61c2e0ecebc..9c444ee20b3 100644 --- a/sql/rpl_injector.h +++ b/sql/rpl_injector.h @@ -18,9 +18,10 @@ /* Pull in 'byte', 'my_off_t', and 'uint32' */ #include <my_global.h> - #include <my_bitmap.h> +#include "rpl_constants.h" + /* Forward declarations */ class handler; class MYSQL_BIN_LOG; @@ -322,6 +323,9 @@ public: transaction new_trans(THD *); void new_trans(THD *, transaction *); + int record_incident(THD*, Incident incident); + int record_incident(THD*, Incident incident, LEX_STRING const message); + private: explicit injector(); ~injector() { } /* Nothing needs to be done */ diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 1c426eff768..354a97cefde 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -29,12 +29,13 
@@ int init_strvar_from_file(char *var, int max_size, IO_CACHE *f, MASTER_INFO::MASTER_INFO() :ssl(0), fd(-1), io_thd(0), inited(0), - abort_slave(0),slave_running(0), slave_run_id(0) + abort_slave(0),slave_running(0), slave_run_id(0), + ssl_verify_server_cert(0) { host[0] = 0; user[0] = 0; password[0] = 0; ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0; ssl_cipher[0]= 0; ssl_key[0]= 0; - + bzero((char*) &file, sizeof(file)); pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST); @@ -80,12 +81,21 @@ void init_master_info_with_options(MASTER_INFO* mi) strmake(mi->ssl_cipher, master_ssl_cipher, sizeof(mi->ssl_cipher)-1); if (master_ssl_key) strmake(mi->ssl_key, master_ssl_key, sizeof(mi->ssl_key)-1); + /* Intentionally init ssl_verify_server_cert to 0, no option available */ + mi->ssl_verify_server_cert= 0; DBUG_VOID_RETURN; } -#define LINES_IN_MASTER_INFO_WITH_SSL 14 +enum { + LINES_IN_MASTER_INFO_WITH_SSL= 14, + + /* 5.1.16 added value of master_ssl_verify_server_cert */ + LINE_FOR_MASTER_SSL_VERIFY_SERVER_CERT= 15, + /* Number of lines currently used when saving master info file */ + LINES_IN_MASTER_INFO= LINE_FOR_MASTER_SSL_VERIFY_SERVER_CERT +}; int init_master_info(MASTER_INFO* mi, const char* master_info_fname, const char* slave_info_fname, @@ -184,7 +194,8 @@ file '%s')", fname); } mi->fd = fd; - int port, connect_retry, master_log_pos, ssl= 0, lines; + int port, connect_retry, master_log_pos, lines; + int ssl= 0, ssl_verify_server_cert= 0; char *first_non_digit; /* @@ -195,7 +206,8 @@ file '%s')", fname); file since versions before 4.1.x could generate files with more lines than needed. If first line doesn't contain a number or contain number less than - 14 then such file is treated like file from pre 4.1.1 version. + LINES_IN_MASTER_INFO_WITH_SSL then such file is treated like file + from pre 4.1.1 version. There is no ambiguity when reading an old master.info, as before 4.1.1, the first line contained the binlog's name, which is either empty or has an extension (contains a '.'), so can't be confused @@ -219,7 +231,8 @@ file '%s')", fname); if (mi->master_log_name[0]!='\0' && *first_non_digit=='\0' && lines >= LINES_IN_MASTER_INFO_WITH_SSL) - { // Seems to be new format + { + /* Seems to be new format => read master log name from next line */ if (init_strvar_from_file(mi->master_log_name, sizeof(mi->master_log_name), &mi->file, "")) goto errwithmsg; @@ -245,19 +258,31 @@ file '%s')", fname); slave will try connect to master, so in this case warning is printed. 
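init_master_info() now tells file versions apart by the line count stored on the first line of master.info: LINES_IN_MASTER_INFO_WITH_SSL (14) is the pre-5.1.16 layout, and line 15 carries master_ssl_verify_server_cert when present, so an older file simply leaves the new setting at its default. The standalone sketch below shows this read-what-the-version-has pattern; the field names and layout in it are simplified stand-ins, not the real master.info format.

#include <iostream>
#include <sstream>
#include <string>

// Simplified settings block; only a subset of what master.info really holds.
struct MasterSettings {
  std::string log_name;
  long        log_pos = 4;
  int         ssl = 0;
  int         ssl_verify_server_cert = 0;   // added in a later file version
};

// Reads a counted-lines file: the first line says how many lines follow.
// Older files with fewer lines are accepted; missing settings keep defaults.
static bool read_settings(std::istream &in, MasterSettings *out) {
  int lines = 0;
  if (!(in >> lines)) return false;
  in.ignore();                              // skip the rest of the first line

  std::string line;
  if (lines >= 1 && std::getline(in, line)) out->log_name = line;
  if (lines >= 2 && std::getline(in, line)) out->log_pos = std::stol(line);
  if (lines >= 3 && std::getline(in, line)) out->ssl = std::stoi(line);
  // Only newer files carry this line; otherwise the default (0) stands.
  if (lines >= 4 && std::getline(in, line))
    out->ssl_verify_server_cert = std::stoi(line);
  return true;
}

int main() {
  std::istringstream old_file("3\nmaster-bin.000001\n106\n1\n");
  std::istringstream new_file("4\nmaster-bin.000001\n106\n1\n1\n");
  MasterSettings a, b;
  read_settings(old_file, &a);
  read_settings(new_file, &b);
  std::cout << a.ssl_verify_server_cert << ' '
            << b.ssl_verify_server_cert << '\n';   // prints "0 1"
}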
*/ - if (lines >= LINES_IN_MASTER_INFO_WITH_SSL && - (init_intvar_from_file(&ssl, &mi->file, master_ssl) || - init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca), - &mi->file, master_ssl_ca) || - init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath), - &mi->file, master_ssl_capath) || - init_strvar_from_file(mi->ssl_cert, sizeof(mi->ssl_cert), - &mi->file, master_ssl_cert) || - init_strvar_from_file(mi->ssl_cipher, sizeof(mi->ssl_cipher), - &mi->file, master_ssl_cipher) || - init_strvar_from_file(mi->ssl_key, sizeof(mi->ssl_key), - &mi->file, master_ssl_key))) - goto errwithmsg; + if (lines >= LINES_IN_MASTER_INFO_WITH_SSL) + { + if (init_intvar_from_file(&ssl, &mi->file, master_ssl) || + init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca), + &mi->file, master_ssl_ca) || + init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath), + &mi->file, master_ssl_capath) || + init_strvar_from_file(mi->ssl_cert, sizeof(mi->ssl_cert), + &mi->file, master_ssl_cert) || + init_strvar_from_file(mi->ssl_cipher, sizeof(mi->ssl_cipher), + &mi->file, master_ssl_cipher) || + init_strvar_from_file(mi->ssl_key, sizeof(mi->ssl_key), + &mi->file, master_ssl_key)) + goto errwithmsg; + + /* + Starting from 5.1.16 ssl_verify_server_cert might be + in the file + */ + if (lines >= LINE_FOR_MASTER_SSL_VERIFY_SERVER_CERT && + init_intvar_from_file(&ssl_verify_server_cert, &mi->file, 0)) + goto errwithmsg; + + } + #ifndef HAVE_OPENSSL if (ssl) sql_print_warning("SSL information in the master info file " @@ -273,6 +298,7 @@ file '%s')", fname); mi->port= (uint) port; mi->connect_retry= (uint) connect_retry; mi->ssl= (my_bool) ssl; + mi->ssl_verify_server_cert= ssl_verify_server_cert; } DBUG_PRINT("master_info",("log_file_name: %s position: %ld", mi->master_log_name, @@ -315,6 +341,7 @@ int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) { IO_CACHE* file = &mi->file; char lbuf[22]; + DBUG_ENTER("flush_master_info"); DBUG_PRINT("enter",("master_pos: %ld", (long) mi->master_log_pos)); @@ -352,13 +379,14 @@ int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) */ my_b_seek(file, 0L); - my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n", - LINES_IN_MASTER_INFO_WITH_SSL, + my_b_printf(file, + "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n", + LINES_IN_MASTER_INFO, mi->master_log_name, llstr(mi->master_log_pos, lbuf), mi->host, mi->user, mi->password, mi->port, mi->connect_retry, (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert, - mi->ssl_cipher, mi->ssl_key); + mi->ssl_cipher, mi->ssl_key, mi->ssl_verify_server_cert); DBUG_RETURN(-flush_io_cache(file)); } diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index ae77e64d93a..08435795a91 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -18,6 +18,9 @@ #ifdef HAVE_REPLICATION +#include "rpl_rli.h" + + /***************************************************************************** Replication IO Thread @@ -65,6 +68,7 @@ class MASTER_INFO my_bool ssl; // enables use of SSL connection if true char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN]; char ssl_cipher[FN_REFLEN], ssl_key[FN_REFLEN]; + my_bool ssl_verify_server_cert; my_off_t master_log_pos; File fd; // we keep the file open, so we need to remember the file pointer diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc new file mode 100644 index 00000000000..4c6686cf446 --- /dev/null +++ b/sql/rpl_record.cc @@ -0,0 +1,280 @@ +/* Copyright 2007 MySQL AB. All rights reserved. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" +#include "rpl_record.h" +#include "slave.h" // Need to pull in slave_print_msg + +/** + Pack a record of data for a table into a format suitable for + transfer via the binary log. + + The format for a row in transfer with N fields is the following: + + ceil(N/8) null bytes: + One null bit for every column *regardless of whether it can be + null or not*. This simplifies the decoding. Observe that the + number of null bits is equal to the number of set bits in the + @c cols bitmap. The number of null bytes is the smallest number + of bytes necessary to store the null bits. + + Padding bits are 1. + + N packets: + Each field is stored in packed format. + + + @param table Table describing the format of the record + + @param cols Bitmap with a set bit for each column that should + be stored in the row + + @param row_data Pointer to memory where row will be written + + @param record Pointer to record that should be packed. It is + assumed that the pointer refers to either @c + record[0] or @c record[1], but no such check is + made since the code does not rely on that. + + @return The number of bytes written at @c row_data. + */ +#if !defined(MYSQL_CLIENT) +my_size_t +pack_row(TABLE *table, MY_BITMAP const* cols, + byte *row_data, const byte *record) +{ + Field **p_field= table->field, *field; + int const null_byte_count= (bitmap_bits_set(cols) + 7) / 8; + byte *pack_ptr = row_data + null_byte_count; + byte *null_ptr = row_data; + my_ptrdiff_t const rec_offset= record - table->record[0]; + my_ptrdiff_t const def_offset= table->s->default_values - table->record[0]; + + /* + We write the null bits and the packed records using one pass + through all the fields. The null bytes are written little-endian, + i.e., the first fields are in the first byte. 
+ */ + unsigned int null_bits= (1U << 8) - 1; + // Mask to mask out the correct but among the null bits + unsigned int null_mask= 1U; + for ( ; (field= *p_field) ; p_field++) + { + DBUG_PRINT("debug", ("null_mask=%d; null_ptr=%p; row_data=%p; null_byte_count=%d", + null_mask, null_ptr, row_data, null_byte_count)); + if (bitmap_is_set(cols, p_field - table->field)) + { + my_ptrdiff_t offset; + if (field->is_null(rec_offset)) + { + offset= def_offset; + null_bits |= null_mask; + } + else + { + offset= rec_offset; + null_bits &= ~null_mask; + + /* + We only store the data of the field if it is non-null + */ + pack_ptr= (byte*)field->pack((char *) pack_ptr, field->ptr + offset); + } + + null_mask <<= 1; + if ((null_mask & 0xFF) == 0) + { + DBUG_ASSERT(null_ptr < row_data + null_byte_count); + null_mask = 1U; + *null_ptr++ = null_bits; + null_bits= (1U << 8) - 1; + } + } + } + + /* + Write the last (partial) byte, if there is one + */ + if ((null_mask & 0xFF) > 1) + { + DBUG_ASSERT(null_ptr < row_data + null_byte_count); + *null_ptr++ = null_bits; + } + + /* + The null pointer should now point to the first byte of the + packed data. If it doesn't, something is very wrong. + */ + DBUG_ASSERT(null_ptr == row_data + null_byte_count); + + return static_cast<my_size_t>(pack_ptr - row_data); +} +#endif + + +/** + Unpack a row into @c table->record[0]. + + The function will always unpack into the @c table->record[0] + record. This is because there are too many dependencies on where + the various member functions of Field and subclasses expect to + write. + + The row is assumed to only consist of the fields for which the + bitset represented by @c arr and @c bits; the other parts of the + record are left alone. + + At most @c colcnt columns are read: if the table is larger than + that, the remaining fields are not filled in. 
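The pack_row() documentation above pins down the transfer format: for N transmitted columns, ceil(N/8) null bytes come first, one bit per transmitted column regardless of nullability, written little-endian (first column in the lowest bit of the first byte) with padding bits set to 1, followed by the packed values of the non-null columns. The standalone round trip below reproduces that bit layout for a toy row of optional 32-bit integers; it is a sketch of the format only, not the server's Field packing code.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <optional>
#include <vector>

using Row = std::vector<std::optional<std::int32_t>>;

// Pack: ceil(N/8) null bytes (bit i of byte i/8 set means column i is NULL,
// padding bits stay 1), then the 4-byte values of the non-null columns.
static std::vector<std::uint8_t> pack_toy_row(const Row &row) {
  const std::size_t null_bytes = (row.size() + 7) / 8;
  std::vector<std::uint8_t> out(null_bytes, 0xFF);       // padding bits are 1
  for (std::size_t i = 0; i < row.size(); ++i) {
    if (row[i]) {
      out[i / 8] &= static_cast<std::uint8_t>(~(1u << (i % 8)));  // not NULL
      std::uint8_t buf[4];
      std::memcpy(buf, &*row[i], 4);                     // value bytes
      out.insert(out.end(), buf, buf + 4);
    }
  }
  return out;
}

// Unpack: consume the null bytes first, then one value per non-null bit.
static Row unpack_toy_row(const std::vector<std::uint8_t> &data,
                          std::size_t ncols) {
  const std::size_t null_bytes = (ncols + 7) / 8;
  std::size_t pos = null_bytes;
  Row row(ncols);
  for (std::size_t i = 0; i < ncols; ++i) {
    if (data[i / 8] & (1u << (i % 8)))
      row[i] = std::nullopt;                             // NULL: no value bytes
    else {
      std::int32_t v;
      std::memcpy(&v, &data[pos], 4);
      pos += 4;
      row[i] = v;
    }
  }
  return row;
}

int main() {
  Row row = {7, std::nullopt, 42};                       // column 1 is NULL
  std::vector<std::uint8_t> packed = pack_toy_row(row);
  // One null byte: bits 0 and 2 cleared, bit 1 set, padding bits 3..7 set,
  // which is 0xFA.
  std::cout << std::hex << int(packed[0]) << std::dec << '\n';
  Row back = unpack_toy_row(packed, row.size());
  std::cout << (back == row ? "round trip ok" : "mismatch") << '\n';
}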
+ + @param rli Relay log info + @param table Table to unpack into + @param colcnt Number of columns to read from record + @param row Packed row data + @param cols Pointer to columns data to fill in + @param row_end Pointer to variable that will hold the value of the + one-after-end position for the row + @param master_reclength + Pointer to variable that will be set to the length of the + record on the master side + @param rw_set Pointer to bitmap that holds either the read_set or the + write_set of the table + + + @retval 0 No error + + @retval ER_NO_DEFAULT_FOR_FIELD + Returned if one of the fields existing on the slave but not on the + master does not have a default value (and isn't nullable) + + */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +int +unpack_row(RELAY_LOG_INFO const *rli, + TABLE *table, uint const colcnt, + char const *const row_data, MY_BITMAP const *cols, + char const **const row_end, ulong *const master_reclength, + MY_BITMAP* const rw_set, Log_event_type const event_type) +{ + DBUG_ENTER("unpack_row"); + DBUG_ASSERT(row_data); + my_size_t const master_null_byte_count= (bitmap_bits_set(cols) + 7) / 8; + int error= 0; + + char const *null_ptr= row_data; + char const *pack_ptr= row_data + master_null_byte_count; + + bitmap_clear_all(rw_set); + + empty_record(table); + + Field **const begin_ptr = table->field; + Field **field_ptr; + Field **const end_ptr= begin_ptr + colcnt; + + DBUG_ASSERT(null_ptr < row_data + master_null_byte_count); + + // Mask to mask out the correct bit among the null bits + unsigned int null_mask= 1U; + // The "current" null bits + unsigned int null_bits= *null_ptr++; + for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) + { + Field *const f= *field_ptr; + + /* + No need to bother about columns that does not exist: they have + gotten default values when being emptied above. + */ + if (bitmap_is_set(cols, field_ptr - begin_ptr)) + { + if ((null_mask & 0xFF) == 0) + { + DBUG_ASSERT(null_ptr < row_data + master_null_byte_count); + null_mask= 1U; + null_bits= *null_ptr++; + } + + DBUG_ASSERT(null_mask & 0xFF); // One of the 8 LSB should be set + + /* Field...::unpack() cannot return 0 */ + DBUG_ASSERT(pack_ptr != NULL); + + if ((null_bits & null_mask) && f->maybe_null()) + f->set_null(); + else + { + f->set_notnull(); + + /* + We only unpack the field if it was non-null + */ + pack_ptr= f->unpack(f->ptr, pack_ptr); + } + + bitmap_set_bit(rw_set, f->field_index); + null_mask <<= 1; + } + } + + /* + We should now have read all the null bytes, otherwise something is + really wrong. + */ + DBUG_ASSERT(null_ptr == row_data + master_null_byte_count); + + *row_end = pack_ptr; + if (master_reclength) + { + if (*field_ptr) + *master_reclength = (*field_ptr)->ptr - (char*) table->record[0]; + else + *master_reclength = table->s->reclength; + } + + /* + Set properties for remaining columns, if there are any. We let the + corresponding bit in the write_set be set, to write the value if + it was not there already. We iterate over all remaining columns, + even if there were an error, to get as many error messages as + possible. We are still able to return a pointer to the next row, + so redo that. + + This generation of error messages is only relevant when inserting + new rows. 
+ */ + for ( ; *field_ptr ; ++field_ptr) + { + uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; + Field *const f= *field_ptr; + + if (event_type == WRITE_ROWS_EVENT && + ((*field_ptr)->flags & mask) == mask) + { + slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, + "Field `%s` of table `%s`.`%s` " + "has no default value and cannot be NULL", + (*field_ptr)->field_name, table->s->db.str, + table->s->table_name.str); + error = ER_NO_DEFAULT_FOR_FIELD; + } + else + f->set_default(); + } + + DBUG_RETURN(error); +} + +#endif // HAVE_REPLICATION diff --git a/sql/rpl_record.h b/sql/rpl_record.h new file mode 100644 index 00000000000..7ff1bbc7c8e --- /dev/null +++ b/sql/rpl_record.h @@ -0,0 +1,33 @@ +/* Copyright 2007 MySQL AB. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef RPL_RECORD_H +#define RPL_RECORD_H + +#if !defined(MYSQL_CLIENT) +my_size_t pack_row(TABLE* table, MY_BITMAP const* cols, + byte *row_data, const byte *data); +#endif + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +int unpack_row(RELAY_LOG_INFO const *rli, + TABLE *table, uint const colcnt, + char const *const row_data, MY_BITMAP const *cols, + char const **const row_end, ulong *const master_reclength, + MY_BITMAP* const rw_set, + Log_event_type const event_type); +#endif + +#endif diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc new file mode 100644 index 00000000000..73e2f4e7344 --- /dev/null +++ b/sql/rpl_record_old.cc @@ -0,0 +1,173 @@ + +#include "mysql_priv.h" +#include "rpl_record_old.h" + +my_size_t +pack_row_old(TABLE *table, MY_BITMAP const* cols, + byte *row_data, const byte *record) +{ + Field **p_field= table->field, *field; + int n_null_bytes= table->s->null_bytes; + byte *ptr; + uint i; + my_ptrdiff_t const rec_offset= record - table->record[0]; + my_ptrdiff_t const def_offset= table->s->default_values - table->record[0]; + memcpy(row_data, record, n_null_bytes); + ptr= row_data+n_null_bytes; + + for (i= 0 ; (field= *p_field) ; i++, p_field++) + { + if (bitmap_is_set(cols,i)) + { + my_ptrdiff_t const offset= + field->is_null(rec_offset) ? def_offset : rec_offset; + field->move_field_offset(offset); + ptr= (byte*)field->pack((char *) ptr, field->ptr); + field->move_field_offset(-offset); + } + } + return (static_cast<my_size_t>(ptr - row_data)); +} + + +/* + Unpack a row into a record. 
+ + SYNOPSIS + unpack_row() + rli Relay log info + table Table to unpack into + colcnt Number of columns to read from record + record Record where the data should be unpacked + row Packed row data + cols Pointer to columns data to fill in + row_end Pointer to variable that will hold the value of the + one-after-end position for the row + master_reclength + Pointer to variable that will be set to the length of the + record on the master side + rw_set Pointer to bitmap that holds either the read_set or the + write_set of the table + + DESCRIPTION + + The row is assumed to only consist of the fields for which the + bitset represented by 'arr' and 'bits'; the other parts of the + record are left alone. + + At most 'colcnt' columns are read: if the table is larger than + that, the remaining fields are not filled in. + + RETURN VALUE + + Error code, or zero if no error. The following error codes can + be returned: + + ER_NO_DEFAULT_FOR_FIELD + Returned if one of the fields existing on the slave but not on + the master does not have a default value (and isn't nullable) + */ +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +int +unpack_row_old(RELAY_LOG_INFO *rli, + TABLE *table, uint const colcnt, byte *record, + char const *row, MY_BITMAP const *cols, + char const **row_end, ulong *master_reclength, + MY_BITMAP* const rw_set, Log_event_type const event_type) +{ + DBUG_ASSERT(record && row); + my_ptrdiff_t const offset= record - (byte*) table->record[0]; + my_size_t master_null_bytes= table->s->null_bytes; + + if (colcnt != table->s->fields) + { + Field **fptr= &table->field[colcnt-1]; + do + master_null_bytes= (*fptr)->last_null_byte(); + while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF && + fptr-- > table->field); + + /* + If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time, + there were no nullable fields nor BIT fields at all in the + columns that are common to the master and the slave. In that + case, there is only one null byte holding the X bit. + + OBSERVE! There might still be nullable columns following the + common columns, so table->s->null_bytes might be greater than 1. + */ + if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF) + master_null_bytes= 1; + } + + DBUG_ASSERT(master_null_bytes <= table->s->null_bytes); + memcpy(record, row, master_null_bytes); // [1] + int error= 0; + + bitmap_set_all(rw_set); + + Field **const begin_ptr = table->field; + Field **field_ptr; + char const *ptr= row + master_null_bytes; + Field **const end_ptr= begin_ptr + colcnt; + for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) + { + Field *const f= *field_ptr; + + if (bitmap_is_set(cols, field_ptr - begin_ptr)) + { + f->move_field_offset(offset); + ptr= f->unpack(f->ptr, ptr); + f->move_field_offset(-offset); + /* Field...::unpack() cannot return 0 */ + DBUG_ASSERT(ptr != NULL); + } + else + bitmap_clear_bit(rw_set, field_ptr - begin_ptr); + } + + *row_end = ptr; + if (master_reclength) + { + if (*field_ptr) + *master_reclength = (*field_ptr)->ptr - (char*) table->record[0]; + else + *master_reclength = table->s->reclength; + } + + /* + Set properties for remaining columns, if there are any. We let the + corresponding bit in the write_set be set, to write the value if + it was not there already. We iterate over all remaining columns, + even if there were an error, to get as many error messages as + possible. We are still able to return a pointer to the next row, + so redo that. + + This generation of error messages is only relevant when inserting + new rows. 
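The trailing comment above, repeated from unpack_row() earlier, introduces the same clean-up pass in both unpack paths: columns that exist on the slave but were not sent by the master get their defaults, except that on a row-insert event a NOT NULL column with no default raises ER_NO_DEFAULT_FOR_FIELD, and the loop keeps going so every offending column is reported. The sketch below shows that policy on a toy column description; the flag constants are modeled on the NOT_NULL_FLAG / NO_DEFAULT_VALUE_FLAG test, everything else is illustrative.

#include <cstdint>
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

// Illustrative column flags mirroring the test in the unpack loop.
constexpr std::uint32_t NOT_NULL_FLAG         = 1u << 0;
constexpr std::uint32_t NO_DEFAULT_VALUE_FLAG = 1u << 1;

struct SlaveColumn {
  std::string name;
  std::uint32_t flags = 0;
  std::optional<int> default_value;      // what set_default() would install
  std::optional<int> value;              // unpacked or defaulted value
};

// Apply defaults to columns the master did not send.  For a row-insert event,
// a NOT NULL column without a default is an error, but scanning continues so
// every such column gets reported, as the comment above explains.
static int fill_missing_columns(std::vector<SlaveColumn> &cols, bool inserting) {
  int error = 0;
  const std::uint32_t mask = NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
  for (SlaveColumn &c : cols) {
    if (inserting && (c.flags & mask) == mask) {
      std::fprintf(stderr,
                   "Field `%s` has no default value and cannot be NULL\n",
                   c.name.c_str());
      error = 1;                         // remember the error, keep going
    } else {
      c.value = c.default_value;         // stand-in for Field::set_default()
    }
  }
  return error;
}

int main() {
  std::vector<SlaveColumn> extra = {
    {"added_nullable", 0, std::nullopt, std::nullopt},
    {"added_not_null", NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG,
     std::nullopt, std::nullopt},
  };
  std::printf("error=%d\n", fill_missing_columns(extra, /*inserting=*/true));
}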
+ */ + for ( ; *field_ptr ; ++field_ptr) + { + uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; + + DBUG_PRINT("debug", ("flags = 0x%x, mask = 0x%x, flags & mask = 0x%x", + (*field_ptr)->flags, mask, + (*field_ptr)->flags & mask)); + + if (event_type == WRITE_ROWS_EVENT && + ((*field_ptr)->flags & mask) == mask) + { + slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, + "Field `%s` of table `%s`.`%s` " + "has no default value and cannot be NULL", + (*field_ptr)->field_name, table->s->db.str, + table->s->table_name.str); + error = ER_NO_DEFAULT_FOR_FIELD; + } + else + (*field_ptr)->set_default(); + } + + return error; +} +#endif diff --git a/sql/rpl_record_old.h b/sql/rpl_record_old.h new file mode 100644 index 00000000000..6ddb01de07c --- /dev/null +++ b/sql/rpl_record_old.h @@ -0,0 +1,32 @@ +/* Copyright 2007 MySQL AB. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef RPL_RECORD_OLD_H +#define RPL_RECORD_OLD_H + +#ifndef MYSQL_CLIENT +my_size_t pack_row_old(TABLE *table, MY_BITMAP const* cols, + byte *row_data, const byte *record); + +#ifdef HAVE_REPLICATION +int unpack_row_old(RELAY_LOG_INFO *rli, + TABLE *table, uint const colcnt, byte *record, + char const *row, MY_BITMAP const *cols, + char const **row_end, ulong *master_reclength, + MY_BITMAP* const rw_set, + Log_event_type const event_type); +#endif +#endif +#endif diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 8a051195dba..702dec72f56 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -15,6 +15,7 @@ #include "mysql_priv.h" +#include "rpl_mi.h" #include "rpl_rli.h" #include <my_dir.h> // For MY_STAT #include "sql_repl.h" // For check_binlog_magic @@ -29,14 +30,15 @@ int init_strvar_from_file(char *var, int max_size, IO_CACHE *f, st_relay_log_info::st_relay_log_info() - :no_storage(FALSE), info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), + :no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id), + info_fd(-1), cur_log_fd(-1), save_temporary_tables(0), cur_log_old_open_count(0), group_master_log_pos(0), log_space_total(0), ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0), abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0), inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE), until_log_pos(0), retried_trans(0), tables_to_lock(0), tables_to_lock_count(0), - unsafe_to_stop_at(0) + last_event_start_time(0), m_flags(0) { DBUG_ENTER("st_relay_log_info::st_relay_log_info"); @@ -968,7 +970,7 @@ err: strtol() conversions needed for log names comparison. We don't need to compare them each time this function is called, we only need to do this when current log name changes. 
If we have UNTIL_MASTER_POS condition we - need to do this only after Rotate_log_event::exec_event() (which is + need to do this only after Rotate_log_event::do_apply_event() (which is rare, so caching gives real benifit), and if we have UNTIL_RELAY_POS condition then we should invalidate cached comarison value after inc_group_relay_log_pos() which called for each group of events (so we @@ -1001,6 +1003,22 @@ bool st_relay_log_info::is_until_satisfied() log_pos= group_relay_log_pos; } +#ifndef DBUG_OFF + { + char buf[32]; + DBUG_PRINT("info", ("group_master_log_name='%s', group_master_log_pos=%s", + group_master_log_name, llstr(group_master_log_pos, buf))); + DBUG_PRINT("info", ("group_relay_log_name='%s', group_relay_log_pos=%s", + group_relay_log_name, llstr(group_relay_log_pos, buf))); + DBUG_PRINT("info", ("(%s) log_name='%s', log_pos=%s", + until_condition == UNTIL_MASTER_POS ? "master" : "relay", + log_name, llstr(log_pos, buf))); + DBUG_PRINT("info", ("(%s) until_log_name='%s', until_log_pos=%s", + until_condition == UNTIL_MASTER_POS ? "master" : "relay", + until_log_name, llstr(until_log_pos, buf))); + } +#endif + if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { /* @@ -1056,28 +1074,63 @@ void st_relay_log_info::cached_charset_invalidate() } -bool st_relay_log_info::cached_charset_compare(char *charset) +bool st_relay_log_info::cached_charset_compare(char *charset) const { DBUG_ENTER("st_relay_log_info::cached_charset_compare"); if (bcmp(cached_charset, charset, sizeof(cached_charset))) { - memcpy(cached_charset, charset, sizeof(cached_charset)); + memcpy(const_cast<char*>(cached_charset), charset, sizeof(cached_charset)); DBUG_RETURN(1); } DBUG_RETURN(0); } -void st_relay_log_info::transaction_end(THD* thd) +void st_relay_log_info::stmt_done(my_off_t event_master_log_pos, + time_t event_creation_time) { - DBUG_ENTER("st_relay_log_info::transaction_end"); + clear_flag(IN_STMT); /* - Nothing to do here right now. - */ - - DBUG_VOID_RETURN; + If in a transaction, and if the slave supports transactions, just + inc_event_relay_log_pos(). We only have to check for OPTION_BEGIN + (not OPTION_NOT_AUTOCOMMIT) as transactions are logged with + BEGIN/COMMIT, not with SET AUTOCOMMIT= . + + CAUTION: opt_using_transactions means innodb || bdb ; suppose the + master supports InnoDB and BDB, but the slave supports only BDB, + problems will arise: - suppose an InnoDB table is created on the + master, - then it will be MyISAM on the slave - but as + opt_using_transactions is true, the slave will believe he is + transactional with the MyISAM table. And problems will come when + one does START SLAVE; STOP SLAVE; START SLAVE; (the slave will + resume at BEGIN whereas there has not been any rollback). This is + the problem of using opt_using_transactions instead of a finer + "does the slave support _transactional handler used on the + master_". + + More generally, we'll have problems when a query mixes a + transactional handler and MyISAM and STOP SLAVE is issued in the + middle of the "transaction". START SLAVE will resume at BEGIN + while the MyISAM table has already been updated. + */ + if ((sql_thd->options & OPTION_BEGIN) && opt_using_transactions) + inc_event_relay_log_pos(); + else + { + inc_group_relay_log_pos(event_master_log_pos); + flush_relay_log_info(this); + /* + Note that Rotate_log_event::do_apply_event() does not call this + function, so there is no chance that a fake rotate event resets + last_master_timestamp. 
Note that we update without mutex + (probably ok - except in some very rare cases, only consequence + is that value may take some time to display in + Seconds_Behind_Master - not critical). + */ + last_master_timestamp= event_creation_time; + } } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) @@ -1087,12 +1140,12 @@ void st_relay_log_info::cleanup_context(THD *thd, bool error) DBUG_ASSERT(sql_thd == thd); /* - 1) Instances of Table_map_log_event, if ::exec_event() was called on them, + 1) Instances of Table_map_log_event, if ::do_apply_event() was called on them, may have opened tables, which we cannot be sure have been closed (because maybe the Rows_log_event have not been found or will not be, because slave SQL thread is stopping, or relay log has a missing tail etc). So we close all thread's tables. And so the table mappings have to be cancelled. - 2) Rows_log_event::exec_event() may even have started statements or + 2) Rows_log_event::do_apply_event() may even have started statements or transactions on them, which we need to rollback in case of error. 3) If finding a Format_description_log_event after a BEGIN, we also need to rollback before continuing with the next events. @@ -1106,7 +1159,8 @@ void st_relay_log_info::cleanup_context(THD *thd, bool error) m_table_map.clear_tables(); close_thread_tables(thd); clear_tables_to_lock(); - unsafe_to_stop_at= 0; + clear_flag(IN_STMT); + last_event_start_time= 0; DBUG_VOID_RETURN; } diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index 45c9fb1cf96..fada45722f6 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -51,6 +51,17 @@ struct RPL_TABLE_LIST; typedef struct st_relay_log_info { + /** + Flags for the state of the replication. + */ + enum enum_state_flag { + /** The replication thread is inside a statement */ + IN_STMT, + + /** Flag counter. Should always be last */ + STATE_FLAGS_COUNT + }; + /* If flag set, then rli does not store its state in any info file. This is the case only when we execute BINLOG SQL commands inside @@ -58,6 +69,15 @@ typedef struct st_relay_log_info */ bool no_storage; + /* + If true, events with the same server id should be replicated. This + field is set on creation of a relay log info structure by copying + the value of ::replicate_same_server_id and can be overridden if + necessary. For example of when this is done, check sql_binlog.cc, + where the BINLOG statement can be used to execute "raw" events. + */ + bool replicate_same_server_id; + /*** The following variables can only be read when protect by data lock ****/ /* @@ -164,7 +184,7 @@ typedef struct st_relay_log_info ulonglong future_group_master_log_pos; #endif - time_t last_master_timestamp; + time_t last_master_timestamp; void clear_slave_error(); void clear_until_condition(); @@ -292,14 +312,79 @@ typedef struct st_relay_log_info When the 6 bytes are equal to 0 is used to mean "cache is invalidated". */ void cached_charset_invalidate(); - bool cached_charset_compare(char *charset); - - void transaction_end(THD*); + bool cached_charset_compare(char *charset) const; void cleanup_context(THD *, bool); void clear_tables_to_lock(); - time_t unsafe_to_stop_at; + /* + Used by row-based replication to detect that it should not stop at + this event, but give it a chance to send more events. The time + where the last event inside a group started is stored here. If the + variable is zero, we are not in a group (but may be in a + transaction). + */ + time_t last_event_start_time; + + /** + Helper function to do after statement completion. 
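The stmt_done() helper added in rpl_rli.cc above (and documented in rpl_rli.h here) boils down to one decision, shown in the reduced, self-contained sketch below: inside a transaction only the event position advances; otherwise the statement closes a group, so the group position is stepped and the event creation time recorded. All names, the OPTION_BEGIN-style bit and the plain counters are stand-ins for this example, not the server's types.

#include <cstdio>
#include <ctime>

// Stand-in for the thread option bit the real code tests
// (sql_thd->options & OPTION_BEGIN); the value is arbitrary here.
static const unsigned long SKETCH_OPTION_BEGIN= 1UL << 0;

struct SketchRelayLogInfo
{
  unsigned long thd_options;        // options of the applying thread
  unsigned long event_relay_pos;    // advances for every event
  unsigned long group_relay_pos;    // advances only when a group completes
  std::time_t   last_master_timestamp;

  // Inside a transaction: only note that we moved one event forward.
  // Outside: step the group position, flush (omitted here) and record the
  // creation time that SHOW SLAVE STATUS turns into Seconds_Behind_Master.
  void stmt_done(unsigned long event_master_pos, std::time_t creation_time)
  {
    if (thd_options & SKETCH_OPTION_BEGIN)
      event_relay_pos++;
    else
    {
      group_relay_pos= event_master_pos;
      /* flush_relay_log_info(this) would go here in the server */
      last_master_timestamp= creation_time;
    }
  }
};

int main()
{
  SketchRelayLogInfo rli= {0, 0, 0, 0};

  rli.stmt_done(100, 1177000000);            // autocommit statement: group steps
  rli.thd_options|= SKETCH_OPTION_BEGIN;     // now "inside" a transaction
  rli.stmt_done(200, 1177000001);            // only the event position moves

  std::printf("event=%lu group=%lu ts=%ld\n",
              rli.event_relay_pos, rli.group_relay_pos,
              (long) rli.last_master_timestamp);
  return 0;
}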
+ + This function is called from an event to complete the group by + either stepping the group position, if the "statement" is not + inside a transaction; or increase the event position, if the + "statement" is inside a transaction. + + @param event_log_pos + Master log position of the event. The position is recorded in the + relay log info and used to produce information for <code>SHOW + SLAVE STATUS</code>. + + @param event_creation_time + Timestamp for the creation of the event on the master side. The + time stamp is recorded in the relay log info and used to compute + the <code>Seconds_behind_master</code> field. + */ + void stmt_done(my_off_t event_log_pos, + time_t event_creation_time); + + + /** + Set the value of a replication state flag. + + @param flag Flag to set + */ + void set_flag(enum_state_flag flag) + { + m_flags |= (1UL << flag); + } + + /** + Clear the value of a replication state flag. + + @param flag Flag to clear + */ + void clear_flag(enum_state_flag flag) + { + m_flags &= ~(1UL << flag); + } + + /** + Is the replication inside a group? + + Replication is inside a group if either: + - The OPTION_BEGIN flag is set, meaning we're inside a transaction + - The RLI_IN_STMT flag is set, meaning we're inside a statement + + @retval true Replication thread is currently inside a group + @retval false Replication thread is currently not inside a group + */ + bool is_in_group() const { + return (sql_thd->options & OPTION_BEGIN) || + (m_flags & (1UL << IN_STMT)); + } + +private: + uint32 m_flags; } RELAY_LOG_INFO; diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index 65a44a4947b..1d7cc808f0c 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -108,7 +108,7 @@ field_length_from_packed(enum_field_types const field_type, */ int -table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) +table_def::compatible_with(RELAY_LOG_INFO const *rli_arg, TABLE *table) const { /* @@ -116,6 +116,7 @@ table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) */ uint const cols_to_check= min(table->s->fields, size()); int error= 0; + RELAY_LOG_INFO const *rli= const_cast<RELAY_LOG_INFO*>(rli_arg); TABLE_SHARE const *const tsh= table->s; diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index b1aa642619c..17879a9ecfc 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -117,7 +117,7 @@ public: @retval 1 if the table definition is not compatible with @c table @retval 0 if the table definition is compatible with @c table */ - int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const; + int compatible_with(RELAY_LOG_INFO const *rli, TABLE *table) const; private: my_size_t m_size; // Number of elements in the types array diff --git a/sql/set_var.cc b/sql/set_var.cc index 54db43deeb0..325167ff9fa 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -51,6 +51,7 @@ #include "mysql_priv.h" #include <mysql.h> #include "slave.h" +#include "rpl_mi.h" #include <my_getopt.h> #include <thr_alarm.h> #include <myisam.h> @@ -356,6 +357,8 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count", &SV::net_retry_count, 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); +sys_var_bool_ptr_readonly sys_old_mode("old", + &global_system_variables.old_mode); sys_var_thd_bool sys_old_alter_table("old_alter_table", &SV::old_alter_table); sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); @@ -875,7 +878,7 @@ SHOW_VAR init_vars[]= { #ifdef HAVE_REPLICATION {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL}, #endif - 
{"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL}, + {"log_slow_queries", (char*) &opt_slow_log, SHOW_MY_BOOL}, {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS}, {sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS}, {sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS}, @@ -942,6 +945,7 @@ SHOW_VAR init_vars[]= { {sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS}, {sys_net_write_timeout.name,(char*) &sys_net_write_timeout, SHOW_SYS}, {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, + {sys_old_mode.name, (char*) &sys_old_mode, SHOW_SYS}, {sys_old_alter_table.name, (char*) &sys_old_alter_table, SHOW_SYS}, {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, @@ -1683,7 +1687,7 @@ byte *sys_var_thd_bool::value_ptr(THD *thd, enum_var_type type, } -bool sys_var::check_enum(THD *thd, set_var *var, TYPELIB *enum_names) +bool sys_var::check_enum(THD *thd, set_var *var, const TYPELIB *enum_names) { char buff[STRING_BUFFER_USUAL_SIZE]; const char *value; @@ -2750,8 +2754,8 @@ int set_var_collation_client::update(THD *thd) thd->variables.character_set_results= character_set_results; thd->variables.collation_connection= collation_connection; thd->update_charset(); - thd->protocol_simple.init(thd); - thd->protocol_prep.init(thd); + thd->protocol_text.init(thd); + thd->protocol_binary.init(thd); return 0; } @@ -2880,8 +2884,7 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var) String str(buff, sizeof(buff), &my_charset_latin1); String *res= var->value->val_str(&str); - if (!(var->save_result.time_zone= - my_tz_find(res, thd->lex->time_zone_tables_used))) + if (!(var->save_result.time_zone= my_tz_find(thd, res))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL"); return 1; @@ -2942,8 +2945,7 @@ void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type) We are guaranteed to find this time zone since its existence is checked during start-up. */ - global_system_variables.time_zone= - my_tz_find(&str, thd->lex->time_zone_tables_used); + global_system_variables.time_zone= my_tz_find(thd, &str); } else global_system_variables.time_zone= my_tz_SYSTEM; @@ -3037,7 +3039,10 @@ bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var) bool sys_var_thd_lc_time_names::update(THD *thd, set_var *var) { - thd->variables.lc_time_names= var->save_result.locale_value; + if (var->type == OPT_GLOBAL) + global_system_variables.lc_time_names= var->save_result.locale_value; + else + thd->variables.lc_time_names= var->save_result.locale_value; return 0; } @@ -3045,13 +3050,18 @@ bool sys_var_thd_lc_time_names::update(THD *thd, set_var *var) byte *sys_var_thd_lc_time_names::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - return (byte *)(thd->variables.lc_time_names->name); + return type == OPT_GLOBAL ? 
+ (byte *) global_system_variables.lc_time_names->name : + (byte *) thd->variables.lc_time_names->name; } void sys_var_thd_lc_time_names::set_default(THD *thd, enum_var_type type) { - thd->variables.lc_time_names = &my_locale_en_US; + if (type == OPT_GLOBAL) + global_system_variables.lc_time_names= my_default_lc_time_names; + else + thd->variables.lc_time_names= global_system_variables.lc_time_names; } /* @@ -3085,16 +3095,15 @@ static bool set_option_autocommit(THD *thd, set_var *var) if ((org_options & OPTION_NOT_AUTOCOMMIT)) { /* We changed to auto_commit mode */ - thd->options&= ~(ulonglong) (OPTION_BEGIN | - OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG); + thd->options&= ~(ulonglong) (OPTION_BEGIN | OPTION_KEEP_LOG); + thd->no_trans_update.all= FALSE; thd->server_status|= SERVER_STATUS_AUTOCOMMIT; if (ha_commit(thd)) return 1; } else { - thd->options&= ~(ulonglong) (OPTION_STATUS_NO_TRANS_UPDATE); + thd->no_trans_update.all= FALSE; thd->server_status&= ~SERVER_STATUS_AUTOCOMMIT; } } @@ -3642,21 +3651,18 @@ bool sys_var_thd_table_type::update(THD *thd, set_var *var) SYNOPSIS thd in thread handler val in sql_mode value - len out pointer on length of string - - RETURN - pointer to string with sql_mode representation + rep out pointer pointer to string with sql_mode representation */ -byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, - ulonglong val, - ulong *len) +bool +sys_var_thd_sql_mode:: +symbolic_mode_representation(THD *thd, ulonglong val, LEX_STRING *rep) { - char buff[256]; + char buff[STRING_BUFFER_USUAL_SIZE*8]; String tmp(buff, sizeof(buff), &my_charset_latin1); - ulong length; tmp.length(0); + for (uint i= 0; val; val>>= 1, i++) { if (val & 1) @@ -3667,20 +3673,25 @@ byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, } } - if ((length= tmp.length())) - length--; - *len= length; - return (byte*) thd->strmake(tmp.ptr(), length); + if (tmp.length()) + tmp.length(tmp.length() - 1); /* trim the trailing comma */ + + rep->str= thd->strmake(tmp.ptr(), tmp.length()); + + rep->length= rep->str ? tmp.length() : 0; + + return rep->length != tmp.length(); } byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { + LEX_STRING sql_mode; ulonglong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset : thd->variables.*offset); - ulong length_unused; - return symbolic_mode_representation(thd, val, &length_unused); + (void) symbolic_mode_representation(thd, val, &sql_mode); + return (byte *) sql_mode.str; } @@ -4011,24 +4022,13 @@ sys_var_event_scheduler::update(THD *thd, set_var *var) int res; /* here start the thread if not running. 
*/ DBUG_ENTER("sys_var_event_scheduler::update"); - if (Events::opt_event_scheduler == Events::EVENTS_DISABLED) - { - my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--event-scheduler=DISABLED"); - DBUG_RETURN(TRUE); - } - DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value)); - if (var->save_result.ulong_value == Events::EVENTS_ON) - res= Events::get_instance()->start_execution_of_events(); - else if (var->save_result.ulong_value == Events::EVENTS_OFF) - res= Events::get_instance()->stop_execution_of_events(); - else - { - assert(0); // Impossible - } - if (res) - my_error(ER_EVENT_SET_VAR_ERROR, MYF(0)); + enum Events::enum_opt_event_scheduler + new_state= + (enum Events::enum_opt_event_scheduler) var->save_result.ulong_value; + + res= Events::switch_event_scheduler_state(new_state); DBUG_RETURN((bool) res); } @@ -4037,15 +4037,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var) byte *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - int state; - if (Events::opt_event_scheduler == Events::EVENTS_DISABLED) - state= Events::EVENTS_DISABLED; // This should be DISABLED - else if (Events::get_instance()->is_execution_of_events_started()) - state= Events::EVENTS_ON; // This should be ON - else - state= Events::EVENTS_OFF; // This should be OFF - - return (byte*) Events::opt_typelib.type_names[state]; + return (byte *) Events::get_opt_event_scheduler_str(); } diff --git a/sql/set_var.h b/sql/set_var.h index 8887d91ec69..a3963171192 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -61,7 +61,7 @@ public: sys_vars++; } virtual bool check(THD *thd, set_var *var); - bool check_enum(THD *thd, set_var *var, TYPELIB *enum_names); + bool check_enum(THD *thd, set_var *var, const TYPELIB *enum_names); bool check_set(THD *thd, set_var *var, TYPELIB *enum_names); virtual bool update(THD *thd, set_var *var)=0; virtual void set_default(THD *thd_arg, enum_var_type type) {} @@ -169,6 +169,16 @@ public: }; +class sys_var_bool_ptr_readonly :public sys_var_bool_ptr +{ +public: + sys_var_bool_ptr_readonly(const char *name_arg, my_bool *value_arg) + :sys_var_bool_ptr(name_arg, value_arg) + {} + bool is_readonly() const { return 1; } +}; + + class sys_var_str :public sys_var { public: @@ -440,8 +450,8 @@ public: } void set_default(THD *thd, enum_var_type type); byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); - static byte *symbolic_mode_representation(THD *thd, ulonglong sql_mode, - ulong *length); + static bool symbolic_mode_representation(THD *thd, ulonglong sql_mode, + LEX_STRING *rep); }; diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 1d9c7c814fd..03990bbad0e 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -100,155 +100,155 @@ ER_CANT_CREATE_TABLE swe "Kan inte skapa tabellen '%-.200s' (Felkod: %d)" ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_CREATE_DB - cze "Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)" - dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)" - nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)" - eng "Can't create database '%-.64s' (errno: %d)" - jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", - est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)" - fre "Ne peut créer la base '%-.64s' (Erreur %d)" - ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)" - greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)" - ita "Impossibile creare il database '%-.64s' 
(errno: %d)" - jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" - kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)" - por "Não pode criar o banco de dados '%-.64s' (erro no. %d)" - rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)" - slo "Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)" - spa "No puedo crear base de datos '%-.64s' (Error: %d)" - swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + cze "Nemohu vytvo-Bøit databázi '%-.192s' (chybový kód: %d)" + dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %d)" + nla "Kan database '%-.192s' niet aanmaken (Errcode: %d)" + eng "Can't create database '%-.192s' (errno: %d)" + jps "'%-.192s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", + est "Ei suuda luua andmebaasi '%-.192s' (veakood: %d)" + fre "Ne peut créer la base '%-.192s' (Erreur %d)" + ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.192s' (êùäéêüò ëÜèïõò: %d)" + hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il database '%-.192s' (errno: %d)" + jpn "'%-.192s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" + kor "µ¥ÀÌŸº£À̽º '%-.192s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette databasen '%-.192s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette databasen '%-.192s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ bazy danych '%-.192s' (Kod b³êdu: %d)" + por "Não pode criar o banco de dados '%-.192s' (erro no. %d)" + rum "Nu pot sa creez baza de date '%-.192s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.192s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram bazu '%-.192s' (errno: %d)" + slo "Nemô¾em vytvori» databázu '%-.192s' (chybový kód: %d)" + spa "No puedo crear base de datos '%-.192s' (Error: %d)" + swe "Kan inte skapa databasen '%-.192s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.192s' (ÐÏÍÉÌËÁ: %d)" ER_DB_CREATE_EXISTS - cze "Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje" - dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer" - nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds" - eng "Can't create database '%-.64s'; database exists" - jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·", - est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib" - fre "Ne peut créer la base '%-.64s'; elle existe déjà" - ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank existiert bereits" - greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç" - hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik" - ita "Impossibile creare il database '%-.64s'; il database esiste" - jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹" - kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. 
µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ" - nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer" - norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer" - pol "Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje" - por "Não pode criar o banco de dados '%-.64s'; este banco de dados já existe" - rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja" - rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji." - slo "Nemô¾em vytvori» databázu '%-.64s'; databáza existuje" - spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe" - swe "Databasen '%-.64s' existerar redan" - ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤" + cze "Nemohu vytvo-Bøit databázi '%-.192s'; databáze ji¾ existuje" + dan "Kan ikke oprette databasen '%-.192s'; databasen eksisterer" + nla "Kan database '%-.192s' niet aanmaken; database bestaat reeds" + eng "Can't create database '%-.192s'; database exists" + jps "'%-.192s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·", + est "Ei suuda luua andmebaasi '%-.192s': andmebaas juba eksisteerib" + fre "Ne peut créer la base '%-.192s'; elle existe déjà" + ger "Kann Datenbank '%-.192s' nicht erzeugen. Datenbank existiert bereits" + greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.192s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç" + hun "Az '%-.192s' adatbazis nem hozhato letre Az adatbazis mar letezik" + ita "Impossibile creare il database '%-.192s'; il database esiste" + jpn "'%-.192s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹" + kor "µ¥ÀÌŸº£À̽º '%-.192s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ" + nor "Kan ikke opprette databasen '%-.192s'; databasen eksisterer" + norwegian-ny "Kan ikkje opprette databasen '%-.192s'; databasen eksisterer" + pol "Nie mo¿na stworzyæ bazy danych '%-.192s'; baza danych ju¿ istnieje" + por "Não pode criar o banco de dados '%-.192s'; este banco de dados já existe" + rum "Nu pot sa creez baza de date '%-.192s'; baza de date exista deja" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.192s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Ne mogu da kreiram bazu '%-.192s'; baza veæ postoji." + slo "Nemô¾em vytvori» databázu '%-.192s'; databáza existuje" + spa "No puedo crear base de datos '%-.192s'; la base de datos ya existe" + swe "Databasen '%-.192s' existerar redan" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.192s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤" ER_DB_DROP_EXISTS - cze "Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje" - dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke" - nla "Kan database '%-.64s' niet verwijderen; database bestaat niet" - eng "Can't drop database '%-.64s'; database doesn't exist" - jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚Ì‚Å‚·.", - est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri" - fre "Ne peut effacer la base '%-.64s'; elle n'existe pas" - ger "Kann Datenbank '%-.64s' nicht löschen; Datenbank nicht vorhanden" - greek "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé" - hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" - ita "Impossibile cancellare '%-.64s'; il database non esiste" - jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹." - kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. 
µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ " - nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke" - norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje" - pol "Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje" - por "Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe" - rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta" - rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ" - serbian "Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji." - slo "Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje" - spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe" - swe "Kan inte radera databasen '%-.64s'; databasen finns inte" - ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤" + cze "Nemohu zru-B¹it databázi '%-.192s', databáze neexistuje" + dan "Kan ikke slette (droppe) '%-.192s'; databasen eksisterer ikke" + nla "Kan database '%-.192s' niet verwijderen; database bestaat niet" + eng "Can't drop database '%-.192s'; database doesn't exist" + jps "'%-.192s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚Ì‚Å‚·.", + est "Ei suuda kustutada andmebaasi '%-.192s': andmebaasi ei eksisteeri" + fre "Ne peut effacer la base '%-.192s'; elle n'existe pas" + ger "Kann Datenbank '%-.192s' nicht löschen; Datenbank nicht vorhanden" + greek "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.192s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé" + hun "A(z) '%-.192s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" + ita "Impossibile cancellare '%-.192s'; il database non esiste" + jpn "'%-.192s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹." + kor "µ¥ÀÌŸº£À̽º '%-.192s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ " + nor "Kan ikke fjerne (drop) '%-.192s'; databasen eksisterer ikke" + norwegian-ny "Kan ikkje fjerne (drop) '%-.192s'; databasen eksisterer ikkje" + pol "Nie mo¿na usun?æ bazy danych '%-.192s'; baza danych nie istnieje" + por "Não pode eliminar o banco de dados '%-.192s'; este banco de dados não existe" + rum "Nu pot sa drop baza de date '%-.192s'; baza da date este inexistenta" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.192s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ" + serbian "Ne mogu da izbrišem bazu '%-.192s'; baza ne postoji." + slo "Nemô¾em zmaza» databázu '%-.192s'; databáza neexistuje" + spa "No puedo eliminar base de datos '%-.192s'; la base de datos no existe" + swe "Kan inte radera databasen '%-.192s'; databasen finns inte" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.192s'. 
âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤" ER_DB_DROP_DELETE - cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)" - dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)" - nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)" - eng "Error dropping database (can't delete '%-.64s', errno: %d)" - jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)", - est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)" - fre "Ne peut effacer la base '%-.64s' (erreur %d)" - ger "Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehler: %d)" - greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)" - hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)" - ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)" - jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤¤Þ¤»¤ó, errno: %d)" - kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" - nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)" - norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)" - pol "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)" - por "Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)" - rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)" - rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)" - slo "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)" - spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)" - swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)" - ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)" + cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.192s', chyba %d)" + dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.192s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan '%-.192s' niet verwijderen, Errcode: %d)" + eng "Error dropping database (can't delete '%-.192s', errno: %d)" + jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.192s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)", + est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.192s', veakood: %d)" + fre "Ne peut effacer la base '%-.192s' (erreur %d)" + ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.192s', êùäéêüò ëÜèïõò: %d)" + hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %d)" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.192s' ¤òºï½ü¤Ç¤¤Þ¤»¤ó, errno: %d)" + kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.192s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" + nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %d)" + norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %d)" + pol "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.192s', b³?d %d)" + por "Erro ao eliminar banco de dados (não pode eliminar '%-.192s' - erro no. 
%d)" + rum "Eroare dropuind baza de date (nu pot sa sterg '%-.192s', Eroare: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.192s', ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.192s', errno: %d)" + slo "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.192s', chybový kód: %d)" + spa "Error eliminando la base de datos(no puedo borrar '%-.192s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.192s', ÐÏÍÉÌËÁ: %d)" ER_DB_DROP_RMDIR - cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)" - dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)" - nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)" - eng "Error dropping database (can't rmdir '%-.64s', errno: %d)" - jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)", - est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)" - fre "Erreur en effaçant la base (rmdir '%-.64s', erreur %d)" - ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehler: %d)" - greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)" - hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)" - ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)" - jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤¤Þ¤»¤ó, errno: %d)" - kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" - nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)" - norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)" - pol "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)" - por "Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)" - rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)" - slo "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)" - spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)" - swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. 
Felkod: %d)" - ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)" + cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.192s', chyba %d)" + dan "Fejl ved sletting af database (kan ikke slette folderen '%-.192s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan rmdir '%-.192s' niet uitvoeren, Errcode: %d)" + eng "Error dropping database (can't rmdir '%-.192s', errno: %d)" + jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.192s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)", + est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.192s', veakood: %d)" + fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %d)" + ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.192s', êùäéêüò ëÜèïõò: %d)" + hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %d)" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.192s' ¤ò rmdir ¤Ç¤¤Þ¤»¤ó, errno: %d)" + kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.192s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" + nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %d)" + norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %d)" + pol "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.192s', b³?d %d)" + por "Erro ao eliminar banco de dados (não pode remover diretório '%-.192s' - erro no. %d)" + rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.192s', Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.192s', ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.192s', errno: %d)" + slo "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.192s', chybový kód: %d)" + spa "Error eliminando la base de datos (No puedo borrar directorio '%-.192s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.192s', ÐÏÍÉÌËÁ: %d)" ER_CANT_DELETE_FILE - cze "Chyba p-Bøi výmazu '%-.64s' (chybový kód: %d)" - dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)" - nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)" - eng "Error on delete of '%-.64s' (errno: %d)" - jps "'%-.64s' ‚Ì휂ªƒGƒ‰[ (errno: %d)", - est "Viga '%-.64s' kustutamisel (veakood: %d)" - fre "Erreur en effaçant '%-.64s' (Errcode: %d)" - ger "Fehler beim Löschen von '%-.64s' (Fehler: %d)" - greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Torlesi hiba: '%-.64s' (hibakod: %d)" - ita "Errore durante la cancellazione di '%-.64s' (errno: %d)" - jpn "'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)" - kor "'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" - nor "Feil ved sletting av '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)" - pol "B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)" - por "Erro na remoção de '%-.64s' (erro no. 
%d)" - rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)" - rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Greška pri brisanju '%-.64s' (errno: %d)" - slo "Chyba pri mazaní '%-.64s' (chybový kód: %d)" - spa "Error en el borrado de '%-.64s' (Error: %d)" - swe "Kan inte radera filen '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + cze "Chyba p-Bøi výmazu '%-.192s' (chybový kód: %d)" + dan "Fejl ved sletning af '%-.192s' (Fejlkode: %d)" + nla "Fout bij het verwijderen van '%-.192s' (Errcode: %d)" + eng "Error on delete of '%-.192s' (errno: %d)" + jps "'%-.192s' ‚Ì휂ªƒGƒ‰[ (errno: %d)", + est "Viga '%-.192s' kustutamisel (veakood: %d)" + fre "Erreur en effaçant '%-.192s' (Errcode: %d)" + ger "Fehler beim Löschen von '%-.192s' (Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.192s' (êùäéêüò ëÜèïõò: %d)" + hun "Torlesi hiba: '%-.192s' (hibakod: %d)" + ita "Errore durante la cancellazione di '%-.192s' (errno: %d)" + jpn "'%-.192s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)" + kor "'%-.192s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved sletting av '%-.192s' (Feilkode: %d)" + norwegian-ny "Feil ved sletting av '%-.192s' (Feilkode: %d)" + pol "B³?d podczas usuwania '%-.192s' (Kod b³êdu: %d)" + por "Erro na remoção de '%-.192s' (erro no. %d)" + rum "Eroare incercind sa delete '%-.192s' (Eroare: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.192s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri brisanju '%-.192s' (errno: %d)" + slo "Chyba pri mazaní '%-.192s' (chybový kód: %d)" + spa "Error en el borrado de '%-.192s' (Error: %d)" + swe "Kan inte radera filen '%-.192s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.192s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_FIND_SYSTEM_REC cze "Nemohu -Bèíst záznam v systémové tabulce" dan "Kan ikke læse posten i systemfolderen" @@ -400,78 +400,78 @@ ER_FILE_NOT_FOUND swe "Hittar inte filen '%-.200s' (Felkod: %d)" ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_READ_DIR - cze "Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)" - dan "Kan ikke læse folder '%-.64s' (Fejlkode: %d)" - nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)" - eng "Can't read dir of '%-.64s' (errno: %d)" - jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚ª“Ç‚ß‚Ü‚¹‚ñ.(errno: %d)", - est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)" - fre "Ne peut lire le répertoire de '%-.64s' (Errcode: %d)" - ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)" - greek "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)" - ita "Impossibile leggere la directory di '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬Æɤá¤Þ¤»¤ó.(errno: %d)" - kor "'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)" - por "Não pode ler o diretório de '%-.64s' (erro no. 
%d)" - rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)" - slo "Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)" - spa "No puedo leer el directorio de '%-.64s' (Error: %d)" - swe "Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + cze "Nemohu -Bèíst adresáø '%-.192s' (chybový kód: %d)" + dan "Kan ikke læse folder '%-.192s' (Fejlkode: %d)" + nla "Kan de directory niet lezen van '%-.192s' (Errcode: %d)" + eng "Can't read dir of '%-.192s' (errno: %d)" + jps "'%-.192s' ƒfƒBƒŒƒNƒgƒŠ‚ª“Ç‚ß‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda lugeda kataloogi '%-.192s' (veakood: %d)" + fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %d)" + ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %d)" + greek "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.192s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %d)" + ita "Impossibile leggere la directory di '%-.192s' (errno: %d)" + jpn "'%-.192s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬Æɤá¤Þ¤»¤ó.(errno: %d)" + kor "'%-.192s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese katalogen '%-.192s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese katalogen '%-.192s' (Feilkode: %d)" + pol "Nie mo¿na odczytaæ katalogu '%-.192s' (Kod b³êdu: %d)" + por "Não pode ler o diretório de '%-.192s' (erro no. %d)" + rum "Nu pot sa citesc directorul '%-.192s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.192s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da proèitam direktorijum '%-.192s' (errno: %d)" + slo "Nemô¾em èíta» adresár '%-.192s' (chybový kód: %d)" + spa "No puedo leer el directorio de '%-.192s' (Error: %d)" + swe "Kan inte läsa från bibliotek '%-.192s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.192s' (ÐÏÍÉÌËÁ: %d)" ER_CANT_SET_WD - cze "Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)" - dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)" - nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)" - eng "Can't change dir to '%-.64s' (errno: %d)" - jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", - est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)" - fre "Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)" - ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)" - greek "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)" - ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)" - kor "'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" - nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)" - norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)" - pol "Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)" - por "Não pode mudar para o diretório '%-.64s' (erro no. 
%d)" - rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)" - rus "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)" - slo "Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)" - spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)" - swe "Kan inte byta till '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + cze "Nemohu zm-Bìnit adresáø na '%-.192s' (chybový kód: %d)" + dan "Kan ikke skifte folder til '%-.192s' (Fejlkode: %d)" + nla "Kan de directory niet veranderen naar '%-.192s' (Errcode: %d)" + eng "Can't change dir to '%-.192s' (errno: %d)" + jps "'%-.192s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda siseneda kataloogi '%-.192s' (veakood: %d)" + fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %d)" + ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %d)" + greek "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.192s' (êùäéêüò ëÜèïõò: %d)" + hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %d)" + ita "Impossibile cambiare la directory in '%-.192s' (errno: %d)" + jpn "'%-.192s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)" + kor "'%-.192s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke skifte katalog til '%-.192s' (Feilkode: %d)" + norwegian-ny "Kan ikkje skifte katalog til '%-.192s' (Feilkode: %d)" + pol "Nie mo¿na zmieniæ katalogu na '%-.192s' (Kod b³êdu: %d)" + por "Não pode mudar para o diretório '%-.192s' (erro no. %d)" + rum "Nu pot sa schimb directorul '%-.192s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.192s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da promenim direktorijum na '%-.192s' (errno: %d)" + slo "Nemô¾em vojs» do adresára '%-.192s' (chybový kód: %d)" + spa "No puedo cambiar al directorio de '%-.192s' (Error: %d)" + swe "Kan inte byta till '%-.192s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.192s' (ÐÏÍÉÌËÁ: %d)" ER_CHECKREAD - cze "Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'" - dan "Posten er ændret siden sidste læsning '%-.64s'" - nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'" - eng "Record has changed since last read in table '%-.64s'" - est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik" - fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'" - ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert" - greek "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'" - hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" - ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'" - kor "Å×À̺í '%-.64s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù." 
- nor "Posten har blitt endret siden den ble lest '%-.64s'" - norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'" - pol "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'" - por "Registro alterado desde a última leitura da tabela '%-.64s'" - rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'" - rus "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'" - serbian "Slog je promenjen od zadnjeg èitanja tabele '%-.64s'" - slo "Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'" - spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'" - swe "Posten har förändrats sedan den lästes i register '%-.64s'" - ukr "úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'" + cze "Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.192s'" + dan "Posten er ændret siden sidste læsning '%-.192s'" + nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.192s'" + eng "Record has changed since last read in table '%-.192s'" + est "Kirje tabelis '%-.192s' on muutunud viimasest lugemisest saadik" + fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.192s'" + ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.192s' geändert" + greek "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.192s'" + hun "A(z) '%-.192s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" + ita "Il record e` cambiato dall'ultima lettura della tabella '%-.192s'" + kor "Å×À̺í '%-.192s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù." + nor "Posten har blitt endret siden den ble lest '%-.192s'" + norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.192s'" + pol "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.192s'" + por "Registro alterado desde a última leitura da tabela '%-.192s'" + rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.192s'" + rus "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.192s'" + serbian "Slog je promenjen od zadnjeg èitanja tabele '%-.192s'" + slo "Záznam bol zmenený od posledného èítania v tabuµke '%-.192s'" + spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.192s'" + swe "Posten har förändrats sedan den lästes i register '%-.192s'" + ukr "úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.192s'" ER_DISK_FULL cze "Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ..." dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..." @@ -498,53 +498,53 @@ ER_DISK_FULL swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..." ukr "äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). ÷ÉÞÉËÕÀ, ÄÏËÉ ÚצÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ..." ER_DUP_KEY 23000 - cze "Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'" - dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'" - nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'" - eng "Can't write; duplicate key in table '%-.64s'" - jps "table '%-.64s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚ß‚Ü‚¹‚ñ", - est "Ei saa kirjutada, korduv võti tabelis '%-.64s'" - fre "Ecriture impossible, doublon dans une clé de la table '%-.64s'" - ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'" - greek "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'" - hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban." 
- ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'" - jpn "table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤¤³¤á¤Þ¤»¤ó" - kor "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼ Áߺ¹ Å°" - nor "Kan ikke skrive, flere like nøkler i tabellen '%-.64s'" - norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'" - pol "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'" - por "Não pode gravar. Chave duplicada na tabela '%-.64s'" - rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'" - rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'" - serbian "Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'" - slo "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'" - spa "No puedo escribir, clave duplicada en la tabla '%-.64s'" - swe "Kan inte skriva, dubbel söknyckel i register '%-.64s'" - ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'" + cze "Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.192s'" + dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.192s'" + nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.192s'" + eng "Can't write; duplicate key in table '%-.192s'" + jps "table '%-.192s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚ß‚Ü‚¹‚ñ", + est "Ei saa kirjutada, korduv võti tabelis '%-.192s'" + fre "Ecriture impossible, doublon dans une clé de la table '%-.192s'" + ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.192s'" + greek "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.192s'" + hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban." + ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'" + jpn "table '%-.192s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤¤³¤á¤Þ¤»¤ó" + kor "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.192s'¿¡¼ Áߺ¹ Å°" + nor "Kan ikke skrive, flere like nøkler i tabellen '%-.192s'" + norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.192s'" + pol "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.192s'" + por "Não pode gravar. Chave duplicada na tabela '%-.192s'" + rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.192s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.192s'" + serbian "Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.192s'" + slo "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.192s'" + spa "No puedo escribir, clave duplicada en la tabla '%-.192s'" + swe "Kan inte skriva, dubbel söknyckel i register '%-.192s'" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.192s'" ER_ERROR_ON_CLOSE - cze "Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)" - dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)" - nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)" - eng "Error on close of '%-.64s' (errno: %d)" - est "Viga faili '%-.64s' sulgemisel (veakood: %d)" - fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)" - ger "Fehler beim Schließen von '%-.64s' (Fehler: %d)" - greek "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)" - ita "Errore durante la chiusura di '%-.64s' (errno: %d)" - kor "'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" - nor "Feil ved lukking av '%-.64s' (Feilkode: %d)" - norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)" - pol "B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)" - por "Erro ao fechar '%-.64s' (erro no. 
%d)" - rum "Eroare inchizind '%-.64s' (errno: %d)" - rus "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Greška pri zatvaranju '%-.64s' (errno: %d)" - slo "Chyba pri zatváraní '%-.64s' (chybový kód: %d)" - spa "Error en el cierre de '%-.64s' (Error: %d)" - swe "Fick fel vid stängning av '%-.64s' (Felkod: %d)" - ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + cze "Chyba p-Bøi zavírání '%-.192s' (chybový kód: %d)" + dan "Fejl ved lukning af '%-.192s' (Fejlkode: %d)" + nla "Fout bij het sluiten van '%-.192s' (Errcode: %d)" + eng "Error on close of '%-.192s' (errno: %d)" + est "Viga faili '%-.192s' sulgemisel (veakood: %d)" + fre "Erreur a la fermeture de '%-.192s' (Errcode: %d)" + ger "Fehler beim Schließen von '%-.192s' (Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.192s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %d)" + ita "Errore durante la chiusura di '%-.192s' (errno: %d)" + kor "'%-.192s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved lukking av '%-.192s' (Feilkode: %d)" + norwegian-ny "Feil ved lukking av '%-.192s' (Feilkode: %d)" + pol "B³?d podczas zamykania '%-.192s' (Kod b³êdu: %d)" + por "Erro ao fechar '%-.192s' (erro no. %d)" + rum "Eroare inchizind '%-.192s' (errno: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.192s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri zatvaranju '%-.192s' (errno: %d)" + slo "Chyba pri zatváraní '%-.192s' (chybový kód: %d)" + spa "Error en el cierre de '%-.192s' (Error: %d)" + swe "Fick fel vid stängning av '%-.192s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.192s' (ÐÏÍÉÌËÁ: %d)" ER_ERROR_ON_READ cze "Chyba p-Bøi ètení souboru '%-.200s' (chybový kód: %d)" dan "Fejl ved læsning af '%-.200s' (Fejlkode: %d)" @@ -621,30 +621,30 @@ ER_ERROR_ON_WRITE swe "Fick fel vid skrivning till '%-.200s' (Felkod %d)" ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" ER_FILE_USED - cze "'%-.64s' je zam-Bèen proti zmìnám" - dan "'%-.64s' er låst mod opdateringer" - nla "'%-.64s' is geblokeerd tegen veranderingen" - eng "'%-.64s' is locked against change" - jps "'%-.64s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·", - est "'%-.64s' on lukustatud muudatuste vastu" - fre "'%-.64s' est verrouillé contre les modifications" - ger "'%-.64s' ist für Änderungen gesperrt" - greek "'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò" - hun "'%-.64s' a valtoztatas ellen zarolva" - ita "'%-.64s' e` soggetto a lock contro i cambiamenti" - jpn "'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹" - kor "'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù." 
- nor "'%-.64s' er låst mot oppdateringer" - norwegian-ny "'%-.64s' er låst mot oppdateringar" - pol "'%-.64s' jest zablokowany na wypadek zmian" - por "'%-.64s' está com travamento contra alterações" - rum "'%-.64s' este blocat pentry schimbari (loccked against change)" - rus "'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ" - serbian "'%-.64s' je zakljuèan za upis" - slo "'%-.64s' je zamknutý proti zmenám" - spa "'%-.64s' esta bloqueado contra cambios" - swe "'%-.64s' är låst mot användning" - ukr "'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ" + cze "'%-.192s' je zam-Bèen proti zmìnám" + dan "'%-.192s' er låst mod opdateringer" + nla "'%-.192s' is geblokeerd tegen veranderingen" + eng "'%-.192s' is locked against change" + jps "'%-.192s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·", + est "'%-.192s' on lukustatud muudatuste vastu" + fre "'%-.192s' est verrouillé contre les modifications" + ger "'%-.192s' ist für Änderungen gesperrt" + greek "'%-.192s' äåí åðéôñÝðïíôáé áëëáãÝò" + hun "'%-.192s' a valtoztatas ellen zarolva" + ita "'%-.192s' e` soggetto a lock contro i cambiamenti" + jpn "'%-.192s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹" + kor "'%-.192s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù." + nor "'%-.192s' er låst mot oppdateringer" + norwegian-ny "'%-.192s' er låst mot oppdateringar" + pol "'%-.192s' jest zablokowany na wypadek zmian" + por "'%-.192s' está com travamento contra alterações" + rum "'%-.192s' este blocat pentry schimbari (loccked against change)" + rus "'%-.192s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ" + serbian "'%-.192s' je zakljuèan za upis" + slo "'%-.192s' je zamknutý proti zmenám" + spa "'%-.192s' esta bloqueado contra cambios" + swe "'%-.192s' är låst mot användning" + ukr "'%-.192s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ" ER_FILSORT_ABORT cze "T-Bøídìní pøeru¹eno" dan "Sortering afbrudt" @@ -671,30 +671,30 @@ ER_FILSORT_ABORT swe "Sorteringen avbruten" ukr "óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ" ER_FORM_NOT_FOUND - cze "Pohled '%-.64s' pro '%-.64s' neexistuje" - dan "View '%-.64s' eksisterer ikke for '%-.64s'" - nla "View '%-.64s' bestaat niet voor '%-.64s'" - eng "View '%-.64s' doesn't exist for '%-.64s'" - jps "View '%-.64s' ‚ª '%-.64s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", - est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks" - fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'" - ger "View '%-.64s' existiert für '%-.64s' nicht" - greek "Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'" - hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz" - ita "La view '%-.64s' non esiste per '%-.64s'" - jpn "View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" - kor "ºä '%-.64s'°¡ '%-.64s'¿¡¼´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù." 
- nor "View '%-.64s' eksisterer ikke for '%-.64s'" - norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'" - pol "Widok '%-.64s' nie istnieje dla '%-.64s'" - por "Visão '%-.64s' não existe para '%-.64s'" - rum "View '%-.64s' nu exista pentru '%-.64s'" - rus "ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'" - serbian "View '%-.64s' ne postoji za '%-.64s'" - slo "Pohµad '%-.64s' neexistuje pre '%-.64s'" - spa "La vista '%-.64s' no existe para '%-.64s'" - swe "Formulär '%-.64s' finns inte i '%-.64s'" - ukr "÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'" + cze "Pohled '%-.192s' pro '%-.192s' neexistuje" + dan "View '%-.192s' eksisterer ikke for '%-.192s'" + nla "View '%-.192s' bestaat niet voor '%-.192s'" + eng "View '%-.192s' doesn't exist for '%-.192s'" + jps "View '%-.192s' ‚ª '%-.192s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Vaade '%-.192s' ei eksisteeri '%-.192s' jaoks" + fre "La vue (View) '%-.192s' n'existe pas pour '%-.192s'" + ger "View '%-.192s' existiert für '%-.192s' nicht" + greek "Ôï View '%-.192s' äåí õðÜñ÷åé ãéá '%-.192s'" + hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz" + ita "La view '%-.192s' non esiste per '%-.192s'" + jpn "View '%-.192s' ¤¬ '%-.192s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "ºä '%-.192s'°¡ '%-.192s'¿¡¼´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù." + nor "View '%-.192s' eksisterer ikke for '%-.192s'" + norwegian-ny "View '%-.192s' eksisterar ikkje for '%-.192s'" + pol "Widok '%-.192s' nie istnieje dla '%-.192s'" + por "Visão '%-.192s' não existe para '%-.192s'" + rum "View '%-.192s' nu exista pentru '%-.192s'" + rus "ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.192s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.192s'" + serbian "View '%-.192s' ne postoji za '%-.192s'" + slo "Pohµad '%-.192s' neexistuje pre '%-.192s'" + spa "La vista '%-.192s' no existe para '%-.192s'" + swe "Formulär '%-.192s' finns inte i '%-.192s'" + ukr "÷ÉÇÌÑÄ '%-.192s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.192s'" ER_GET_ERRNO cze "Obsluha tabulky vr-Bátila chybu %d" dan "Modtog fejl %d fra tabel håndteringen" @@ -720,54 +720,54 @@ ER_GET_ERRNO swe "Fick felkod %d från databashanteraren" ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ" ER_ILLEGAL_HA - cze "Obsluha tabulky '%-.64s' nem-Bá tento parametr" - dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'" - nla "Tabel handler voor '%-.64s' heeft deze optie niet" - eng "Table storage engine for '%-.64s' doesn't have this option" - est "Tabeli '%-.64s' handler ei toeta antud operatsiooni" - fre "Le handler de la table '%-.64s' n'a pas cette option" - ger "Diese Option gibt es nicht (Speicher-Engine für '%-.64s')" - greek "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ" - hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja" - ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione" - jpn "Table handler for '%-.64s' doesn't have this option" - kor "'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù." 
- nor "Tabell håndtereren for '%-.64s' har ikke denne muligheten" - norwegian-ny "Tabell håndteraren for '%-.64s' har ikkje denne moglegheita" - pol "Obs³uga tabeli '%-.64s' nie posiada tej opcji" - por "Manipulador de tabela para '%-.64s' não tem esta opção" - rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune" - rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ" - serbian "Handler tabela za '%-.64s' nema ovu opciju" - slo "Obsluha tabuµky '%-.64s' nemá tento parameter" - spa "El manejador de la tabla de '%-.64s' no tiene esta opcion" - swe "Tabellhanteraren for tabell '%-.64s' stödjer ej detta" - ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦" + cze "Obsluha tabulky '%-.192s' nem-Bá tento parametr" + dan "Denne mulighed eksisterer ikke for tabeltypen '%-.192s'" + nla "Tabel handler voor '%-.192s' heeft deze optie niet" + eng "Table storage engine for '%-.192s' doesn't have this option" + est "Tabeli '%-.192s' handler ei toeta antud operatsiooni" + fre "Le handler de la table '%-.192s' n'a pas cette option" + ger "Diese Option gibt es nicht (Speicher-Engine für '%-.192s')" + greek "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.192s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ" + hun "A(z) '%-.192s' tablakezelonek nincs ilyen opcioja" + ita "Il gestore delle tabelle per '%-.192s' non ha questa opzione" + jpn "Table handler for '%-.192s' doesn't have this option" + kor "'%-.192s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù." + nor "Tabell håndtereren for '%-.192s' har ikke denne muligheten" + norwegian-ny "Tabell håndteraren for '%-.192s' har ikkje denne moglegheita" + pol "Obs³uga tabeli '%-.192s' nie posiada tej opcji" + por "Manipulador de tabela para '%-.192s' não tem esta opção" + rum "Handlerul tabelei pentru '%-.192s' nu are aceasta optiune" + rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.192s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ" + serbian "Handler tabela za '%-.192s' nema ovu opciju" + slo "Obsluha tabuµky '%-.192s' nemá tento parameter" + spa "El manejador de la tabla de '%-.192s' no tiene esta opcion" + swe "Tabellhanteraren for tabell '%-.192s' stödjer ej detta" + ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.192s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦" ER_KEY_NOT_FOUND - cze "Nemohu naj-Bít záznam v '%-.64s'" - dan "Kan ikke finde posten i '%-.64s'" - nla "Kan record niet vinden in '%-.64s'" - eng "Can't find record in '%-.64s'" - jps "'%-.64s'‚Ì‚È‚©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ", - est "Ei suuda leida kirjet '%-.64s'-s" - fre "Ne peut trouver l'enregistrement dans '%-.64s'" - ger "Kann Datensatz in '%-.64s' nicht finden" - greek "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'" - hun "Nem talalhato a rekord '%-.64s'-ben" - ita "Impossibile trovare il record in '%-.64s'" - jpn "'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó" - kor "'%-.64s'¿¡¼ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù." 
- nor "Kan ikke finne posten i '%-.64s'" - norwegian-ny "Kan ikkje finne posten i '%-.64s'" - pol "Nie mo¿na znale¥æ rekordu w '%-.64s'" - por "Não pode encontrar registro em '%-.64s'" - rum "Nu pot sa gasesc recordul in '%-.64s'" - rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'" - serbian "Ne mogu da pronaðem slog u '%-.64s'" - slo "Nemô¾em nájs» záznam v '%-.64s'" - spa "No puedo encontrar el registro en '%-.64s'" - swe "Hittar inte posten '%-.64s'" - ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'" + cze "Nemohu naj-Bít záznam v '%-.192s'" + dan "Kan ikke finde posten i '%-.192s'" + nla "Kan record niet vinden in '%-.192s'" + eng "Can't find record in '%-.192s'" + jps "'%-.192s'‚Ì‚È‚©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ", + est "Ei suuda leida kirjet '%-.192s'-s" + fre "Ne peut trouver l'enregistrement dans '%-.192s'" + ger "Kann Datensatz in '%-.192s' nicht finden" + greek "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.192s'" + hun "Nem talalhato a rekord '%-.192s'-ben" + ita "Impossibile trovare il record in '%-.192s'" + jpn "'%-.192s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó" + kor "'%-.192s'¿¡¼ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù." + nor "Kan ikke finne posten i '%-.192s'" + norwegian-ny "Kan ikkje finne posten i '%-.192s'" + pol "Nie mo¿na znale¥æ rekordu w '%-.192s'" + por "Não pode encontrar registro em '%-.192s'" + rum "Nu pot sa gasesc recordul in '%-.192s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.192s'" + serbian "Ne mogu da pronaðem slog u '%-.192s'" + slo "Nemô¾em nájs» záznam v '%-.192s'" + spa "No puedo encontrar el registro en '%-.192s'" + swe "Hittar inte posten '%-.192s'" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.192s'" ER_NOT_FORM_FILE cze "Nespr-Bávná informace v souboru '%-.200s'" dan "Forkert indhold i: '%-.200s'" @@ -819,55 +819,55 @@ ER_NOT_KEYFILE swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation" ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.200s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ" ER_OLD_KEYFILE - cze "Star-Bý klíèový soubor pro '%-.64s'; opravte ho." - dan "Gammel indeksfil for tabellen '%-.64s'; reparer den" - nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!" - eng "Old key file for table '%-.64s'; repair it!" - jps "'%-.64s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", - est "Tabeli '%-.64s' võtmefail on aegunud; paranda see!" - fre "Vieux fichier d'index pour la table '%-.64s'; réparez le!" - ger "Alte Index-Datei für Tabelle '%-.64s'. Bitte reparieren" - greek "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!" - hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!" - ita "File chiave vecchio per la tabella '%-.64s'; riparalo!" - jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" - kor "'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" - nor "Gammel nøkkelfil for tabellen '%-.64s'; reparer den!" - norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!" - pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!" - por "Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!" - rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!" - rus "óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!" - serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga" - slo "Starý kµúèový súbor pre '%-.64s'; opravte ho!" - spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!" - swe "Gammal nyckelfil '%-.64s'; reparera registret" - ukr "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏצÔØ ÊÏÇÏ!" + cze "Star-Bý klíèový soubor pro '%-.192s'; opravte ho." 
+ dan "Gammel indeksfil for tabellen '%-.192s'; reparer den" + nla "Oude zoeksleutel file voor tabel '%-.192s'; repareer het!" + eng "Old key file for table '%-.192s'; repair it!" + jps "'%-.192s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", + est "Tabeli '%-.192s' võtmefail on aegunud; paranda see!" + fre "Vieux fichier d'index pour la table '%-.192s'; réparez le!" + ger "Alte Index-Datei für Tabelle '%-.192s'. Bitte reparieren" + greek "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.192s'; Ðáñáêáëþ, äéïñèþóôå ôï!" + hun "Regi kulcsfile a '%-.192s'tablahoz; probalja kijavitani!" + ita "File chiave vecchio per la tabella '%-.192s'; riparalo!" + jpn "'%-.192s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" + kor "'%-.192s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" + nor "Gammel nøkkelfil for tabellen '%-.192s'; reparer den!" + norwegian-ny "Gammel nykkelfil for tabellen '%-.192s'; reparer den!" + pol "Plik kluczy dla tabeli '%-.192s' jest starego typu; napraw go!" + por "Arquivo de índice desatualizado para tabela '%-.192s'; repare-o!" + rum "Cheia fisierului e veche pentru tabela '%-.192s'; repar-o!" + rus "óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.192s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!" + serbian "Zastareo key file za tabelu '%-.192s'; ispravite ga" + slo "Starý kµúèový súbor pre '%-.192s'; opravte ho!" + spa "Clave de archivo antigua para la tabla '%-.192s'; reparelo!" + swe "Gammal nyckelfil '%-.192s'; reparera registret" + ukr "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.192s'; ÷¦ÄÎÏצÔØ ÊÏÇÏ!" ER_OPEN_AS_READONLY - cze "'%-.64s' je jen pro -Bètení" - dan "'%-.64s' er skrivebeskyttet" - nla "'%-.64s' is alleen leesbaar" - eng "Table '%-.64s' is read only" - jps "'%-.64s' ‚Í“Ç‚Ýž‚Ýê—p‚Å‚·", - est "Tabel '%-.64s' on ainult lugemiseks" - fre "'%-.64s' est en lecture seulement" - ger "Tabelle '%-.64s' ist nur lesbar" - greek "'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç" - hun "'%-.64s' irasvedett" - ita "'%-.64s' e` di sola lettura" - jpn "'%-.64s' ¤ÏÆɤ߹þ¤ßÀìÍѤǤ¹" - kor "Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù." - nor "'%-.64s' er skrivebeskyttet" - norwegian-ny "'%-.64s' er skrivetryggja" - pol "'%-.64s' jest tylko do odczytu" - por "Tabela '%-.64s' é somente para leitura" - rum "Tabela '%-.64s' e read-only" - rus "ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ" - serbian "Tabelu '%-.64s' je dozvoljeno samo èitati" - slo "'%-.64s' is èíta» only" - spa "'%-.64s' es de solo lectura" - swe "'%-.64s' är skyddad mot förändring" - ukr "ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ" + cze "'%-.192s' je jen pro -Bètení" + dan "'%-.192s' er skrivebeskyttet" + nla "'%-.192s' is alleen leesbaar" + eng "Table '%-.192s' is read only" + jps "'%-.192s' ‚Í“Ç‚Ýž‚Ýê—p‚Å‚·", + est "Tabel '%-.192s' on ainult lugemiseks" + fre "'%-.192s' est en lecture seulement" + ger "Tabelle '%-.192s' ist nur lesbar" + greek "'%-.192s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç" + hun "'%-.192s' irasvedett" + ita "'%-.192s' e` di sola lettura" + jpn "'%-.192s' ¤ÏÆɤ߹þ¤ßÀìÍѤǤ¹" + kor "Å×À̺í '%-.192s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù." + nor "'%-.192s' er skrivebeskyttet" + norwegian-ny "'%-.192s' er skrivetryggja" + pol "'%-.192s' jest tylko do odczytu" + por "Tabela '%-.192s' é somente para leitura" + rum "Tabela '%-.192s' e read-only" + rus "ôÁÂÌÉÃÁ '%-.192s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ" + serbian "Tabelu '%-.192s' je dozvoljeno samo èitati" + slo "'%-.192s' is èíta» only" + spa "'%-.192s' es de solo lectura" + swe "'%-.192s' är skyddad mot förändring" + ukr "ôÁÂÌÉÃÑ '%-.192s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ" ER_OUTOFMEMORY HY001 S1001 cze "M-Bálo pamìti. 
Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)" dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)" @@ -919,30 +919,30 @@ ER_OUT_OF_SORTMEMORY HY001 S1001 swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna" ukr "âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ Ú¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ" ER_UNEXPECTED_EOF - cze "Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)" - dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)" - nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)" - eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)" - jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð“Ç‚Ýž‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)", - est "Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)" - fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)" - ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)" - greek "ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" - hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)" - ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)" - jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆɤ߹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)" - kor "'%-.64s' ÈÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)" - nor "Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)" - norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)" - pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)" - por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)" - rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)" - rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)" - serbian "Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)" - slo "Neoèakávaný koniec súboru pri èítaní '%-.64s' (chybový kód: %d)" - spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)" - swe "Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)" - ukr "èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" + cze "Neo-Bèekávaný konec souboru pøi ètení '%-.192s' (chybový kód: %d)" + dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.192s' (Fejlkode: %d)" + nla "Onverwachte eof gevonden tijdens het lezen van file '%-.192s' (Errcode: %d)" + eng "Unexpected EOF found when reading file '%-.192s' (errno: %d)" + jps "'%-.192s' ƒtƒ@ƒCƒ‹‚ð“Ç‚Ýž‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)", + est "Ootamatu faililõpumärgend faili '%-.192s' lugemisel (veakood: %d)" + fre "Fin de fichier inattendue en lisant '%-.192s' (Errcode: %d)" + ger "Unerwartetes Ende beim Lesen der Datei '%-.192s' (Fehler: %d)" + greek "ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.192s' (êùäéêüò ëÜèïõò: %d)" + hun "Varatlan filevege-jel a '%-.192s'olvasasakor. (hibakod: %d)" + ita "Fine del file inaspettata durante la lettura del file '%-.192s' (errno: %d)" + jpn "'%-.192s' ¥Õ¥¡¥¤¥ë¤òÆɤ߹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)" + kor "'%-.192s' ÈÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)" + nor "Uventet slutt på fil (eof) ved lesing av filen '%-.192s' (Feilkode: %d)" + norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.192s' (Feilkode: %d)" + pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.192s' (Kod b³êdu: %d)" + por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.192s' (erro no. 
%d)" + rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.192s' (errno: %d)" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.192s' (ÏÛÉÂËÁ: %d)" + serbian "Neoèekivani kraj pri èitanju file-a '%-.192s' (errno: %d)" + slo "Neoèakávaný koniec súboru pri èítaní '%-.192s' (chybový kód: %d)" + spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.192s' (Error: %d)" + swe "Oväntat filslut vid läsning från '%-.192s' (Felkod: %d)" + ukr "èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.192s' (ÐÏÍÉÌËÁ: %d)" ER_CON_COUNT_ERROR 08004 cze "P-Bøíli¹ mnoho spojení" dan "For mange forbindelser (connections)" @@ -1041,53 +1041,53 @@ ER_HANDSHAKE_ERROR 08S01 swe "Fel vid initiering av kommunikationen med klienten" ukr "îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ" ER_DBACCESS_DENIED_ERROR 42000 - cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen" - dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'" - nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'" - eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'" - jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚Ì '%-.64s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·", - est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'" - fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. Base '%-.64s'" - ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'" - greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'" - hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz." - ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'" - jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹" - kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù." - nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet" - norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta" - por "Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'" - rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'" - rus "äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ" - serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'" - slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'" - spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'" - swe "Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s" - ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'" + cze "P-Bøístup pro u¾ivatele '%-.48s'@'%-.64s' k databázi '%-.192s' není povolen" + dan "Adgang nægtet bruger: '%-.48s'@'%-.64s' til databasen '%-.192s'" + nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s' naar database '%-.192s'" + eng "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'" + jps "ƒ†[ƒU[ '%-.48s'@'%-.64s' ‚Ì '%-.192s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·", + est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s' andmebaasile '%-.192s'" + fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s'. Base '%-.192s'" + ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.192s'" + greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.48s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.192s'" + hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz." 
+ ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s' al database '%-.192s'" + jpn "¥æ¡¼¥¶¡¼ '%-.48s'@'%-.64s' ¤Î '%-.192s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹" + kor "'%-.48s'@'%-.64s' »ç¿ëÀÚ´Â '%-.192s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù." + nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s' til databasen '%-.192s' nektet" + norwegian-ny "Tilgang ikkje tillate for brukar: '%-.48s'@'%-.64s' til databasen '%-.192s' nekta" + por "Acesso negado para o usuário '%-.48s'@'%-.64s' ao banco de dados '%-.192s'" + rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s' la baza de date '%-.192s'" + rus "äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.48s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.192s' ÚÁËÒÙÔ" + serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s' za bazu '%-.192s'" + slo "Zakázaný prístup pre u¾ívateµa: '%-.48s'@'%-.64s' k databázi '%-.192s'" + spa "Acceso negado para usuario: '%-.48s'@'%-.64s' para la base de datos '%-.192s'" + swe "Användare '%-.48s'@'%-.64s' är ej berättigad att använda databasen %-.192s" + ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.48s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.192s'" ER_ACCESS_DENIED_ERROR 28000 - cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)" - dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)" - nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)" - eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)" - jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)", - est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)" - fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)" - ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)" - greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)" - hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)" - ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)" - jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)" - kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. 
(using password: %s)" - nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)" - norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)" - por "Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)" - rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)" - rus "äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)" - serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')" - slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)" - spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)" - swe "Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)" - ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)" + cze "P-Bøístup pro u¾ivatele '%-.48s'@'%-.64s' (s heslem %s)" + dan "Adgang nægtet bruger: '%-.48s'@'%-.64s' (Bruger adgangskode: %s)" + nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s' (Wachtwoord gebruikt: %s)" + eng "Access denied for user '%-.48s'@'%-.64s' (using password: %s)" + jps "ƒ†[ƒU[ '%-.48s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)", + est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s' (kasutab parooli: %s)" + fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s' (mot de passe: %s)" + ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)" + greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.48s'@'%-.64s' (÷ñÞóç password: %s)" + hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)" + ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s' (Password: %s)" + jpn "¥æ¡¼¥¶¡¼ '%-.48s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)" + kor "'%-.48s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)" + nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s' (Bruker passord: %s)" + norwegian-ny "Tilgang ikke tillate for brukar: '%-.48s'@'%-.64s' (Brukar passord: %s)" + por "Acesso negado para o usuário '%-.48s'@'%-.64s' (senha usada: %s)" + rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s' (Folosind parola: %s)" + rus "äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.48s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)" + serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s' (koristi lozinku: '%s')" + slo "Zakázaný prístup pre u¾ívateµa: '%-.48s'@'%-.64s' (pou¾itie hesla: %s)" + spa "Acceso negado para usuario: '%-.48s'@'%-.64s' (Usando clave: %s)" + swe "Användare '%-.48s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)" + ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.48s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)" ER_NO_DB_ERROR 3D000 cze "Nebyla vybr-Bána ¾ádná databáze" dan "Ingen database valgt" @@ -1139,80 +1139,80 @@ ER_UNKNOWN_COM_ERROR 08S01 swe "Okänt commando" ukr "îÅצÄÏÍÁ ËÏÍÁÎÄÁ" ER_BAD_NULL_ERROR 23000 - cze "Sloupec '%-.64s' nem-Bù¾e být null" - dan "Kolonne '%-.64s' kan ikke være NULL" - nla "Kolom '%-.64s' kan niet null zijn" - eng "Column '%-.64s' cannot be null" - jps "Column '%-.64s' ‚Í null ‚É‚Í‚Å‚«‚È‚¢‚Ì‚Å‚·", - est "Tulp '%-.64s' ei saa omada nullväärtust" - fre "Le champ '%-.64s' ne peut être vide (null)" - ger "Feld '%-.64s' darf nicht NULL sein" - greek "Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)" - hun "A(z) '%-.64s' oszlop erteke nem lehet nulla" - ita "La colonna '%-.64s' non puo` essere nulla" - jpn "Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹" - kor "Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. 
" - nor "Kolonne '%-.64s' kan ikke vere null" - norwegian-ny "Kolonne '%-.64s' kan ikkje vere null" - pol "Kolumna '%-.64s' nie mo¿e byæ null" - por "Coluna '%-.64s' não pode ser vazia" - rum "Coloana '%-.64s' nu poate sa fie null" - rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL" - serbian "Kolona '%-.64s' ne može biti NULL" - slo "Pole '%-.64s' nemô¾e by» null" - spa "La columna '%-.64s' no puede ser nula" - swe "Kolumn '%-.64s' får inte vara NULL" - ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ" + cze "Sloupec '%-.192s' nem-Bù¾e být null" + dan "Kolonne '%-.192s' kan ikke være NULL" + nla "Kolom '%-.192s' kan niet null zijn" + eng "Column '%-.192s' cannot be null" + jps "Column '%-.192s' ‚Í null ‚É‚Í‚Å‚«‚È‚¢‚Ì‚Å‚·", + est "Tulp '%-.192s' ei saa omada nullväärtust" + fre "Le champ '%-.192s' ne peut être vide (null)" + ger "Feld '%-.192s' darf nicht NULL sein" + greek "Ôï ðåäßï '%-.192s' äåí ìðïñåß íá åßíáé êåíü (null)" + hun "A(z) '%-.192s' oszlop erteke nem lehet nulla" + ita "La colonna '%-.192s' non puo` essere nulla" + jpn "Column '%-.192s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹" + kor "Ä®·³ '%-.192s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. " + nor "Kolonne '%-.192s' kan ikke vere null" + norwegian-ny "Kolonne '%-.192s' kan ikkje vere null" + pol "Kolumna '%-.192s' nie mo¿e byæ null" + por "Coluna '%-.192s' não pode ser vazia" + rum "Coloana '%-.192s' nu poate sa fie null" + rus "óÔÏÌÂÅà '%-.192s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL" + serbian "Kolona '%-.192s' ne može biti NULL" + slo "Pole '%-.192s' nemô¾e by» null" + spa "La columna '%-.192s' no puede ser nula" + swe "Kolumn '%-.192s' får inte vara NULL" + ukr "óÔÏ×ÂÅÃØ '%-.192s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ" ER_BAD_DB_ERROR 42000 - cze "Nezn-Bámá databáze '%-.64s'" - dan "Ukendt database '%-.64s'" - nla "Onbekende database '%-.64s'" - eng "Unknown database '%-.64s'" - jps "'%-.64s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.", - est "Tundmatu andmebaas '%-.64s'" - fre "Base '%-.64s' inconnue" - ger "Unbekannte Datenbank '%-.64s'" - greek "Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'" - hun "Ervenytelen adatbazis: '%-.64s'" - ita "Database '%-.64s' sconosciuto" - jpn "'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó." - kor "µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½" - nor "Ukjent database '%-.64s'" - norwegian-ny "Ukjent database '%-.64s'" - pol "Nieznana baza danych '%-.64s'" - por "Banco de dados '%-.64s' desconhecido" - rum "Baza de data invalida '%-.64s'" - rus "îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'" - serbian "Nepoznata baza '%-.64s'" - slo "Neznáma databáza '%-.64s'" - spa "Base de datos desconocida '%-.64s'" - swe "Okänd databas: '%-.64s'" - ukr "îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'" + cze "Nezn-Bámá databáze '%-.192s'" + dan "Ukendt database '%-.192s'" + nla "Onbekende database '%-.192s'" + eng "Unknown database '%-.192s'" + jps "'%-.192s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.", + est "Tundmatu andmebaas '%-.192s'" + fre "Base '%-.192s' inconnue" + ger "Unbekannte Datenbank '%-.192s'" + greek "Áãíùóôç âÜóç äåäïìÝíùí '%-.192s'" + hun "Ervenytelen adatbazis: '%-.192s'" + ita "Database '%-.192s' sconosciuto" + jpn "'%-.192s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó." 
+ kor "µ¥ÀÌŸº£À̽º '%-.192s'´Â ¾Ë¼ö ¾øÀ½" + nor "Ukjent database '%-.192s'" + norwegian-ny "Ukjent database '%-.192s'" + pol "Nieznana baza danych '%-.192s'" + por "Banco de dados '%-.192s' desconhecido" + rum "Baza de data invalida '%-.192s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.192s'" + serbian "Nepoznata baza '%-.192s'" + slo "Neznáma databáza '%-.192s'" + spa "Base de datos desconocida '%-.192s'" + swe "Okänd databas: '%-.192s'" + ukr "îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.192s'" ER_TABLE_EXISTS_ERROR 42S01 - cze "Tabulka '%-.64s' ji-B¾ existuje" - dan "Tabellen '%-.64s' findes allerede" - nla "Tabel '%-.64s' bestaat al" - eng "Table '%-.64s' already exists" - jps "Table '%-.64s' ‚ÍŠù‚É‚ ‚è‚Ü‚·", - est "Tabel '%-.64s' juba eksisteerib" - fre "La table '%-.64s' existe déjà" - ger "Tabelle '%-.64s' bereits vorhanden" - greek "Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç" - hun "A(z) '%-.64s' tabla mar letezik" - ita "La tabella '%-.64s' esiste gia`" - jpn "Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹" - kor "Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ" - nor "Tabellen '%-.64s' eksisterer allerede" - norwegian-ny "Tabellen '%-.64s' eksisterar allereide" - pol "Tabela '%-.64s' ju¿ istnieje" - por "Tabela '%-.64s' já existe" - rum "Tabela '%-.64s' exista deja" - rus "ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "Tabela '%-.64s' veæ postoji" - slo "Tabuµka '%-.64s' u¾ existuje" - spa "La tabla '%-.64s' ya existe" - swe "Tabellen '%-.64s' finns redan" - ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤" + cze "Tabulka '%-.192s' ji-B¾ existuje" + dan "Tabellen '%-.192s' findes allerede" + nla "Tabel '%-.192s' bestaat al" + eng "Table '%-.192s' already exists" + jps "Table '%-.192s' ‚ÍŠù‚É‚ ‚è‚Ü‚·", + est "Tabel '%-.192s' juba eksisteerib" + fre "La table '%-.192s' existe déjà" + ger "Tabelle '%-.192s' bereits vorhanden" + greek "Ï ðßíáêáò '%-.192s' õðÜñ÷åé Þäç" + hun "A(z) '%-.192s' tabla mar letezik" + ita "La tabella '%-.192s' esiste gia`" + jpn "Table '%-.192s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹" + kor "Å×À̺í '%-.192s'´Â ÀÌ¹Ì Á¸ÀçÇÔ" + nor "Tabellen '%-.192s' eksisterer allerede" + norwegian-ny "Tabellen '%-.192s' eksisterar allereide" + pol "Tabela '%-.192s' ju¿ istnieje" + por "Tabela '%-.192s' já existe" + rum "Tabela '%-.192s' exista deja" + rus "ôÁÂÌÉÃÁ '%-.192s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Tabela '%-.192s' veæ postoji" + slo "Tabuµka '%-.192s' u¾ existuje" + spa "La tabla '%-.192s' ya existe" + swe "Tabellen '%-.192s' finns redan" + ukr "ôÁÂÌÉÃÑ '%-.192s' ×ÖÅ ¦ÓÎÕ¤" ER_BAD_TABLE_ERROR 42S02 cze "Nezn-Bámá tabulka '%-.100s'" dan "Ukendt tabel '%-.100s'" @@ -1239,29 +1239,29 @@ ER_BAD_TABLE_ERROR 42S02 swe "Okänd tabell '%-.100s'" ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.100s'" ER_NON_UNIQ_ERROR 23000 - cze "Sloupec '%-.64s' v %-.64s nen-Bí zcela jasný" - dan "Felt: '%-.64s' i tabel %-.64s er ikke entydigt" - nla "Kolom: '%-.64s' in %-.64s is niet eenduidig" - eng "Column '%-.64s' in %-.64s is ambiguous" - est "Väli '%-.64s' %-.64s-s ei ole ühene" - fre "Champ: '%-.64s' dans %-.64s est ambigu" - ger "Feld '%-.64s' in %-.64s ist nicht eindeutig" - greek "Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß" - hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu" - ita "Colonna: '%-.64s' di %-.64s e` ambigua" - jpn "Column: '%-.64s' in %-.64s is ambiguous" - kor "Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ" - nor "Felt: '%-.64s' i tabell %-.64s er ikke entydig" - norwegian-ny "Kolonne: '%-.64s' i tabell %-.64s er ikkje eintydig" - pol "Kolumna: '%-.64s' w %-.64s jest dwuznaczna" - por "Coluna '%-.64s' em '%-.64s' é ambígua" - rum "Coloana: '%-.64s' in %-.64s este ambigua" - rus "óÔÏÌÂÅà '%-.64s' × %-.64s ÚÁÄÁÎ 
ÎÅÏÄÎÏÚÎÁÞÎÏ" - serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu" - slo "Pole: '%-.64s' v %-.64s je nejasné" - spa "La columna: '%-.64s' en %-.64s es ambigua" - swe "Kolumn '%-.64s' i %-.64s är inte unik" - ukr "óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ" + cze "Sloupec '%-.192s' v %-.192s nen-Bí zcela jasný" + dan "Felt: '%-.192s' i tabel %-.192s er ikke entydigt" + nla "Kolom: '%-.192s' in %-.192s is niet eenduidig" + eng "Column '%-.192s' in %-.192s is ambiguous" + est "Väli '%-.192s' %-.192s-s ei ole ühene" + fre "Champ: '%-.192s' dans %-.192s est ambigu" + ger "Feld '%-.192s' in %-.192s ist nicht eindeutig" + greek "Ôï ðåäßï: '%-.192s' óå %-.192s äåí Ý÷åé êáèïñéóôåß" + hun "A(z) '%-.192s' oszlop %-.192s-ben ketertelmu" + ita "Colonna: '%-.192s' di %-.192s e` ambigua" + jpn "Column: '%-.192s' in %-.192s is ambiguous" + kor "Ä®·³: '%-.192s' in '%-.192s' ÀÌ ¸ðÈ£ÇÔ" + nor "Felt: '%-.192s' i tabell %-.192s er ikke entydig" + norwegian-ny "Kolonne: '%-.192s' i tabell %-.192s er ikkje eintydig" + pol "Kolumna: '%-.192s' w %-.192s jest dwuznaczna" + por "Coluna '%-.192s' em '%-.192s' é ambígua" + rum "Coloana: '%-.192s' in %-.192s este ambigua" + rus "óÔÏÌÂÅà '%-.192s' × %-.192s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ" + serbian "Kolona '%-.192s' u %-.192s nije jedinstvena u kontekstu" + slo "Pole: '%-.192s' v %-.192s je nejasné" + spa "La columna: '%-.192s' en %-.192s es ambigua" + swe "Kolumn '%-.192s' i %-.192s är inte unik" + ukr "óÔÏ×ÂÅÃØ '%-.192s' Õ %-.192s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ" ER_SERVER_SHUTDOWN 08S01 cze "Prob-Bíhá ukonèování práce serveru" dan "Database nedlukning er i gang" @@ -1288,77 +1288,77 @@ ER_SERVER_SHUTDOWN 08S01 swe "Servern går nu ned" ukr "úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ" ER_BAD_FIELD_ERROR 42S22 S0022 - cze "Nezn-Bámý sloupec '%-.64s' v %-.64s" - dan "Ukendt kolonne '%-.64s' i tabel %-.64s" - nla "Onbekende kolom '%-.64s' in %-.64s" - eng "Unknown column '%-.64s' in '%-.64s'" - jps "'%-.64s' column ‚Í '%-.64s' ‚É‚Í‚ ‚è‚Ü‚¹‚ñ.", - est "Tundmatu tulp '%-.64s' '%-.64s'-s" - fre "Champ '%-.64s' inconnu dans %-.64s" - ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s" - greek "Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'" - hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben" - ita "Colonna sconosciuta '%-.64s' in '%-.64s'" - jpn "'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó." - kor "Unknown Ä®·³ '%-.64s' in '%-.64s'" - nor "Ukjent kolonne '%-.64s' i tabell %-.64s" - norwegian-ny "Ukjent felt '%-.64s' i tabell %-.64s" - pol "Nieznana kolumna '%-.64s' w %-.64s" - por "Coluna '%-.64s' desconhecida em '%-.64s'" - rum "Coloana invalida '%-.64s' in '%-.64s'" - rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'" - serbian "Nepoznata kolona '%-.64s' u '%-.64s'" - slo "Neznáme pole '%-.64s' v '%-.64s'" - spa "La columna '%-.64s' en %-.64s es desconocida" - swe "Okänd kolumn '%-.64s' i %-.64s" - ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'" + cze "Nezn-Bámý sloupec '%-.192s' v %-.192s" + dan "Ukendt kolonne '%-.192s' i tabel %-.192s" + nla "Onbekende kolom '%-.192s' in %-.192s" + eng "Unknown column '%-.192s' in '%-.192s'" + jps "'%-.192s' column ‚Í '%-.192s' ‚É‚Í‚ ‚è‚Ü‚¹‚ñ.", + est "Tundmatu tulp '%-.192s' '%-.192s'-s" + fre "Champ '%-.192s' inconnu dans %-.192s" + ger "Unbekanntes Tabellenfeld '%-.192s' in %-.192s" + greek "Áãíùóôï ðåäßï '%-.192s' óå '%-.192s'" + hun "A(z) '%-.192s' oszlop ervenytelen '%-.192s'-ben" + ita "Colonna sconosciuta '%-.192s' in '%-.192s'" + jpn "'%-.192s' column ¤Ï '%-.192s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó." 
+ kor "Unknown Ä®·³ '%-.192s' in '%-.192s'" + nor "Ukjent kolonne '%-.192s' i tabell %-.192s" + norwegian-ny "Ukjent felt '%-.192s' i tabell %-.192s" + pol "Nieznana kolumna '%-.192s' w %-.192s" + por "Coluna '%-.192s' desconhecida em '%-.192s'" + rum "Coloana invalida '%-.192s' in '%-.192s'" + rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.192s' × '%-.192s'" + serbian "Nepoznata kolona '%-.192s' u '%-.192s'" + slo "Neznáme pole '%-.192s' v '%-.192s'" + spa "La columna '%-.192s' en %-.192s es desconocida" + swe "Okänd kolumn '%-.192s' i %-.192s" + ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.192s' Õ '%-.192s'" ER_WRONG_FIELD_WITH_GROUP 42000 S1009 - cze "Pou-B¾ité '%-.64s' nebylo v group by" - dan "Brugte '%-.64s' som ikke var i group by" - nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt" - eng "'%-.64s' isn't in GROUP BY" - jps "'%-.64s' isn't in GROUP BY", - est "'%-.64s' puudub GROUP BY klauslis" - fre "'%-.64s' n'est pas dans 'group by'" - ger "'%-.64s' ist nicht in GROUP BY vorhanden" - greek "×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by" - hun "Used '%-.64s' with wasn't in group by" - ita "Usato '%-.64s' che non e` nel GROUP BY" - kor "'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½" - nor "Brukte '%-.64s' som ikke var i group by" - norwegian-ny "Brukte '%-.64s' som ikkje var i group by" - pol "U¿yto '%-.64s' bez umieszczenia w group by" - por "'%-.64s' não está em 'GROUP BY'" - rum "'%-.64s' nu exista in clauza GROUP BY" - rus "'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY" - serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'" - slo "Pou¾ité '%-.64s' nebolo v 'group by'" - spa "Usado '%-.64s' el cual no esta group by" - swe "'%-.64s' finns inte i GROUP BY" - ukr "'%-.64s' ÎÅ ¤ Õ GROUP BY" + cze "Pou-B¾ité '%-.192s' nebylo v group by" + dan "Brugte '%-.192s' som ikke var i group by" + nla "Opdracht gebruikt '%-.192s' dat niet in de GROUP BY voorkomt" + eng "'%-.192s' isn't in GROUP BY" + jps "'%-.192s' isn't in GROUP BY", + est "'%-.192s' puudub GROUP BY klauslis" + fre "'%-.192s' n'est pas dans 'group by'" + ger "'%-.192s' ist nicht in GROUP BY vorhanden" + greek "×ñçóéìïðïéÞèçêå '%-.192s' ðïõ äåí õðÞñ÷å óôï group by" + hun "Used '%-.192s' with wasn't in group by" + ita "Usato '%-.192s' che non e` nel GROUP BY" + kor "'%-.192s'Àº GROUP BY¼Ó¿¡ ¾øÀ½" + nor "Brukte '%-.192s' som ikke var i group by" + norwegian-ny "Brukte '%-.192s' som ikkje var i group by" + pol "U¿yto '%-.192s' bez umieszczenia w group by" + por "'%-.192s' não está em 'GROUP BY'" + rum "'%-.192s' nu exista in clauza GROUP BY" + rus "'%-.192s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY" + serbian "Entitet '%-.192s' nije naveden u komandi 'GROUP BY'" + slo "Pou¾ité '%-.192s' nebolo v 'group by'" + spa "Usado '%-.192s' el cual no esta group by" + swe "'%-.192s' finns inte i GROUP BY" + ukr "'%-.192s' ÎÅ ¤ Õ GROUP BY" ER_WRONG_GROUP_FIELD 42000 S1009 - cze "Nemohu pou-B¾ít group na '%-.64s'" - dan "Kan ikke gruppere på '%-.64s'" - nla "Kan '%-.64s' niet groeperen" - eng "Can't group on '%-.64s'" - est "Ei saa grupeerida '%-.64s' järgi" - fre "Ne peut regrouper '%-.64s'" - ger "Gruppierung über '%-.64s' nicht möglich" - greek "Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.64s'" - hun "A group nem hasznalhato: '%-.64s'" - ita "Impossibile raggruppare per '%-.64s'" - kor "'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½" - nor "Kan ikke gruppere på '%-.64s'" - norwegian-ny "Kan ikkje gruppere på '%-.64s'" - pol "Nie mo¿na grupowaæ po '%-.64s'" - por "Não pode agrupar em '%-.64s'" - rum "Nu pot sa grupez pe (group on) '%-.64s'" - rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'" - serbian "Ne mogu da 
grupišem po '%-.64s'" - slo "Nemô¾em pou¾i» 'group' na '%-.64s'" - spa "No puedo agrupar por '%-.64s'" - swe "Kan inte använda GROUP BY med '%-.64s'" - ukr "îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'" + cze "Nemohu pou-B¾ít group na '%-.192s'" + dan "Kan ikke gruppere på '%-.192s'" + nla "Kan '%-.192s' niet groeperen" + eng "Can't group on '%-.192s'" + est "Ei saa grupeerida '%-.192s' järgi" + fre "Ne peut regrouper '%-.192s'" + ger "Gruppierung über '%-.192s' nicht möglich" + greek "Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.192s'" + hun "A group nem hasznalhato: '%-.192s'" + ita "Impossibile raggruppare per '%-.192s'" + kor "'%-.192s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½" + nor "Kan ikke gruppere på '%-.192s'" + norwegian-ny "Kan ikkje gruppere på '%-.192s'" + pol "Nie mo¿na grupowaæ po '%-.192s'" + por "Não pode agrupar em '%-.192s'" + rum "Nu pot sa grupez pe (group on) '%-.192s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.192s'" + serbian "Ne mogu da grupišem po '%-.192s'" + slo "Nemô¾em pou¾i» 'group' na '%-.192s'" + spa "No puedo agrupar por '%-.192s'" + swe "Kan inte använda GROUP BY med '%-.192s'" + ukr "îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.192s'" ER_WRONG_SUM_SELECT 42000 S1009 cze "P-Bøíkaz obsahuje zároveò funkci sum a sloupce" dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk" @@ -1429,103 +1429,103 @@ ER_TOO_LONG_IDENT 42000 S1009 swe "Kolumnnamn '%-.100s' är för långt" ukr "¶Í'Ñ ¦ÄÅÎÔÉƦËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ" ER_DUP_FIELDNAME 42S21 S1009 - cze "Zdvojen-Bé jméno sloupce '%-.64s'" - dan "Feltnavnet '%-.64s' findes allerede" - nla "Dubbele kolom naam '%-.64s'" - eng "Duplicate column name '%-.64s'" - jps "'%-.64s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ä‚Ü‚·", - est "Kattuv tulba nimi '%-.64s'" - fre "Nom du champ '%-.64s' déjà utilisé" - ger "Doppelter Spaltenname: '%-.64s'" - greek "ÅðáíÜëçøç column name '%-.64s'" - hun "Duplikalt oszlopazonosito: '%-.64s'" - ita "Nome colonna duplicato '%-.64s'" - jpn "'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹" - kor "Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'" - nor "Feltnavnet '%-.64s' eksisterte fra før" - norwegian-ny "Feltnamnet '%-.64s' eksisterte frå før" - pol "Powtórzona nazwa kolumny '%-.64s'" - por "Nome da coluna '%-.64s' duplicado" - rum "Numele coloanei '%-.64s' e duplicat" - rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'" - serbian "Duplirano ime kolone '%-.64s'" - slo "Opakované meno poµa '%-.64s'" - spa "Nombre de columna duplicado '%-.64s'" - swe "Kolumnnamn '%-.64s finns flera gånger" - ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'" + cze "Zdvojen-Bé jméno sloupce '%-.192s'" + dan "Feltnavnet '%-.192s' findes allerede" + nla "Dubbele kolom naam '%-.192s'" + eng "Duplicate column name '%-.192s'" + jps "'%-.192s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ä‚Ü‚·", + est "Kattuv tulba nimi '%-.192s'" + fre "Nom du champ '%-.192s' déjà utilisé" + ger "Doppelter Spaltenname: '%-.192s'" + greek "ÅðáíÜëçøç column name '%-.192s'" + hun "Duplikalt oszlopazonosito: '%-.192s'" + ita "Nome colonna duplicato '%-.192s'" + jpn "'%-.192s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹" + kor "Áߺ¹µÈ Ä®·³ À̸§: '%-.192s'" + nor "Feltnavnet '%-.192s' eksisterte fra før" + norwegian-ny "Feltnamnet '%-.192s' eksisterte frå før" + pol "Powtórzona nazwa kolumny '%-.192s'" + por "Nome da coluna '%-.192s' duplicado" + rum "Numele coloanei '%-.192s' e duplicat" + rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.192s'" + serbian "Duplirano ime kolone '%-.192s'" + slo "Opakované meno poµa '%-.192s'" + spa "Nombre de columna duplicado '%-.192s'" + swe "Kolumnnamn '%-.192s finns flera gånger" + ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.192s'" ER_DUP_KEYNAME 42000 S1009 - cze "Zdvojen-Bé 
jméno klíèe '%-.64s'" - dan "Indeksnavnet '%-.64s' findes allerede" - nla "Dubbele zoeksleutel naam '%-.64s'" - eng "Duplicate key name '%-.64s'" - jps "'%-.64s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·", - est "Kattuv võtme nimi '%-.64s'" - fre "Nom de clef '%-.64s' déjà utilisé" - ger "Doppelter Name für Schlüssel vorhanden: '%-.64s'" - greek "ÅðáíÜëçøç key name '%-.64s'" - hun "Duplikalt kulcsazonosito: '%-.64s'" - ita "Nome chiave duplicato '%-.64s'" - jpn "'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" - kor "Áߺ¹µÈ Å° À̸§ : '%-.64s'" - nor "Nøkkelnavnet '%-.64s' eksisterte fra før" - norwegian-ny "Nøkkelnamnet '%-.64s' eksisterte frå før" - pol "Powtórzony nazwa klucza '%-.64s'" - por "Nome da chave '%-.64s' duplicado" - rum "Numele cheiei '%-.64s' e duplicat" - rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'" - serbian "Duplirano ime kljuèa '%-.64s'" - slo "Opakované meno kµúèa '%-.64s'" - spa "Nombre de clave duplicado '%-.64s'" - swe "Nyckelnamn '%-.64s' finns flera gånger" - ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'" + cze "Zdvojen-Bé jméno klíèe '%-.192s'" + dan "Indeksnavnet '%-.192s' findes allerede" + nla "Dubbele zoeksleutel naam '%-.192s'" + eng "Duplicate key name '%-.192s'" + jps "'%-.192s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·", + est "Kattuv võtme nimi '%-.192s'" + fre "Nom de clef '%-.192s' déjà utilisé" + ger "Doppelter Name für Schlüssel vorhanden: '%-.192s'" + greek "ÅðáíÜëçøç key name '%-.192s'" + hun "Duplikalt kulcsazonosito: '%-.192s'" + ita "Nome chiave duplicato '%-.192s'" + jpn "'%-.192s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ Å° À̸§ : '%-.192s'" + nor "Nøkkelnavnet '%-.192s' eksisterte fra før" + norwegian-ny "Nøkkelnamnet '%-.192s' eksisterte frå før" + pol "Powtórzony nazwa klucza '%-.192s'" + por "Nome da chave '%-.192s' duplicado" + rum "Numele cheiei '%-.192s' e duplicat" + rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.192s'" + serbian "Duplirano ime kljuèa '%-.192s'" + slo "Opakované meno kµúèa '%-.192s'" + spa "Nombre de clave duplicado '%-.192s'" + swe "Nyckelnamn '%-.192s' finns flera gånger" + ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.192s'" ER_DUP_ENTRY 23000 S1009 - cze "Zdvojen-Bý klíè '%-.64s' (èíslo klíèe %d)" - dan "Ens værdier '%-.64s' for indeks %d" - nla "Dubbele ingang '%-.64s' voor zoeksleutel %d" - eng "Duplicate entry '%-.64s' for key %d" - jps "'%-.64s' ‚Í key %d ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", - est "Kattuv väärtus '%-.64s' võtmele %d" - fre "Duplicata du champ '%-.64s' pour la clef %d" - ger "Doppelter Eintrag '%-.64s' für Schlüssel %d" - greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß %d" - hun "Duplikalt bejegyzes '%-.64s' a %d kulcs szerint." 
- ita "Valore duplicato '%-.64s' per la chiave %d" - jpn "'%-.64s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" - kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key %d" - nor "Like verdier '%-.64s' for nøkkel %d" - norwegian-ny "Like verdiar '%-.64s' for nykkel %d" - pol "Powtórzone wyst?pienie '%-.64s' dla klucza %d" - por "Entrada '%-.64s' duplicada para a chave %d" - rum "Cimpul '%-.64s' e duplicat pentru cheia %d" - rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ %d" - serbian "Dupliran unos '%-.64s' za kljuè '%d'" - slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa %d)" - spa "Entrada duplicada '%-.64s' para la clave %d" - swe "Dubbel nyckel '%-.64s' för nyckel %d" - ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ %d" + cze "Zdvojen-Bý klíè '%-.192s' (èíslo klíèe %d)" + dan "Ens værdier '%-.192s' for indeks %d" + nla "Dubbele ingang '%-.192s' voor zoeksleutel %d" + eng "Duplicate entry '%-.192s' for key %d" + jps "'%-.192s' ‚Í key %d ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", + est "Kattuv väärtus '%-.192s' võtmele %d" + fre "Duplicata du champ '%-.192s' pour la clef %d" + ger "Doppelter Eintrag '%-.192s' für Schlüssel %d" + greek "ÄéðëÞ åããñáöÞ '%-.192s' ãéá ôï êëåéäß %d" + hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint." + ita "Valore duplicato '%-.192s' per la chiave %d" + jpn "'%-.192s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.192s': key %d" + nor "Like verdier '%-.192s' for nøkkel %d" + norwegian-ny "Like verdiar '%-.192s' for nykkel %d" + pol "Powtórzone wyst?pienie '%-.192s' dla klucza %d" + por "Entrada '%-.192s' duplicada para a chave %d" + rum "Cimpul '%-.192s' e duplicat pentru cheia %d" + rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.192s' ÐÏ ËÌÀÞÕ %d" + serbian "Dupliran unos '%-.192s' za kljuè '%d'" + slo "Opakovaný kµúè '%-.192s' (èíslo kµúèa %d)" + spa "Entrada duplicada '%-.192s' para la clave %d" + swe "Dubbel nyckel '%-.192s' för nyckel %d" + ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.192s' ÄÌÑ ËÌÀÞÁ %d" ER_WRONG_FIELD_SPEC 42000 S1009 - cze "Chybn-Bá specifikace sloupce '%-.64s'" - dan "Forkert kolonnespecifikaton for felt '%-.64s'" - nla "Verkeerde kolom specificatie voor kolom '%-.64s'" - eng "Incorrect column specifier for column '%-.64s'" - est "Vigane tulba kirjeldus tulbale '%-.64s'" - fre "Mauvais paramètre de champ pour le champ '%-.64s'" - ger "Falsche Spezifikation für Feld '%-.64s'" - greek "ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'" - hun "Rossz oszlopazonosito: '%-.64s'" - ita "Specifica errata per la colonna '%-.64s'" - kor "Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ" - nor "Feil kolonne spesifikator for felt '%-.64s'" - norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'" - pol "B³êdna specyfikacja kolumny dla kolumny '%-.64s'" - por "Especificador de coluna incorreto para a coluna '%-.64s'" - rum "Specificandul coloanei '%-.64s' este incorect" - rus "îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'" - serbian "Pogrešan naziv kolone za kolonu '%-.64s'" - slo "Chyba v ¹pecifikácii poµa '%-.64s'" - spa "Especificador de columna erroneo para la columna '%-.64s'" - swe "Felaktigt kolumntyp för kolumn '%-.64s'" - ukr "îÅצÒÎÉÊ ÓÐÅÃÉƦËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'" + cze "Chybn-Bá specifikace sloupce '%-.192s'" + dan "Forkert kolonnespecifikaton for felt '%-.192s'" + nla "Verkeerde kolom specificatie voor kolom '%-.192s'" + eng "Incorrect column specifier for column '%-.192s'" + est "Vigane tulba kirjeldus tulbale '%-.192s'" + fre "Mauvais paramètre de champ pour le champ '%-.192s'" + ger "Falsche Spezifikation für Feld '%-.192s'" + greek "ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.192s'" + hun "Rossz oszlopazonosito: 
'%-.192s'" + ita "Specifica errata per la colonna '%-.192s'" + kor "Ä®·³ '%-.192s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ" + nor "Feil kolonne spesifikator for felt '%-.192s'" + norwegian-ny "Feil kolonne spesifikator for kolonne '%-.192s'" + pol "B³êdna specyfikacja kolumny dla kolumny '%-.192s'" + por "Especificador de coluna incorreto para a coluna '%-.192s'" + rum "Specificandul coloanei '%-.192s' este incorect" + rus "îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.192s'" + serbian "Pogrešan naziv kolone za kolonu '%-.192s'" + slo "Chyba v ¹pecifikácii poµa '%-.192s'" + spa "Especificador de columna erroneo para la columna '%-.192s'" + swe "Felaktigt kolumntyp för kolumn '%-.192s'" + ukr "îÅצÒÎÉÊ ÓÐÅÃÉƦËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.192s'" ER_PARSE_ERROR 42000 s1009 cze "%s bl-Bízko '%-.80s' na øádku %d" dan "%s nær '%-.80s' på linje %d" @@ -1577,53 +1577,53 @@ ER_EMPTY_QUERY 42000 swe "Frågan var tom" ukr "ðÕÓÔÉÊ ÚÁÐÉÔ" ER_NONUNIQ_TABLE 42000 S1009 - cze "Nejednozna-Bèná tabulka/alias: '%-.64s'" - dan "Tabellen/aliaset: '%-.64s' er ikke unikt" - nla "Niet unieke waarde tabel/alias: '%-.64s'" - eng "Not unique table/alias: '%-.64s'" - jps "'%-.64s' ‚͈êˆÓ‚Ì table/alias –¼‚Å‚Í‚ ‚è‚Ü‚¹‚ñ", - est "Ei ole unikaalne tabel/alias '%-.64s'" - fre "Table/alias: '%-.64s' non unique" - ger "Tabellenname/Alias '%-.64s' nicht eindeutig" - greek "Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'" - hun "Nem egyedi tabla/alias: '%-.64s'" - ita "Tabella/alias non unico: '%-.64s'" - jpn "'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó" - kor "Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'" - nor "Ikke unikt tabell/alias: '%-.64s'" - norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'" - pol "Tabela/alias nie s? unikalne: '%-.64s'" - por "Tabela/alias '%-.64s' não única" - rum "Tabela/alias: '%-.64s' nu este unic" - rus "ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'" - serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'" - slo "Nie jednoznaèná tabuµka/alias: '%-.64s'" - spa "Tabla/alias: '%-.64s' es no unica" - swe "Icke unikt tabell/alias: '%-.64s'" - ukr "îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'" + cze "Nejednozna-Bèná tabulka/alias: '%-.192s'" + dan "Tabellen/aliaset: '%-.192s' er ikke unikt" + nla "Niet unieke waarde tabel/alias: '%-.192s'" + eng "Not unique table/alias: '%-.192s'" + jps "'%-.192s' ‚͈êˆÓ‚Ì table/alias –¼‚Å‚Í‚ ‚è‚Ü‚¹‚ñ", + est "Ei ole unikaalne tabel/alias '%-.192s'" + fre "Table/alias: '%-.192s' non unique" + ger "Tabellenname/Alias '%-.192s' nicht eindeutig" + greek "Áäýíáôç ç áíåýñåóç unique table/alias: '%-.192s'" + hun "Nem egyedi tabla/alias: '%-.192s'" + ita "Tabella/alias non unico: '%-.192s'" + jpn "'%-.192s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.192s'" + nor "Ikke unikt tabell/alias: '%-.192s'" + norwegian-ny "Ikkje unikt tabell/alias: '%-.192s'" + pol "Tabela/alias nie s? 
unikalne: '%-.192s'" + por "Tabela/alias '%-.192s' não única" + rum "Tabela/alias: '%-.192s' nu este unic" + rus "ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.192s'" + serbian "Tabela ili alias nisu bili jedinstveni: '%-.192s'" + slo "Nie jednoznaèná tabuµka/alias: '%-.192s'" + spa "Tabla/alias: '%-.192s' es no unica" + swe "Icke unikt tabell/alias: '%-.192s'" + ukr "îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.192s'" ER_INVALID_DEFAULT 42000 S1009 - cze "Chybn-Bá defaultní hodnota pro '%-.64s'" - dan "Ugyldig standardværdi for '%-.64s'" - nla "Foutieve standaard waarde voor '%-.64s'" - eng "Invalid default value for '%-.64s'" - est "Vigane vaikeväärtus '%-.64s' jaoks" - fre "Valeur par défaut invalide pour '%-.64s'" - ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.64s'" - greek "ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'" - hun "Ervenytelen ertek: '%-.64s'" - ita "Valore di default non valido per '%-.64s'" - kor "'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù." - nor "Ugyldig standardverdi for '%-.64s'" - norwegian-ny "Ugyldig standardverdi for '%-.64s'" - pol "Niew³a?ciwa warto?æ domy?lna dla '%-.64s'" - por "Valor padrão (default) inválido para '%-.64s'" - rum "Valoarea de default este invalida pentru '%-.64s'" - rus "îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'" - serbian "Loša default vrednost za '%-.64s'" - slo "Chybná implicitná hodnota pre '%-.64s'" - spa "Valor por defecto invalido para '%-.64s'" - swe "Ogiltigt DEFAULT värde för '%-.64s'" - ukr "îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'" + cze "Chybn-Bá defaultní hodnota pro '%-.192s'" + dan "Ugyldig standardværdi for '%-.192s'" + nla "Foutieve standaard waarde voor '%-.192s'" + eng "Invalid default value for '%-.192s'" + est "Vigane vaikeväärtus '%-.192s' jaoks" + fre "Valeur par défaut invalide pour '%-.192s'" + ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.192s'" + greek "ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.192s'" + hun "Ervenytelen ertek: '%-.192s'" + ita "Valore di default non valido per '%-.192s'" + kor "'%-.192s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù." + nor "Ugyldig standardverdi for '%-.192s'" + norwegian-ny "Ugyldig standardverdi for '%-.192s'" + pol "Niew³a?ciwa warto?æ domy?lna dla '%-.192s'" + por "Valor padrão (default) inválido para '%-.192s'" + rum "Valoarea de default este invalida pentru '%-.192s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.192s'" + serbian "Loša default vrednost za '%-.192s'" + slo "Chybná implicitná hodnota pre '%-.192s'" + spa "Valor por defecto invalido para '%-.192s'" + swe "Ogiltigt DEFAULT värde för '%-.192s'" + ukr "îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.192s'" ER_MULTIPLE_PRI_KEY 42000 S1009 cze "Definov-Báno více primárních klíèù" dan "Flere primærnøgler specificeret" @@ -1723,78 +1723,78 @@ ER_TOO_LONG_KEY 42000 S1009 swe "För lång nyckel. Högsta tillåtna nyckellängd är %d" ukr "úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. 
îÁʦÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×" ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 - cze "Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje" - dan "Nøglefeltet '%-.64s' eksisterer ikke i tabellen" - nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel" - eng "Key column '%-.64s' doesn't exist in table" - jps "Key column '%-.64s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.", - est "Võtme tulp '%-.64s' puudub tabelis" - fre "La clé '%-.64s' n'existe pas dans la table" - ger "In der Tabelle gibt es kein Schlüsselfeld '%-.64s'" - greek "Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá" - hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban" - ita "La colonna chiave '%-.64s' non esiste nella tabella" - jpn "Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó." - kor "Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." - nor "Nøkkel felt '%-.64s' eksiterer ikke i tabellen" - norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen" - pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli" - por "Coluna chave '%-.64s' não existe na tabela" - rum "Coloana cheie '%-.64s' nu exista in tabela" - rus "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "Kljuèna kolona '%-.64s' ne postoji u tabeli" - slo "Kµúèový ståpec '%-.64s' v tabuµke neexistuje" - spa "La columna clave '%-.64s' no existe en la tabla" - swe "Nyckelkolumn '%-.64s' finns inte" - ukr "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ" + cze "Kl-Bíèový sloupec '%-.192s' v tabulce neexistuje" + dan "Nøglefeltet '%-.192s' eksisterer ikke i tabellen" + nla "Zoeksleutel kolom '%-.192s' bestaat niet in tabel" + eng "Key column '%-.192s' doesn't exist in table" + jps "Key column '%-.192s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.", + est "Võtme tulp '%-.192s' puudub tabelis" + fre "La clé '%-.192s' n'existe pas dans la table" + ger "In der Tabelle gibt es kein Schlüsselfeld '%-.192s'" + greek "Ôï ðåäßï êëåéäß '%-.192s' äåí õðÜñ÷åé óôïí ðßíáêá" + hun "A(z) '%-.192s'kulcsoszlop nem letezik a tablaban" + ita "La colonna chiave '%-.192s' non esiste nella tabella" + jpn "Key column '%-.192s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó." + kor "Key Ä®·³ '%-.192s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." 
+ nor "Nøkkel felt '%-.192s' eksiterer ikke i tabellen" + norwegian-ny "Nykkel kolonne '%-.192s' eksiterar ikkje i tabellen" + pol "Kolumna '%-.192s' zdefiniowana w kluczu nie istnieje w tabeli" + por "Coluna chave '%-.192s' não existe na tabela" + rum "Coloana cheie '%-.192s' nu exista in tabela" + rus "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.192s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Kljuèna kolona '%-.192s' ne postoji u tabeli" + slo "Kµúèový ståpec '%-.192s' v tabuµke neexistuje" + spa "La columna clave '%-.192s' no existe en la tabla" + swe "Nyckelkolumn '%-.192s' finns inte" + ukr "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.192s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ" ER_BLOB_USED_AS_KEY 42000 S1009 - cze "Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè" - dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks" - nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie" - eng "BLOB column '%-.64s' can't be used in key specification with the used table type" - est "BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena" - fre "Champ BLOB '%-.64s' ne peut être utilisé dans une clé" - ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden" - greek "Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)" - hun "Blob objektum '%-.64s' nem hasznalhato kulcskent" - ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave" - kor "BLOB Ä®·³ '%-.64s'´Â Å° Á¤ÀÇ¿¡¼ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù." - nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler" - norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar" - pol "Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza" - por "Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado" - rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit" - rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ" - serbian "BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi" - slo "Blob pole '%-.64s' nemô¾e by» pou¾ité ako kµúè" - spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave" - swe "En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen" - ukr "BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ" + cze "Blob sloupec '%-.192s' nem-Bù¾e být pou¾it jako klíè" + dan "BLOB feltet '%-.192s' kan ikke bruges ved specifikation af indeks" + nla "BLOB kolom '%-.192s' kan niet gebruikt worden bij zoeksleutel specificatie" + eng "BLOB column '%-.192s' can't be used in key specification with the used table type" + est "BLOB-tüüpi tulpa '%-.192s' ei saa kasutada võtmena" + fre "Champ BLOB '%-.192s' ne peut être utilisé dans une clé" + ger "BLOB-Feld '%-.192s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden" + greek "Ðåäßï ôýðïõ Blob '%-.192s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)" + hun "Blob objektum '%-.192s' nem hasznalhato kulcskent" + ita "La colonna BLOB '%-.192s' non puo` essere usata nella specifica della chiave" + kor "BLOB Ä®·³ '%-.192s'´Â Å° Á¤ÀÇ¿¡¼ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù." 
+ nor "Blob felt '%-.192s' kan ikke brukes ved spesifikasjon av nøkler" + norwegian-ny "Blob kolonne '%-.192s' kan ikkje brukast ved spesifikasjon av nyklar" + pol "Kolumna typu Blob '%-.192s' nie mo¿e byæ u¿yta w specyfikacji klucza" + por "Coluna BLOB '%-.192s' não pode ser utilizada na especificação de chave para o tipo de tabela usado" + rum "Coloana de tip BLOB '%-.192s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit" + rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.192s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ" + serbian "BLOB kolona '%-.192s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi" + slo "Blob pole '%-.192s' nemô¾e by» pou¾ité ako kµúè" + spa "La columna Blob '%-.192s' no puede ser usada en una declaracion de clave" + swe "En BLOB '%-.192s' kan inte vara nyckel med den använda tabelltypen" + ukr "BLOB ÓÔÏ×ÂÅÃØ '%-.192s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ" ER_TOO_BIG_FIELDLENGTH 42000 S1009 - cze "P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). Pou¾ijte BLOB" - dan "For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet" - nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB" - eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead" - jps "column '%-.64s' ‚Í,Šm•Û‚·‚é column ‚Ì‘å‚«‚³‚ª‘½‚·‚¬‚Ü‚·. (Å‘å %d ‚Ü‚Å). BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚‚¾‚³‚¢.", - est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi" - fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB" - ger "Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!" - greek "Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB" - hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb." - ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB." - jpn "column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤." - kor "Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀÌ°¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä." - nor "For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor" - norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor" - pol "Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB" - por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar" - rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine" - rus "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ÉÌÉ TEXT ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ" - serbian "Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje" - slo "Príli¹ veµká då¾ka pre pole '%-.64s' (maximum = %d). Pou¾ite BLOB" - spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar" - swe "För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället" - ukr "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB" + cze "P-Bøíli¹ velká délka sloupce '%-.192s' (nejvíce %d). Pou¾ijte BLOB" + dan "For stor feltlængde for kolonne '%-.192s' (maks = %d). Brug BLOB i stedet" + nla "Te grote kolomlengte voor '%-.192s' (max = %d). Maak hiervoor gebruik van het type BLOB" + eng "Column length too big for column '%-.192s' (max = %d); use BLOB or TEXT instead" + jps "column '%-.192s' ‚Í,Šm•Û‚·‚é column ‚Ì‘å‚«‚³‚ª‘½‚·‚¬‚Ü‚·. 
(Å‘å %d ‚Ü‚Å). BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚‚¾‚³‚¢.", + est "Tulba '%-.192s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi" + fre "Champ '%-.192s' trop long (max = %d). Utilisez un BLOB" + ger "Feldlänge für Feld '%-.192s' zu groß (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!" + greek "Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.192s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB" + hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb." + ita "La colonna '%-.192s' e` troppo grande (max=%d). Utilizza un BLOB." + jpn "column '%-.192s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤." + kor "Ä®·³ '%-.192s'ÀÇ Ä®·³ ±æÀÌ°¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä." + nor "For stor nøkkellengde for kolonne '%-.192s' (maks = %d). Bruk BLOB istedenfor" + norwegian-ny "For stor nykkellengde for felt '%-.192s' (maks = %d). Bruk BLOB istadenfor" + pol "Zbyt du¿a d³ugo?æ kolumny '%-.192s' (maks. = %d). W zamian u¿yj typu BLOB" + por "Comprimento da coluna '%-.192s' grande demais (max = %d); use BLOB em seu lugar" + rum "Lungimea coloanei '%-.192s' este prea lunga (maximum = %d). Foloseste BLOB mai bine" + rus "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.192s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ÉÌÉ TEXT ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ" + serbian "Previše podataka za kolonu '%-.192s' (maksimum je %d). Upotrebite BLOB polje" + slo "Príli¹ veµká då¾ka pre pole '%-.192s' (maximum = %d). Pou¾ite BLOB" + spa "Longitud de columna demasiado grande para la columna '%-.192s' (maximo = %d).Usar BLOB en su lugar" + swe "För stor kolumnlängd angiven för '%-.192s' (max= %d). Använd en BLOB instället" + ukr "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.192s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB" ER_WRONG_AUTO_KEY 42000 S1009 cze "M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè" dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret" @@ -1919,30 +1919,30 @@ ER_SHUTDOWN_COMPLETE swe "%s: Avslutning klar\n" ukr "%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n" ER_FORCING_CLOSE 08S01 - cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.32s'\n" - dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.32s'\n" - nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.32s'\n" - eng "%s: Forcing close of thread %ld user: '%-.32s'\n" - jps "%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.32s'\n", - est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n" - fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.32s'\n" - ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n" - greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.32s'\n" - hun "%s: A(z) %ld thread kenyszeritett zarasa. 
Felhasznalo: '%-.32s'\n" - ita "%s: Forzata la chiusura del thread %ld utente: '%-.32s'\n" - jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.32s'\n" - kor "%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.32s'\n" - nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.32s'\n" - norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.32s'\n" - pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.32s'\n" - por "%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n" - rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n" - rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n" - serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n" - slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.32s'\n" - spa "%s: Forzando a cerrar el thread %ld usuario: '%-.32s'\n" - swe "%s: Stänger av tråd %ld; användare: '%-.32s'\n" - ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n" + cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.48s'\n" + dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.48s'\n" + nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.48s'\n" + eng "%s: Forcing close of thread %ld user: '%-.48s'\n" + jps "%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.48s'\n", + est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.48s'\n" + fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.48s'\n" + ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.48s'\n" + greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.48s'\n" + hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.48s'\n" + ita "%s: Forzata la chiusura del thread %ld utente: '%-.48s'\n" + jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.48s'\n" + kor "%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.48s'\n" + nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.48s'\n" + norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.48s'\n" + pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.48s'\n" + por "%s: Forçando finalização da 'thread' %ld - usuário '%-.48s'\n" + rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.48s'\n" + rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.48s'\n" + serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.48s'\n" + slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.48s'\n" + spa "%s: Forzando a cerrar el thread %ld usuario: '%-.48s'\n" + swe "%s: Stänger av tråd %ld; användare: '%-.48s'\n" + ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.48s'\n" ER_IPSOCK_ERROR 08S01 cze "Nemohu vytvo-Bøit IP socket" dan "Kan ikke oprette IP socket" @@ -1969,30 +1969,30 @@ ER_IPSOCK_ERROR 08S01 swe "Kan inte skapa IP-socket" ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í" ER_NO_SUCH_INDEX 42S12 S1009 - cze "Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu" - dan "Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen" - nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" - eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table" - jps "Table '%-.64s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚‚¾‚³‚¢", - est "Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti" - fre "La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table" - ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. 
Tabelle neu anlegen" - greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá" - hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" - ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" - jpn "Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤" - kor "Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä..." - nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen" - norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt" - pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê" - por "Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela" - rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela" - rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï" - serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo" - slo "Tabuµka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova" - spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" - swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" - ukr "ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ" + cze "Tabulka '%-.192s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu" + dan "Tabellen '%-.192s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen" + nla "Tabel '%-.192s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" + eng "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table" + jps "Table '%-.192s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚‚¾‚³‚¢", + est "Tabelil '%-.192s' puuduvad võtmed. Loo tabel uuesti" + fre "La table '%-.192s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table" + ger "Tabelle '%-.192s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen" + greek "Ï ðßíáêáò '%-.192s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá" + hun "A(z) '%-.192s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" + ita "La tabella '%-.192s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" + jpn "Table '%-.192s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤" + kor "Å×À̺í '%-.192s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä..." + nor "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen" + norwegian-ny "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt" + pol "Tabela '%-.192s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê" + por "Tabela '%-.192s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela" + rum "Tabela '%-.192s' nu are un index ca acela folosit in CREATE INDEX. 
Re-creeaza tabela" + rus "÷ ÔÁÂÌÉÃÅ '%-.192s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï" + serbian "Tabela '%-.192s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo" + slo "Tabuµka '%-.192s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova" + spa "La tabla '%-.192s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" + swe "Tabellen '%-.192s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" + ukr "ôÁÂÌÉÃÑ '%-.192s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ" ER_WRONG_FIELD_TERMINATORS 42000 S1009 cze "Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál" dan "Felt adskiller er ikke som forventet, se dokumentationen" @@ -2190,30 +2190,30 @@ ER_CANT_REMOVE_ALL_FIELDS 42000 swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället" ukr "îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE" ER_CANT_DROP_FIELD_OR_KEY 42000 - cze "Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe" - dan "Kan ikke udføre DROP '%-.64s'. Undersøg om feltet/nøglen eksisterer." - nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." - eng "Can't DROP '%-.64s'; check that column/key exists" - jps "'%-.64s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists", - est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib" - fre "Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe" - ger "Kann '%-.64s' nicht löschen. Existiert die Spalte oder der Schlüssel?" - greek "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé" - hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" - ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista" - jpn "'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists" - kor "'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª Å°°¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä." - nor "Kan ikke DROP '%-.64s'. Undersøk om felt/nøkkel eksisterer." - norwegian-ny "Kan ikkje DROP '%-.64s'. Undersøk om felt/nøkkel eksisterar." - pol "Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje" - por "Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe" - rum "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista" - rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno kljuè) postoji" - slo "Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe" - spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe" - swe "Kan inte ta bort '%-.64s'. Kontrollera att fältet/nyckel finns" - ukr "îÅ ÍÏÖÕ DROP '%-.64s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤" + cze "Nemohu zru-B¹it '%-.192s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe" + dan "Kan ikke udføre DROP '%-.192s'. Undersøg om feltet/nøglen eksisterer." + nla "Kan '%-.192s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." + eng "Can't DROP '%-.192s'; check that column/key exists" + jps "'%-.192s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists", + est "Ei suuda kustutada '%-.192s'. Kontrolli kas tulp/võti eksisteerib" + fre "Ne peut effacer (DROP) '%-.192s'. Vérifiez s'il existe" + ger "Kann '%-.192s' nicht löschen. 
Existiert die Spalte oder der Schlüssel?" + greek "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.192s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé" + hun "A DROP '%-.192s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" + ita "Impossibile cancellare '%-.192s'. Controllare che il campo chiave esista" + jpn "'%-.192s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists" + kor "'%-.192s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª Å°°¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä." + nor "Kan ikke DROP '%-.192s'. Undersøk om felt/nøkkel eksisterer." + norwegian-ny "Kan ikkje DROP '%-.192s'. Undersøk om felt/nøkkel eksisterar." + pol "Nie mo¿na wykonaæ operacji DROP '%-.192s'. Sprawd¥, czy to pole/klucz istnieje" + por "Não se pode fazer DROP '%-.192s'. Confira se esta coluna/chave existe" + rum "Nu pot sa DROP '%-.192s'. Verifica daca coloana/cheia exista" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.192s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.192s'. Proverite da li ta kolona (odnosno kljuè) postoji" + slo "Nemô¾em zru¹i» (DROP) '%-.192s'. Skontrolujte, èi neexistujú záznamy/kµúèe" + spa "No puedo ELIMINAR '%-.192s'. compuebe que el campo/clave existe" + swe "Kan inte ta bort '%-.192s'. Kontrollera att fältet/nyckel finns" + ukr "îÅ ÍÏÖÕ DROP '%-.192s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤" ER_INSERT_INFO cze "Z-Báznamù: %ld Zdvojených: %ld Varování: %ld" dan "Poster: %ld Ens: %ld Advarsler: %ld" @@ -2240,11 +2240,11 @@ ER_INSERT_INFO swe "Rader: %ld Dubletter: %ld Varningar: %ld" ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" ER_UPDATE_TABLE_USED - eng "You can't specify target table '%-.64s' for update in FROM clause" - ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig." - rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ" - swe "INSERT-table '%-.64s' får inte finnas i FROM tabell-listan" - ukr "ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM" + eng "You can't specify target table '%-.192s' for update in FROM clause" + ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.192s' ist in der FROM-Klausel nicht zulässig." + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.192s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ" + swe "INSERT-table '%-.192s' får inte finnas i FROM tabell-listan" + ukr "ôÁÂÌÉÃÑ '%-.192s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM" ER_NO_SUCH_THREAD cze "Nezn-Bámá identifikace threadu: %lu" dan "Ukendt tråd id: %lu" @@ -2319,28 +2319,28 @@ ER_NO_TABLES_USED swe "Inga tabeller angivna" ukr "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ" ER_TOO_BIG_SET - cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %-.64s a SET" - dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s" - nla "Teveel strings voor kolom %-.64s en SET" - eng "Too many strings for column %-.64s and SET" - est "Liiga palju string tulbale %-.64s tüübile SET" - fre "Trop de chaînes dans la colonne %-.64s avec SET" - ger "Zu viele Strings für Feld %-.64s und SET angegeben" - greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET" - hun "Tul sok karakter: %-.64s es SET" - ita "Troppe stringhe per la colonna %-.64s e la SET" - kor "Ä®·³ %-.64s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù." 
- nor "For mange tekststrenger kolonne %-.64s og SET" - norwegian-ny "For mange tekststrengar felt %-.64s og SET" - pol "Zbyt wiele ³añcuchów dla kolumny %-.64s i polecenia SET" - por "'Strings' demais para coluna '%-.64s' e SET" - rum "Prea multe siruri pentru coloana %-.64s si SET" - rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET" - serbian "Previše string-ova za kolonu '%-.64s' i komandu 'SET'" - slo "Príli¹ mnoho re»azcov pre pole %-.64s a SET" - spa "Muchas strings para columna %-.64s y SET" - swe "För många alternativ till kolumn %-.64s för SET" - ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET" + cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %-.192s a SET" + dan "For mange tekststrenge til specifikationen af SET i kolonne %-.192s" + nla "Teveel strings voor kolom %-.192s en SET" + eng "Too many strings for column %-.192s and SET" + est "Liiga palju string tulbale %-.192s tüübile SET" + fre "Trop de chaînes dans la colonne %-.192s avec SET" + ger "Zu viele Strings für Feld %-.192s und SET angegeben" + greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.192s êáé SET" + hun "Tul sok karakter: %-.192s es SET" + ita "Troppe stringhe per la colonna %-.192s e la SET" + kor "Ä®·³ %-.192s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù." + nor "For mange tekststrenger kolonne %-.192s og SET" + norwegian-ny "For mange tekststrengar felt %-.192s og SET" + pol "Zbyt wiele ³añcuchów dla kolumny %-.192s i polecenia SET" + por "'Strings' demais para coluna '%-.192s' e SET" + rum "Prea multe siruri pentru coloana %-.192s si SET" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.192s × SET" + serbian "Previše string-ova za kolonu '%-.192s' i komandu 'SET'" + slo "Príli¹ mnoho re»azcov pre pole %-.192s a SET" + spa "Muchas strings para columna %-.192s y SET" + swe "För många alternativ till kolumn %-.192s för SET" + ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.192s ÔÁ SET" ER_NO_UNIQUE_LOGFILE cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %-.200s.(1-999)\n" dan "Kan ikke lave unikt log-filnavn %-.200s.(1-999)\n" @@ -2365,79 +2365,79 @@ ER_NO_UNIQUE_LOGFILE swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n" ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.200s.(1-999)\n" ER_TABLE_NOT_LOCKED_FOR_WRITE - cze "Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna" - dan "Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres" - nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." - eng "Table '%-.64s' was locked with a READ lock and can't be updated" - jps "Table '%-.64s' ‚Í READ lock ‚É‚È‚Á‚Ä‚¢‚ÄAXV‚Í‚Å‚«‚Ü‚¹‚ñ", - est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav" - fre "Table '%-.64s' verrouillée lecture (READ): modification impossible" - ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" - greek "Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò" - hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni" - ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata" - jpn "Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤¤Þ¤»¤ó" - kor "Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀÖ¾î¼ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù." 
- nor "Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres" - norwegian-ny "Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast" - pol "Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana" - por "Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada" - rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata" - rus "ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ" - serbian "Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati" - slo "Tabuµka '%-.64s' bola zamknutá s READ a nemô¾e by» zmenená" - spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada" - swe "Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning" - ukr "ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ" + cze "Tabulka '%-.192s' byla zam-Bèena s READ a nemù¾e být zmìnìna" + dan "Tabellen '%-.192s' var låst med READ lås og kan ikke opdateres" + nla "Tabel '%-.192s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." + eng "Table '%-.192s' was locked with a READ lock and can't be updated" + jps "Table '%-.192s' ‚Í READ lock ‚É‚È‚Á‚Ä‚¢‚ÄAXV‚Í‚Å‚«‚Ü‚¹‚ñ", + est "Tabel '%-.192s' on lukustatud READ lukuga ning ei ole muudetav" + fre "Table '%-.192s' verrouillée lecture (READ): modification impossible" + ger "Tabelle '%-.192s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" + greek "Ï ðßíáêáò '%-.192s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò" + hun "A(z) '%-.192s' tabla zarolva lett (READ lock) es nem lehet frissiteni" + ita "La tabella '%-.192s' e` soggetta a lock in lettura e non puo` essere aggiornata" + jpn "Table '%-.192s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤¤Þ¤»¤ó" + kor "Å×À̺í '%-.192s'´Â READ ¶ôÀÌ Àá°ÜÀÖ¾î¼ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù." 
+ nor "Tabellen '%-.192s' var låst med READ lås og kan ikke oppdateres" + norwegian-ny "Tabellen '%-.192s' var låst med READ lås og kan ikkje oppdaterast" + pol "Tabela '%-.192s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana" + por "Tabela '%-.192s' foi travada com trava de leitura e não pode ser atualizada" + rum "Tabela '%-.192s' a fost locked cu un READ lock si nu poate fi actualizata" + rus "ôÁÂÌÉÃÁ '%-.192s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ" + serbian "Tabela '%-.192s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati" + slo "Tabuµka '%-.192s' bola zamknutá s READ a nemô¾e by» zmenená" + spa "Tabla '%-.192s' fue trabada con un READ lock y no puede ser actualizada" + swe "Tabell '%-.192s' kan inte uppdateras emedan den är låst för läsning" + ukr "ôÁÂÌÉÃÀ '%-.192s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ" ER_TABLE_NOT_LOCKED - cze "Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES" - dan "Tabellen '%-.64s' var ikke låst med LOCK TABLES" - nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES" - eng "Table '%-.64s' was not locked with LOCK TABLES" - jps "Table '%-.64s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", - est "Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES" - fre "Table '%-.64s' non verrouillée: utilisez LOCK TABLES" - ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt" - greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES" - hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel" - ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES" - jpn "Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" - kor "Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù." - nor "Tabellen '%-.64s' var ikke låst med LOCK TABLES" - norwegian-ny "Tabellen '%-.64s' var ikkje låst med LOCK TABLES" - pol "Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES" - por "Tabela '%-.64s' não foi travada com LOCK TABLES" - rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES" - rus "ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" - serbian "Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'" - slo "Tabuµka '%-.64s' nebola zamknutá s LOCK TABLES" - spa "Tabla '%-.64s' no fue trabada con LOCK TABLES" - swe "Tabell '%-.64s' är inte låst med LOCK TABLES" - ukr "ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" + cze "Tabulka '%-.192s' nebyla zam-Bèena s LOCK TABLES" + dan "Tabellen '%-.192s' var ikke låst med LOCK TABLES" + nla "Tabel '%-.192s' was niet gelocked met LOCK TABLES" + eng "Table '%-.192s' was not locked with LOCK TABLES" + jps "Table '%-.192s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Tabel '%-.192s' ei ole lukustatud käsuga LOCK TABLES" + fre "Table '%-.192s' non verrouillée: utilisez LOCK TABLES" + ger "Tabelle '%-.192s' wurde nicht mit LOCK TABLES gesperrt" + greek "Ï ðßíáêáò '%-.192s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES" + hun "A(z) '%-.192s' tabla nincs zarolva a LOCK TABLES-szel" + ita "Non e` stato impostato il lock per la tabella '%-.192s' con LOCK TABLES" + jpn "Table '%-.192s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "Å×À̺í '%-.192s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù." 
+ nor "Tabellen '%-.192s' var ikke låst med LOCK TABLES" + norwegian-ny "Tabellen '%-.192s' var ikkje låst med LOCK TABLES" + pol "Tabela '%-.192s' nie zosta³a zablokowana poleceniem LOCK TABLES" + por "Tabela '%-.192s' não foi travada com LOCK TABLES" + rum "Tabela '%-.192s' nu a fost locked cu LOCK TABLES" + rus "ôÁÂÌÉÃÁ '%-.192s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" + serbian "Tabela '%-.192s' nije bila zakljuèana komandom 'LOCK TABLES'" + slo "Tabuµka '%-.192s' nebola zamknutá s LOCK TABLES" + spa "Tabla '%-.192s' no fue trabada con LOCK TABLES" + swe "Tabell '%-.192s' är inte låst med LOCK TABLES" + ukr "ôÁÂÌÉÃÀ '%-.192s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" ER_BLOB_CANT_HAVE_DEFAULT 42000 - cze "Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu" - dan "BLOB feltet '%-.64s' kan ikke have en standard værdi" - nla "Blob veld '%-.64s' can geen standaardwaarde bevatten" - eng "BLOB/TEXT column '%-.64s' can't have a default value" - est "BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust" - fre "BLOB '%-.64s' ne peut avoir de valeur par défaut" - ger "BLOB/TEXT-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben" - greek "Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)" - hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke" - ita "Il campo BLOB '%-.64s' non puo` avere un valore di default" - jpn "BLOB column '%-.64s' can't have a default value" - kor "BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù." - nor "Blob feltet '%-.64s' kan ikke ha en standard verdi" - norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi" - pol "Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci" - por "Coluna BLOB '%-.64s' não pode ter um valor padrão (default)" - rum "Coloana BLOB '%-.64s' nu poate avea o valoare default" - rus "îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'" - serbian "BLOB kolona '%-.64s' ne može imati default vrednost" - slo "Pole BLOB '%-.64s' nemô¾e ma» implicitnú hodnotu" - spa "Campo Blob '%-.64s' no puede tener valores patron" - swe "BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde" - ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ" + cze "Blob polo-B¾ka '%-.192s' nemù¾e mít defaultní hodnotu" + dan "BLOB feltet '%-.192s' kan ikke have en standard værdi" + nla "Blob veld '%-.192s' can geen standaardwaarde bevatten" + eng "BLOB/TEXT column '%-.192s' can't have a default value" + est "BLOB-tüüpi tulp '%-.192s' ei saa omada vaikeväärtust" + fre "BLOB '%-.192s' ne peut avoir de valeur par défaut" + ger "BLOB/TEXT-Feld '%-.192s' darf keinen Vorgabewert (DEFAULT) haben" + greek "Ôá Blob ðåäßá '%-.192s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)" + hun "A(z) '%-.192s' blob objektumnak nem lehet alapertelmezett erteke" + ita "Il campo BLOB '%-.192s' non puo` avere un valore di default" + jpn "BLOB column '%-.192s' can't have a default value" + kor "BLOB Ä®·³ '%-.192s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù." 
+ nor "Blob feltet '%-.192s' kan ikke ha en standard verdi" + norwegian-ny "Blob feltet '%-.192s' kan ikkje ha ein standard verdi" + pol "Pole typu blob '%-.192s' nie mo¿e mieæ domy?lnej warto?ci" + por "Coluna BLOB '%-.192s' não pode ter um valor padrão (default)" + rum "Coloana BLOB '%-.192s' nu poate avea o valoare default" + rus "îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.192s'" + serbian "BLOB kolona '%-.192s' ne može imati default vrednost" + slo "Pole BLOB '%-.192s' nemô¾e ma» implicitnú hodnotu" + spa "Campo Blob '%-.192s' no puede tener valores patron" + swe "BLOB fält '%-.192s' kan inte ha ett DEFAULT-värde" + ukr "óÔÏ×ÂÅÃØ BLOB '%-.192s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ" ER_WRONG_DB_NAME 42000 cze "Nep-Bøípustné jméno databáze '%-.100s'" dan "Ugyldigt database navn '%-.100s'" @@ -2534,121 +2534,121 @@ ER_UNKNOWN_ERROR swe "Oidentifierat fel" ukr "îÅצÄÏÍÁ ÐÏÍÉÌËÁ" ER_UNKNOWN_PROCEDURE 42000 - cze "Nezn-Bámá procedura %-.64s" - dan "Ukendt procedure %-.64s" - nla "Onbekende procedure %-.64s" - eng "Unknown procedure '%-.64s'" - est "Tundmatu protseduur '%-.64s'" - fre "Procédure %-.64s inconnue" - ger "Unbekannte Prozedur '%-.64s'" - greek "Áãíùóôç äéáäéêáóßá '%-.64s'" - hun "Ismeretlen eljaras: '%-.64s'" - ita "Procedura '%-.64s' sconosciuta" - kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'" - nor "Ukjent prosedyre %-.64s" - norwegian-ny "Ukjend prosedyre %-.64s" - pol "Unkown procedure %-.64s" - por "'Procedure' '%-.64s' desconhecida" - rum "Procedura unknown '%-.64s'" - rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'" - serbian "Nepoznata procedura '%-.64s'" - slo "Neznámá procedúra '%-.64s'" - spa "Procedimiento desconocido %-.64s" - swe "Okänd procedur: %-.64s" - ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'" + cze "Nezn-Bámá procedura %-.192s" + dan "Ukendt procedure %-.192s" + nla "Onbekende procedure %-.192s" + eng "Unknown procedure '%-.192s'" + est "Tundmatu protseduur '%-.192s'" + fre "Procédure %-.192s inconnue" + ger "Unbekannte Prozedur '%-.192s'" + greek "Áãíùóôç äéáäéêáóßá '%-.192s'" + hun "Ismeretlen eljaras: '%-.192s'" + ita "Procedura '%-.192s' sconosciuta" + kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.192s'" + nor "Ukjent prosedyre %-.192s" + norwegian-ny "Ukjend prosedyre %-.192s" + pol "Unkown procedure %-.192s" + por "'Procedure' '%-.192s' desconhecida" + rum "Procedura unknown '%-.192s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.192s'" + serbian "Nepoznata procedura '%-.192s'" + slo "Neznámá procedúra '%-.192s'" + spa "Procedimiento desconocido %-.192s" + swe "Okänd procedur: %-.192s" + ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.192s'" ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 - cze "Chybn-Bý poèet parametrù procedury %-.64s" - dan "Forkert antal parametre til proceduren %-.64s" - nla "Foutief aantal parameters doorgegeven aan procedure %-.64s" - eng "Incorrect parameter count to procedure '%-.64s'" - est "Vale parameetrite hulk protseduurile '%-.64s'" - fre "Mauvais nombre de paramètres pour la procedure %-.64s" - ger "Falsche Parameterzahl für Prozedur '%-.64s'" - greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'" - hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal" - ita "Numero di parametri errato per la procedura '%-.64s'" - kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ" - nor "Feil parameter antall til prosedyren %-.64s" - norwegian-ny "Feil parameter tal til prosedyra %-.64s" - pol "Incorrect parameter count to procedure %-.64s" - por "Número de parâmetros incorreto para a 'procedure' '%-.64s'" - rum "Procedura '%-.64s' are un numar incorect de parametri" - rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ 
ÐÒÏÃÅÄÕÒÙ '%-.64s'" - serbian "Pogrešan broj parametara za proceduru '%-.64s'" - slo "Chybný poèet parametrov procedúry '%-.64s'" - spa "Equivocado parametro count para procedimiento %-.64s" - swe "Felaktigt antal parametrar till procedur %-.64s" - ukr "èÉÂÎÁ ˦ÌØ˦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'" + cze "Chybn-Bý poèet parametrù procedury %-.192s" + dan "Forkert antal parametre til proceduren %-.192s" + nla "Foutief aantal parameters doorgegeven aan procedure %-.192s" + eng "Incorrect parameter count to procedure '%-.192s'" + est "Vale parameetrite hulk protseduurile '%-.192s'" + fre "Mauvais nombre de paramètres pour la procedure %-.192s" + ger "Falsche Parameterzahl für Prozedur '%-.192s'" + greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.192s'" + hun "Rossz parameter a(z) '%-.192s'eljaras szamitasanal" + ita "Numero di parametri errato per la procedura '%-.192s'" + kor "'%-.192s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ" + nor "Feil parameter antall til prosedyren %-.192s" + norwegian-ny "Feil parameter tal til prosedyra %-.192s" + pol "Incorrect parameter count to procedure %-.192s" + por "Número de parâmetros incorreto para a 'procedure' '%-.192s'" + rum "Procedura '%-.192s' are un numar incorect de parametri" + rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.192s'" + serbian "Pogrešan broj parametara za proceduru '%-.192s'" + slo "Chybný poèet parametrov procedúry '%-.192s'" + spa "Equivocado parametro count para procedimiento %-.192s" + swe "Felaktigt antal parametrar till procedur %-.192s" + ukr "èÉÂÎÁ ˦ÌØ˦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.192s'" ER_WRONG_PARAMETERS_TO_PROCEDURE - cze "Chybn-Bé parametry procedury %-.64s" - dan "Forkert(e) parametre til proceduren %-.64s" - nla "Foutieve parameters voor procedure %-.64s" - eng "Incorrect parameters to procedure '%-.64s'" - est "Vigased parameetrid protseduurile '%-.64s'" - fre "Paramètre erroné pour la procedure %-.64s" - ger "Falsche Parameter für Prozedur '%-.64s'" - greek "ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'" - hun "Rossz parameter a(z) '%-.64s' eljarasban" - ita "Parametri errati per la procedura '%-.64s'" - kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ" - nor "Feil parametre til prosedyren %-.64s" - norwegian-ny "Feil parameter til prosedyra %-.64s" - pol "Incorrect parameters to procedure %-.64s" - por "Parâmetros incorretos para a 'procedure' '%-.64s'" - rum "Procedura '%-.64s' are parametrii incorecti" - rus "îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" - serbian "Pogrešni parametri prosleðeni proceduri '%-.64s'" - slo "Chybné parametre procedúry '%-.64s'" - spa "Equivocados parametros para procedimiento %-.64s" - swe "Felaktiga parametrar till procedur %-.64s" - ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'" + cze "Chybn-Bé parametry procedury %-.192s" + dan "Forkert(e) parametre til proceduren %-.192s" + nla "Foutieve parameters voor procedure %-.192s" + eng "Incorrect parameters to procedure '%-.192s'" + est "Vigased parameetrid protseduurile '%-.192s'" + fre "Paramètre erroné pour la procedure %-.192s" + ger "Falsche Parameter für Prozedur '%-.192s'" + greek "ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.192s'" + hun "Rossz parameter a(z) '%-.192s' eljarasban" + ita "Parametri errati per la procedura '%-.192s'" + kor "'%-.192s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆĶó¸ÞÅÍ" + nor "Feil parametre til prosedyren %-.192s" + norwegian-ny "Feil parameter til prosedyra %-.192s" + pol "Incorrect parameters to procedure %-.192s" + por "Parâmetros incorretos para a 'procedure' '%-.192s'" + rum "Procedura '%-.192s' are parametrii incorecti" + rus 
"îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.192s'" + serbian "Pogrešni parametri prosleðeni proceduri '%-.192s'" + slo "Chybné parametre procedúry '%-.192s'" + spa "Equivocados parametros para procedimiento %-.192s" + swe "Felaktiga parametrar till procedur %-.192s" + ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.192s'" ER_UNKNOWN_TABLE 42S02 - cze "Nezn-Bámá tabulka '%-.64s' v %-.32s" - dan "Ukendt tabel '%-.64s' i %-.32s" - nla "Onbekende tabel '%-.64s' in %-.32s" - eng "Unknown table '%-.64s' in %-.32s" - est "Tundmatu tabel '%-.64s' %-.32s-s" - fre "Table inconnue '%-.64s' dans %-.32s" - ger "Unbekannte Tabelle '%-.64s' in '%-.32s'" - greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %-.32s" - hun "Ismeretlen tabla: '%-.64s' %-.32s-ban" - ita "Tabella '%-.64s' sconosciuta in %-.32s" - jpn "Unknown table '%-.64s' in %-.32s" - kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %-.32s)" - nor "Ukjent tabell '%-.64s' i %-.32s" - norwegian-ny "Ukjend tabell '%-.64s' i %-.32s" - pol "Unknown table '%-.64s' in %-.32s" - por "Tabela '%-.64s' desconhecida em '%-.32s'" - rum "Tabla '%-.64s' invalida in %-.32s" - rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s" - serbian "Nepoznata tabela '%-.64s' u '%-.32s'" - slo "Neznáma tabuµka '%-.64s' v %-.32s" - spa "Tabla desconocida '%-.64s' in %-.32s" - swe "Okänd tabell '%-.64s' i '%-.32s'" - ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s" + cze "Nezn-Bámá tabulka '%-.192s' v %-.32s" + dan "Ukendt tabel '%-.192s' i %-.32s" + nla "Onbekende tabel '%-.192s' in %-.32s" + eng "Unknown table '%-.192s' in %-.32s" + est "Tundmatu tabel '%-.192s' %-.32s-s" + fre "Table inconnue '%-.192s' dans %-.32s" + ger "Unbekannte Tabelle '%-.192s' in '%-.32s'" + greek "Áãíùóôïò ðßíáêáò '%-.192s' óå %-.32s" + hun "Ismeretlen tabla: '%-.192s' %-.32s-ban" + ita "Tabella '%-.192s' sconosciuta in %-.32s" + jpn "Unknown table '%-.192s' in %-.32s" + kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.192s' (µ¥ÀÌŸº£À̽º %-.32s)" + nor "Ukjent tabell '%-.192s' i %-.32s" + norwegian-ny "Ukjend tabell '%-.192s' i %-.32s" + pol "Unknown table '%-.192s' in %-.32s" + por "Tabela '%-.192s' desconhecida em '%-.32s'" + rum "Tabla '%-.192s' invalida in %-.32s" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.192s' × %-.32s" + serbian "Nepoznata tabela '%-.192s' u '%-.32s'" + slo "Neznáma tabuµka '%-.192s' v %-.32s" + spa "Tabla desconocida '%-.192s' in %-.32s" + swe "Okänd tabell '%-.192s' i '%-.32s'" + ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.192s' Õ %-.32s" ER_FIELD_SPECIFIED_TWICE 42000 - cze "Polo-B¾ka '%-.64s' je zadána dvakrát" - dan "Feltet '%-.64s' er anvendt to gange" - nla "Veld '%-.64s' is dubbel gespecificeerd" - eng "Column '%-.64s' specified twice" - est "Tulp '%-.64s' on määratletud topelt" - fre "Champ '%-.64s' spécifié deux fois" - ger "Feld '%-.64s' wurde zweimal angegeben" - greek "Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò" - hun "A(z) '%-.64s' mezot ketszer definialta" - ita "Campo '%-.64s' specificato 2 volte" - kor "Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù." 
- nor "Feltet '%-.64s' er spesifisert to ganger" - norwegian-ny "Feltet '%-.64s' er spesifisert to gangar" - pol "Field '%-.64s' specified twice" - por "Coluna '%-.64s' especificada duas vezes" - rum "Coloana '%-.64s' specificata de doua ori" - rus "óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ" - serbian "Kolona '%-.64s' je navedena dva puta" - slo "Pole '%-.64s' je zadané dvakrát" - spa "Campo '%-.64s' especificado dos veces" - swe "Fält '%-.64s' är redan använt" - ukr "óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ" + cze "Polo-B¾ka '%-.192s' je zadána dvakrát" + dan "Feltet '%-.192s' er anvendt to gange" + nla "Veld '%-.192s' is dubbel gespecificeerd" + eng "Column '%-.192s' specified twice" + est "Tulp '%-.192s' on määratletud topelt" + fre "Champ '%-.192s' spécifié deux fois" + ger "Feld '%-.192s' wurde zweimal angegeben" + greek "Ôï ðåäßï '%-.192s' Ý÷åé ïñéóèåß äýï öïñÝò" + hun "A(z) '%-.192s' mezot ketszer definialta" + ita "Campo '%-.192s' specificato 2 volte" + kor "Ä®·³ '%-.192s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù." + nor "Feltet '%-.192s' er spesifisert to ganger" + norwegian-ny "Feltet '%-.192s' er spesifisert to gangar" + pol "Field '%-.192s' specified twice" + por "Coluna '%-.192s' especificada duas vezes" + rum "Coloana '%-.192s' specificata de doua ori" + rus "óÔÏÌÂÅà '%-.192s' ÕËÁÚÁÎ Ä×ÁÖÄÙ" + serbian "Kolona '%-.192s' je navedena dva puta" + slo "Pole '%-.192s' je zadané dvakrát" + spa "Campo '%-.192s' especificado dos veces" + swe "Fält '%-.192s' är redan använt" + ukr "óÔÏ×ÂÅÃØ '%-.192s' ÚÁÚÎÁÞÅÎÏ Äצަ" ER_INVALID_GROUP_FUNC_USE cze "Nespr-Bávné pou¾ití funkce group" dan "Forkert brug af grupperings-funktion" @@ -2670,28 +2670,28 @@ ER_INVALID_GROUP_FUNC_USE swe "Felaktig användning av SQL grupp function" ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ" ER_UNSUPPORTED_EXTENSION 42000 - cze "Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není" - dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version" - nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt." - eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version" - est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis" - fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL" - ger "Tabelle '%-.64s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfügbar ist" - greek "Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL" - hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban." - ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL" - kor "Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." 
- nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" - norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" - pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" - por "Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL" - rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL" - rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL" - serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a" - slo "Tabuµka '%-.64s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je" - spa "Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión" - swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL" - ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL" + cze "Tabulka '%-.192s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není" + dan "Tabellen '%-.192s' bruger et filtypenavn som ikke findes i denne MySQL version" + nla "Tabel '%-.192s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt." + eng "Table '%-.192s' uses an extension that doesn't exist in this MySQL version" + est "Tabel '%-.192s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis" + fre "Table '%-.192s' : utilise une extension invalide pour cette version de MySQL" + ger "Tabelle '%-.192s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfügbar ist" + greek "Ï ðßíáêò '%-.192s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL" + hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban." + ita "La tabella '%-.192s' usa un'estensione che non esiste in questa versione di MySQL" + kor "Å×À̺í '%-.192s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." 
+ nor "Table '%-.192s' uses a extension that doesn't exist in this MySQL version" + norwegian-ny "Table '%-.192s' uses a extension that doesn't exist in this MySQL version" + pol "Table '%-.192s' uses a extension that doesn't exist in this MySQL version" + por "Tabela '%-.192s' usa uma extensão que não existe nesta versão do MySQL" + rum "Tabela '%-.192s' foloseste o extensire inexistenta in versiunea curenta de MySQL" + rus "÷ ÔÁÂÌÉÃÅ '%-.192s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL" + serbian "Tabela '%-.192s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a" + slo "Tabuµka '%-.192s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je" + spa "Tabla '%-.192s' usa una extensión que no existe en esta MySQL versión" + swe "Tabell '%-.192s' har en extension som inte finns i denna version av MySQL" + ukr "ôÁÂÌÉÃÑ '%-.192s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL" ER_TABLE_MUST_HAVE_COLUMNS 42000 cze "Tabulka mus-Bí mít alespoò jeden sloupec" dan "En tabel skal have mindst een kolonne" @@ -2715,27 +2715,27 @@ ER_TABLE_MUST_HAVE_COLUMNS 42000 swe "Tabeller måste ha minst 1 kolumn" ukr "ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ" ER_RECORD_FILE_FULL - cze "Tabulka '%-.64s' je pln-Bá" - dan "Tabellen '%-.64s' er fuld" - nla "De tabel '%-.64s' is vol" - eng "The table '%-.64s' is full" - jps "table '%-.64s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·", - est "Tabel '%-.64s' on täis" - fre "La table '%-.64s' est pleine" - ger "Tabelle '%-.64s' ist voll" - greek "Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò" - hun "A '%-.64s' tabla megtelt" - ita "La tabella '%-.64s' e` piena" - jpn "table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹" - kor "Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. " - por "Tabela '%-.64s' está cheia" - rum "Tabela '%-.64s' e plina" - rus "ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ" - serbian "Tabela '%-.64s' je popunjena do kraja" - slo "Tabuµka '%-.64s' je plná" - spa "La tabla '%-.64s' está llena" - swe "Tabellen '%-.64s' är full" - ukr "ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ" + cze "Tabulka '%-.192s' je pln-Bá" + dan "Tabellen '%-.192s' er fuld" + nla "De tabel '%-.192s' is vol" + eng "The table '%-.192s' is full" + jps "table '%-.192s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·", + est "Tabel '%-.192s' on täis" + fre "La table '%-.192s' est pleine" + ger "Tabelle '%-.192s' ist voll" + greek "Ï ðßíáêáò '%-.192s' åßíáé ãåìÜôïò" + hun "A '%-.192s' tabla megtelt" + ita "La tabella '%-.192s' e` piena" + jpn "table '%-.192s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹" + kor "Å×À̺í '%-.192s'°¡ full³µ½À´Ï´Ù. " + por "Tabela '%-.192s' está cheia" + rum "Tabela '%-.192s' e plina" + rus "ôÁÂÌÉÃÁ '%-.192s' ÐÅÒÅÐÏÌÎÅÎÁ" + serbian "Tabela '%-.192s' je popunjena do kraja" + slo "Tabuµka '%-.192s' je plná" + spa "La tabla '%-.192s' está llena" + swe "Tabellen '%-.192s' är full" + ukr "ôÁÂÌÉÃÑ '%-.192s' ÚÁÐÏ×ÎÅÎÁ" ER_UNKNOWN_CHARACTER_SET 42000 cze "Nezn-Bámá znaková sada: '%-.64s'" dan "Ukendt tegnsæt: '%-.64s'" @@ -2865,52 +2865,52 @@ ER_WRONG_OUTER_JOIN 42000 swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket" ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON" ER_NULL_COLUMN_IN_INDEX 42000 - eng "Table handler doesn't support NULL in given index. Please change column '%-.64s' to be NOT NULL or use another handler" - swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.64s' till NOT NULL eller använd en annan hanterare" + eng "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler" + swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. 
Ändra '%-.192s' till NOT NULL eller använd en annan hanterare" ER_CANT_FIND_UDF - cze "Nemohu na-Bèíst funkci '%-.64s'" - dan "Kan ikke læse funktionen '%-.64s'" - nla "Kan functie '%-.64s' niet laden" - eng "Can't load function '%-.64s'" - jps "function '%-.64s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ", - est "Ei suuda avada funktsiooni '%-.64s'" - fre "Imposible de charger la fonction '%-.64s'" - ger "Kann Funktion '%-.64s' nicht laden" - greek "Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'" - hun "A(z) '%-.64s' fuggveny nem toltheto be" - ita "Impossibile caricare la funzione '%-.64s'" - jpn "function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó" - kor "'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù." - por "Não pode carregar a função '%-.64s'" - rum "Nu pot incarca functia '%-.64s'" - rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'" - serbian "Ne mogu da uèitam funkciju '%-.64s'" - slo "Nemô¾em naèíta» funkciu '%-.64s'" - spa "No puedo cargar función '%-.64s'" - swe "Kan inte ladda funktionen '%-.64s'" - ukr "îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'" + cze "Nemohu na-Bèíst funkci '%-.192s'" + dan "Kan ikke læse funktionen '%-.192s'" + nla "Kan functie '%-.192s' niet laden" + eng "Can't load function '%-.192s'" + jps "function '%-.192s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ", + est "Ei suuda avada funktsiooni '%-.192s'" + fre "Imposible de charger la fonction '%-.192s'" + ger "Kann Funktion '%-.192s' nicht laden" + greek "Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.192s'" + hun "A(z) '%-.192s' fuggveny nem toltheto be" + ita "Impossibile caricare la funzione '%-.192s'" + jpn "function '%-.192s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó" + kor "'%-.192s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù." + por "Não pode carregar a função '%-.192s'" + rum "Nu pot incarca functia '%-.192s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.192s'" + serbian "Ne mogu da uèitam funkciju '%-.192s'" + slo "Nemô¾em naèíta» funkciu '%-.192s'" + spa "No puedo cargar función '%-.192s'" + swe "Kan inte ladda funktionen '%-.192s'" + ukr "îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.192s'" ER_CANT_INITIALIZE_UDF - cze "Nemohu inicializovat funkci '%-.64s'; %-.80s" - dan "Kan ikke starte funktionen '%-.64s'; %-.80s" - nla "Kan functie '%-.64s' niet initialiseren; %-.80s" - eng "Can't initialize function '%-.64s'; %-.80s" - jps "function '%-.64s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s", - est "Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s" - fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s" - ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s" - greek "Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s" - hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s" - ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s" - jpn "function '%-.64s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s" - kor "'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s" - por "Não pode inicializar a função '%-.64s' - '%-.80s'" - rum "Nu pot initializa functia '%-.64s'; %-.80s" - rus "îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s" - serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s" - slo "Nemô¾em inicializova» funkciu '%-.64s'; %-.80s" - spa "No puedo inicializar función '%-.64s'; %-.80s" - swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'" - ukr "îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s" + cze "Nemohu inicializovat funkci '%-.192s'; %-.80s" + dan "Kan ikke starte funktionen '%-.192s'; %-.80s" + nla "Kan functie '%-.192s' niet initialiseren; %-.80s" + eng "Can't initialize function '%-.192s'; %-.80s" + jps "function '%-.192s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s", + est "Ei suuda algväärtustada funktsiooni '%-.192s'; %-.80s" + fre 
"Impossible d'initialiser la fonction '%-.192s'; %-.80s" + ger "Kann Funktion '%-.192s' nicht initialisieren: %-.80s" + greek "Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.192s'; %-.80s" + hun "A(z) '%-.192s' fuggveny nem inicializalhato; %-.80s" + ita "Impossibile inizializzare la funzione '%-.192s'; %-.80s" + jpn "function '%-.192s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s" + kor "'%-.192s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s" + por "Não pode inicializar a função '%-.192s' - '%-.80s'" + rum "Nu pot initializa functia '%-.192s'; %-.80s" + rus "îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.192s'; %-.80s" + serbian "Ne mogu da inicijalizujem funkciju '%-.192s'; %-.80s" + slo "Nemô¾em inicializova» funkciu '%-.192s'; %-.80s" + spa "No puedo inicializar función '%-.192s'; %-.80s" + swe "Kan inte initialisera funktionen '%-.192s'; '%-.80s'" + ukr "îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.192s'; %-.80s" ER_UDF_NO_PATHS cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty" dan "Angivelse af sti ikke tilladt for delt bibliotek" @@ -2934,52 +2934,52 @@ ER_UDF_NO_PATHS swe "Man får inte ange sökväg för dynamiska bibliotek" ukr "îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË" ER_UDF_EXISTS - cze "Funkce '%-.64s' ji-B¾ existuje" - dan "Funktionen '%-.64s' findes allerede" - nla "Functie '%-.64s' bestaat reeds" - eng "Function '%-.64s' already exists" - jps "Function '%-.64s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·", - est "Funktsioon '%-.64s' juba eksisteerib" - fre "La fonction '%-.64s' existe déjà" - ger "Funktion '%-.64s' existiert schon" - greek "Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç" - hun "A '%-.64s' fuggveny mar letezik" - ita "La funzione '%-.64s' esiste gia`" - jpn "Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹" - kor "'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." - por "Função '%-.64s' já existe" - rum "Functia '%-.64s' exista deja" - rus "æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "Funkcija '%-.64s' veæ postoji" - slo "Funkcia '%-.64s' u¾ existuje" - spa "Función '%-.64s' ya existe" - swe "Funktionen '%-.64s' finns redan" - ukr "æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤" + cze "Funkce '%-.192s' ji-B¾ existuje" + dan "Funktionen '%-.192s' findes allerede" + nla "Functie '%-.192s' bestaat reeds" + eng "Function '%-.192s' already exists" + jps "Function '%-.192s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·", + est "Funktsioon '%-.192s' juba eksisteerib" + fre "La fonction '%-.192s' existe déjà" + ger "Funktion '%-.192s' existiert schon" + greek "Ç óõíÜñôçóç '%-.192s' õðÜñ÷åé Þäç" + hun "A '%-.192s' fuggveny mar letezik" + ita "La funzione '%-.192s' esiste gia`" + jpn "Function '%-.192s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹" + kor "'%-.192s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." 
+ por "Função '%-.192s' já existe" + rum "Functia '%-.192s' exista deja" + rus "æÕÎËÃÉÑ '%-.192s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Funkcija '%-.192s' veæ postoji" + slo "Funkcia '%-.192s' u¾ existuje" + spa "Función '%-.192s' ya existe" + swe "Funktionen '%-.192s' finns redan" + ukr "æÕÎËÃ¦Ñ '%-.192s' ×ÖÅ ¦ÓÎÕ¤" ER_CANT_OPEN_LIBRARY - cze "Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %-.128s)" - dan "Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %-.128s)" - nla "Kan shared library '%-.64s' niet openen (Errcode: %d %-.128s)" - eng "Can't open shared library '%-.64s' (errno: %d %-.128s)" - jps "shared library '%-.64s' ‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %-.128s)", - est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.128s)" - fre "Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %-.128s)" - ger "Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.128s)" - greek "Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %-.128s)" - hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)" - ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %-.128s)" - jpn "shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d %-.128s)" - kor "'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %-.128s)" - nor "Can't open shared library '%-.64s' (errno: %d %-.128s)" - norwegian-ny "Can't open shared library '%-.64s' (errno: %d %-.128s)" - pol "Can't open shared library '%-.64s' (errno: %d %-.128s)" - por "Não pode abrir biblioteca compartilhada '%-.64s' (erro no. %d '%-.128s')" - rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.128s)" - rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.128s)" - serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.128s)" - slo "Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %-.128s)" - spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %-.128s)" - swe "Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %-.128s)" - ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.128s)" + cze "Nemohu otev-Bøít sdílenou knihovnu '%-.192s' (errno: %d %-.128s)" + dan "Kan ikke åbne delt bibliotek '%-.192s' (errno: %d %-.128s)" + nla "Kan shared library '%-.192s' niet openen (Errcode: %d %-.128s)" + eng "Can't open shared library '%-.192s' (errno: %d %-.128s)" + jps "shared library '%-.192s' ‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %-.128s)", + est "Ei suuda avada jagatud teeki '%-.192s' (veakood: %d %-.128s)" + fre "Impossible d'ouvrir la bibliothèque partagée '%-.192s' (errno: %d %-.128s)" + ger "Kann Shared Library '%-.192s' nicht öffnen (Fehler: %d %-.128s)" + greek "Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.192s' (êùäéêüò ëÜèïõò: %d %-.128s)" + hun "A(z) '%-.192s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)" + ita "Impossibile aprire la libreria condivisa '%-.192s' (errno: %d %-.128s)" + jpn "shared library '%-.192s' ¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d %-.128s)" + kor "'%-.192s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %-.128s)" + nor "Can't open shared library '%-.192s' (errno: %d %-.128s)" + norwegian-ny "Can't open shared library '%-.192s' (errno: %d %-.128s)" + pol "Can't open shared library '%-.192s' (errno: %d %-.128s)" + por "Não pode abrir biblioteca compartilhada '%-.192s' (erro no. 
%d '%-.128s')" + rum "Nu pot deschide libraria shared '%-.192s' (Eroare: %d %-.128s)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.192s' (ÏÛÉÂËÁ: %d %-.128s)" + serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.192s' (errno: %d %-.128s)" + slo "Nemô¾em otvori» zdieµanú kni¾nicu '%-.192s' (chybový kód: %d %-.128s)" + spa "No puedo abrir libraria conjugada '%-.192s' (errno: %d %-.128s)" + swe "Kan inte öppna det dynamiska biblioteket '%-.192s' (Felkod: %d %-.128s)" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.192s' (ÐÏÍÉÌËÁ: %d %-.128s)" ER_CANT_FIND_DL_ENTRY cze "Nemohu naj-Bít funkci '%-.128s' v knihovnì" dan "Kan ikke finde funktionen '%-.128s' i bibliotek" @@ -3003,27 +3003,27 @@ ER_CANT_FIND_DL_ENTRY swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket" ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.128s' Õ Â¦Â̦ÏÔÅæ" ER_FUNCTION_NOT_DEFINED - cze "Funkce '%-.64s' nen-Bí definována" - dan "Funktionen '%-.64s' er ikke defineret" - nla "Functie '%-.64s' is niet gedefinieerd" - eng "Function '%-.64s' is not defined" - jps "Function '%-.64s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", - est "Funktsioon '%-.64s' ei ole defineeritud" - fre "La fonction '%-.64s' n'est pas définie" - ger "Funktion '%-.64s' ist nicht definiert" - greek "Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß" - hun "A '%-.64s' fuggveny nem definialt" - ita "La funzione '%-.64s' non e` definita" - jpn "Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" - kor "'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù." - por "Função '%-.64s' não está definida" - rum "Functia '%-.64s' nu e definita" - rus "æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ" - serbian "Funkcija '%-.64s' nije definisana" - slo "Funkcia '%-.64s' nie je definovaná" - spa "Función '%-.64s' no está definida" - swe "Funktionen '%-.64s' är inte definierad" - ukr "æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ" + cze "Funkce '%-.192s' nen-Bí definována" + dan "Funktionen '%-.192s' er ikke defineret" + nla "Functie '%-.192s' is niet gedefinieerd" + eng "Function '%-.192s' is not defined" + jps "Function '%-.192s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Funktsioon '%-.192s' ei ole defineeritud" + fre "La fonction '%-.192s' n'est pas définie" + ger "Funktion '%-.192s' ist nicht definiert" + greek "Ç óõíÜñôçóç '%-.192s' äåí Ý÷åé ïñéóèåß" + hun "A '%-.192s' fuggveny nem definialt" + ita "La funzione '%-.192s' non e` definita" + jpn "Function '%-.192s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.192s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù." + por "Função '%-.192s' não está definida" + rum "Functia '%-.192s' nu e definita" + rus "æÕÎËÃÉÑ '%-.192s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ" + serbian "Funkcija '%-.192s' nije definisana" + slo "Funkcia '%-.192s' nie je definovaná" + spa "Función '%-.192s' no está definida" + swe "Funktionen '%-.192s' är inte definierad" + ukr "æÕÎËæÀ '%-.192s' ÎÅ ×ÉÚÎÁÞÅÎÏ" ER_HOST_IS_BLOCKED cze "Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'" dan "Værten '%-.64s' er blokeret på grund af mange fejlforespørgsler. 
Lås op med 'mysqladmin flush-hosts'" @@ -3188,27 +3188,27 @@ ER_WRONG_VALUE_COUNT_ON_ROW 21S01 swe "Antalet kolumner motsvarar inte antalet värden på rad: %ld" ukr "ë¦ÌØ˦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØ˦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld" ER_CANT_REOPEN_TABLE - cze "Nemohu znovuotev-Bøít tabulku: '%-.64s" - dan "Kan ikke genåbne tabel '%-.64s" - nla "Kan tabel niet opnieuw openen: '%-.64s" - eng "Can't reopen table: '%-.64s'" - est "Ei suuda taasavada tabelit '%-.64s'" - fre "Impossible de réouvrir la table: '%-.64s" - ger "Kann Tabelle'%-.64s' nicht erneut öffnen" - hun "Nem lehet ujra-megnyitni a tablat: '%-.64s" - ita "Impossibile riaprire la tabella: '%-.64s'" - kor "Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.64s" - nor "Can't reopen table: '%-.64s" - norwegian-ny "Can't reopen table: '%-.64s" - pol "Can't reopen table: '%-.64s" - por "Não pode reabrir a tabela '%-.64s" - rum "Nu pot redeschide tabela: '%-.64s'" - rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.64s'" - serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'" - slo "Can't reopen table: '%-.64s" - spa "No puedo reabrir tabla: '%-.64s" - swe "Kunde inte stänga och öppna tabell '%-.64s" - ukr "îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'" + cze "Nemohu znovuotev-Bøít tabulku: '%-.192s" + dan "Kan ikke genåbne tabel '%-.192s" + nla "Kan tabel niet opnieuw openen: '%-.192s" + eng "Can't reopen table: '%-.192s'" + est "Ei suuda taasavada tabelit '%-.192s'" + fre "Impossible de réouvrir la table: '%-.192s" + ger "Kann Tabelle'%-.192s' nicht erneut öffnen" + hun "Nem lehet ujra-megnyitni a tablat: '%-.192s" + ita "Impossibile riaprire la tabella: '%-.192s'" + kor "Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.192s" + nor "Can't reopen table: '%-.192s" + norwegian-ny "Can't reopen table: '%-.192s" + pol "Can't reopen table: '%-.192s" + por "Não pode reabrir a tabela '%-.192s" + rum "Nu pot redeschide tabela: '%-.192s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.192s'" + serbian "Ne mogu da ponovo otvorim tabelu '%-.192s'" + slo "Can't reopen table: '%-.192s" + spa "No puedo reabrir tabla: '%-.192s" + swe "Kunde inte stänga och öppna tabell '%-.192s" + ukr "îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.192s'" ER_INVALID_USE_OF_NULL 22004 cze "Neplatn-Bé u¾ití hodnoty NULL" dan "Forkert brug af nulværdi (NULL)" @@ -3266,65 +3266,65 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del" ukr "úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY" ER_NONEXISTING_GRANT 42000 - cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'" - dan "Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'" - nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'" - eng "There is no such grant defined for user '%-.32s' on host '%-.64s'" - jps "ƒ†[ƒU[ '%-.32s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", - est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'" - fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'" - ger "Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung" - hun "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" - ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'" - jpn "¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" - kor "»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù." 
- por "Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'" - rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'" - rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'" - serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'" - spa "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'" - swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'" - ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'" + cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.48s' na stroji '%-.64s'" + dan "Denne tilladelse findes ikke for brugeren '%-.48s' på vært '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s'" + eng "There is no such grant defined for user '%-.48s' on host '%-.64s'" + jps "ƒ†[ƒU[ '%-.48s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Sellist õigust ei ole defineeritud kasutajale '%-.48s' masinast '%-.64s'" + fre "Un tel droit n'est pas défini pour l'utilisateur '%-.48s' sur l'hôte '%-.64s'" + ger "Für Benutzer '%-.48s' auf Host '%-.64s' gibt es keine solche Berechtigung" + hun "A '%-.48s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" + ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s'" + jpn "¥æ¡¼¥¶¡¼ '%-.48s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "»ç¿ëÀÚ '%-.48s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù." + por "Não existe tal permissão (grant) definida para o usuário '%-.48s' no 'host' '%-.64s'" + rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.48s' de pe host-ul '%-.64s'" + rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.48s' ÎÁ ÈÏÓÔÅ '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.48s' na host-u '%-.64s'" + spa "No existe permiso definido para usuario '%-.48s' en el servidor '%-.64s'" + swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s'" + ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.48s' Ú ÈÏÓÔÕ '%-.64s'" ER_TABLEACCESS_DENIED_ERROR 42000 - cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'" - dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'" - nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'" - eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'" - jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", - est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'" - fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'" - ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' auf Tabelle '%-.64s'" - hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban" - ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'" - jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" - kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. 
: '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'" - por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'" - rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'" - rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'" - serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'" - spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'" - swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'" - ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'" + cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.48s'@'%-.64s' pro tabulku '%-.192s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for tabellen '%-.192s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor tabel '%-.192s'" + eng "%-.16s command denied to user '%-.48s'@'%-.64s' for table '%-.192s'" + jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.48s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.192s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "%-.16s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tabelis '%-.192s'" + fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la table '%-.192s'" + ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' auf Tabelle '%-.192s'" + hun "%-.16s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.48s'@'%-.64s' sulla tabella '%-.192s'" + jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.48s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.192s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.48s'@'%-.64s' for Å×À̺í '%-.192s'" + por "Comando '%-.16s' negado para o usuário '%-.48s'@'%-.64s' na tabela '%-.192s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru tabela '%-.192s'" + rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.48s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.192s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za tabelu '%-.192s'" + spa "%-.16s comando negado para usuario: '%-.48s'@'%-.64s' para tabla '%-.192s'" + swe "%-.16s ej tillåtet för '%-.48s'@'%-.64s' för tabell '%-.192s'" + ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.48s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.192s'" ER_COLUMNACCESS_DENIED_ERROR 42000 - cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'" - dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'" - nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'" - eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'" - jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.64s' ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", - est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'" - fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'" - ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Feld '%-.64s' in Tabelle '%-.64s'" - hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban" - ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'" - jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' 
¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" - kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'" - por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'" - rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'" - rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'" - serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'" - spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'" - swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'" - ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'" + cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.48s'@'%-.64s' pro sloupec '%-.192s' v tabulce '%-.192s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for kolonne '%-.192s' in tabellen '%-.192s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor kolom '%-.192s' in tabel '%-.192s'" + eng "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'" + jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.48s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.192s' ƒe[ƒuƒ‹ '%-.192s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "%-.16s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tulbale '%-.192s' tabelis '%-.192s'" + fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la colonne '%-.192s' de la table '%-.192s'" + ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' und Feld '%-.192s' in Tabelle '%-.192s'" + hun "%-.16s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' mezo eseten a '%-.192s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.48s'@'%-.64s' sulla colonna '%-.192s' della tabella '%-.192s'" + jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.48s'@'%-.64s'\n ¥«¥é¥à '%-.192s' ¥Æ¡¼¥Ö¥ë '%-.192s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.48s'@'%-.64s' for Ä®·³ '%-.192s' in Å×À̺í '%-.192s'" + por "Comando '%-.16s' negado para o usuário '%-.48s'@'%-.64s' na coluna '%-.192s', na tabela '%-.192s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru coloana '%-.192s' in tabela '%-.192s'" + rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.48s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.192s' × ÔÁÂÌÉÃÅ '%-.192s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za kolonu '%-.192s' iz tabele '%-.192s'" + spa "%-.16s comando negado para usuario: '%-.48s'@'%-.64s' para columna '%-.192s' en la tabla '%-.192s'" + swe "%-.16s ej tillåtet för '%-.48s'@'%-.64s' för kolumn '%-.192s' i tabell '%-.192s'" + ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.48s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.192s' Õ ÔÁÂÌÉæ '%-.192s'" ER_ILLEGAL_GRANT_FOR_TABLE 42000 cze "Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít." dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres." 
@@ -3368,46 +3368,46 @@ ER_GRANT_WRONG_HOST_OR_USER 42000 swe "Felaktigt maskinnamn eller användarnamn använt med GRANT" ukr "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ" ER_NO_SUCH_TABLE 42S02 - cze "Tabulka '%-.64s.%-.64s' neexistuje" - dan "Tabellen '%-.64s.%-.64s' eksisterer ikke" - nla "Tabel '%-.64s.%-.64s' bestaat niet" - eng "Table '%-.64s.%-.64s' doesn't exist" - est "Tabelit '%-.64s.%-.64s' ei eksisteeri" - fre "La table '%-.64s.%-.64s' n'existe pas" - ger "Tabelle '%-.64s.%-.64s' existiert nicht" - hun "A '%-.64s.%-.64s' tabla nem letezik" - ita "La tabella '%-.64s.%-.64s' non esiste" - jpn "Table '%-.64s.%-.64s' doesn't exist" - kor "Å×À̺í '%-.64s.%-.64s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." - nor "Table '%-.64s.%-.64s' doesn't exist" - norwegian-ny "Table '%-.64s.%-.64s' doesn't exist" - pol "Table '%-.64s.%-.64s' doesn't exist" - por "Tabela '%-.64s.%-.64s' não existe" - rum "Tabela '%-.64s.%-.64s' nu exista" - rus "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" - serbian "Tabela '%-.64s.%-.64s' ne postoji" - slo "Table '%-.64s.%-.64s' doesn't exist" - spa "Tabla '%-.64s.%-.64s' no existe" - swe "Det finns ingen tabell som heter '%-.64s.%-.64s'" - ukr "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤" + cze "Tabulka '%-.192s.%-.192s' neexistuje" + dan "Tabellen '%-.192s.%-.192s' eksisterer ikke" + nla "Tabel '%-.192s.%-.192s' bestaat niet" + eng "Table '%-.192s.%-.192s' doesn't exist" + est "Tabelit '%-.192s.%-.192s' ei eksisteeri" + fre "La table '%-.192s.%-.192s' n'existe pas" + ger "Tabelle '%-.192s.%-.192s' existiert nicht" + hun "A '%-.192s.%-.192s' tabla nem letezik" + ita "La tabella '%-.192s.%-.192s' non esiste" + jpn "Table '%-.192s.%-.192s' doesn't exist" + kor "Å×À̺í '%-.192s.%-.192s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." + nor "Table '%-.192s.%-.192s' doesn't exist" + norwegian-ny "Table '%-.192s.%-.192s' doesn't exist" + pol "Table '%-.192s.%-.192s' doesn't exist" + por "Tabela '%-.192s.%-.192s' não existe" + rum "Tabela '%-.192s.%-.192s' nu exista" + rus "ôÁÂÌÉÃÁ '%-.192s.%-.192s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Tabela '%-.192s.%-.192s' ne postoji" + slo "Table '%-.192s.%-.192s' doesn't exist" + spa "Tabla '%-.192s.%-.192s' no existe" + swe "Det finns ingen tabell som heter '%-.192s.%-.192s'" + ukr "ôÁÂÌÉÃÑ '%-.192s.%-.192s' ÎÅ ¦ÓÎÕ¤" ER_NONEXISTING_TABLE_GRANT 42000 - cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'" - dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'" - nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'" - eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'" - est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'" - fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'" - ger "Eine solche Berechtigung ist für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s' nicht definiert" - hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett" - ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'" - kor "»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. 
" - por "Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'" - rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'" - rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'" - serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'" - spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'" - swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'" - ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'" + cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.48s' na stroji '%-.64s' pro tabulku '%-.192s'" + dan "Denne tilladelse eksisterer ikke for brugeren '%-.48s' på vært '%-.64s' for tabellen '%-.192s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s' op tabel '%-.192s'" + eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'" + est "Sellist õigust ei ole defineeritud kasutajale '%-.48s' masinast '%-.64s' tabelile '%-.192s'" + fre "Un tel droit n'est pas défini pour l'utilisateur '%-.48s' sur l'hôte '%-.64s' sur la table '%-.192s'" + ger "Eine solche Berechtigung ist für User '%-.48s' auf Host '%-.64s' an Tabelle '%-.192s' nicht definiert" + hun "A '%-.48s' felhasznalo szamara a '%-.64s' host '%-.192s' tablajaban ez a parancs nem engedelyezett" + ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s' sulla tabella '%-.192s'" + kor "»ç¿ëÀÚ '%-.48s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.192s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. 
" + por "Não existe tal permissão (grant) definido para o usuário '%-.48s' no 'host' '%-.64s', na tabela '%-.192s'" + rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.48s' de pe host-ul '%-.64s' pentru tabela '%-.192s'" + rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.48s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.192s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.48s' na host-u '%-.64s' tabeli '%-.192s'" + spa "No existe tal permiso definido para usuario '%-.48s' en el servidor '%-.64s' en la tabla '%-.192s'" + swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s' för tabell '%-.192s'" + ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.48s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.192s'" ER_NOT_ALLOWED_COMMAND 42000 cze "Pou-B¾itý pøíkaz není v této verzi MySQL povolen" dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL" @@ -3451,23 +3451,23 @@ ER_SYNTAX_ERROR 42000 swe "Du har något fel i din syntax" ukr "õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL" ER_DELAYED_CANT_CHANGE_LOCK - cze "Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s" - dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s" - nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s" - eng "Delayed insert thread couldn't get requested lock for table %-.64s" - est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s" - fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s" - ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten" - hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz" - ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s" - kor "Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù." - por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'" - rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s" - rus "ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s" - serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'" - spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s" - swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'" - ukr "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s" + cze "Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.192s" + dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.192s" + nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.192s" + eng "Delayed insert thread couldn't get requested lock for table %-.192s" + est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.192s" + fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.192s" + ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.192s' nicht erhalten" + hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.192s tablahoz" + ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.192s" + kor "Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.192sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù." 
+ por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.192s'" + rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.192s" + rus "ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.192s" + serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.192s'" + spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.192s" + swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.192s'" + ukr "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.192s" ER_TOO_MANY_DELAYED_THREADS cze "P-Bøíli¹ mnoho zpo¾dìných threadù" dan "For mange slettede tråde (threads) i brug" @@ -3487,28 +3487,28 @@ ER_TOO_MANY_DELAYED_THREADS swe "Det finns redan 'max_delayed_threads' trådar i använding" ukr "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ" ER_ABORTING_CONNECTION 08S01 - cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' (%-.64s)" - dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.32s' (%-.64s)" - nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' (%-.64s)" - eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - est "Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)" - fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' (%-.64s)" - ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s' (%-.64s)" - hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.32s' (%-.64s)" - ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.32s' (%-.64s)" - jpn "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.32s' (%-.64s)" - nor "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - pol "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - por "Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)" - rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)" - rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)" - serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)" - slo "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" - spa "Conexión abortada %ld para db: '%-.64s' usuario: '%-.32s' (%-.64s)" - swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s' (%-.64s)" - ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)" + cze "Zru-B¹eno spojení %ld do databáze: '%-.192s' u¾ivatel: '%-.48s' (%-.64s)" + dan "Afbrudt forbindelse %ld til database: '%-.192s' bruger: '%-.48s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' (%-.64s)" + eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + est "Ühendus katkestatud %ld andmebaasile: '%-.192s' kasutajale: '%-.48s' (%-.64s)" + fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' (%-.64s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. 
Benutzer: '%-.48s' (%-.64s)" + hun "Megszakitott kapcsolat %ld db: '%-.192s' adatbazishoz, felhasznalo: '%-.48s' (%-.64s)" + ita "Interrotta la connessione %ld al db: '%-.192s' utente: '%-.48s' (%-.64s)" + jpn "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.192s' »ç¿ëÀÚ: '%-.48s' (%-.64s)" + nor "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + norwegian-ny "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + pol "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + por "Conexão %ld abortou para o banco de dados '%-.192s' - usuário '%-.48s' (%-.64s)" + rum "Conectie terminata %ld la baza de date: '%-.192s' utilizator: '%-.48s' (%-.64s)" + rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.192s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.48s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' (%-.64s)" + slo "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + spa "Conexión abortada %ld para db: '%-.192s' usuario: '%-.48s' (%-.64s)" + swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s' (%-.64s)" + ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.192s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.48s' (%-.64s)" ER_NET_PACKET_TOO_LARGE 08S01 cze "Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'" dan "Modtog en datapakke som var større end 'max_allowed_packet'" @@ -3723,29 +3723,29 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner" ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ" ER_DELAYED_INSERT_TABLE_LOCKED - cze "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES" - dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES" - nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES" - eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES" - est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga" - fre "INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES" - ger "INSERT DELAYED kann für Tabelle '%-.64s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist" - greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)" - ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'" - jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - por "INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES" - rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES" - rus "îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" - serbian "Komanda 'INSERT 
DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'" - slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" - spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES" - swe "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES" - ukr "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" + cze "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.192s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES" + dan "INSERT DELAYED kan ikke bruges med tabellen '%-.192s', fordi tabellen er låst med LOCK TABLES" + nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.192s', vanwege een 'lock met LOCK TABLES" + eng "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES" + est "INSERT DELAYED ei saa kasutada tabeli '%-.192s' peal, kuna see on lukustatud LOCK TABLES käsuga" + fre "INSERT DELAYED ne peut être utilisé avec la table '%-.192s', car elle est verrouée avec LOCK TABLES" + ger "INSERT DELAYED kann für Tabelle '%-.192s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist" + greek "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + hun "Az INSERT DELAYED nem hasznalhato a '%-.192s' tablahoz, mert a tabla zarolt (LOCK TABLES)" + ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.192s', perche` soggetta a lock da 'LOCK TABLES'" + jpn "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + kor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + nor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + norwegian-ny "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + pol "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + por "INSERT DELAYED não pode ser usado com a tabela '%-.192s', porque ela está travada com LOCK TABLES" + rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.192s', deoarece este locked folosing LOCK TABLES" + rus "îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.192s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" + serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.192s', zbog toga što je zakljuèana komandom 'LOCK TABLES'" + slo "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + spa "INSERT DELAYED no puede ser usado con tablas '%-.192s', porque esta bloqueada con LOCK TABLES" + swe "INSERT DELAYED kan inte användas med tabell '%-.192s', emedan den är låst med LOCK TABLES" + ukr "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.192s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" ER_WRONG_COLUMN_NAME 42000 cze "Nespr-Bávné jméno sloupce '%-.100s'" dan "Forkert kolonnenavn '%-.100s'" @@ -3764,29 +3764,29 @@ ER_WRONG_COLUMN_NAME 42000 swe "Felaktigt kolumnnamn '%-.100s'" ukr "îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'" ER_WRONG_KEY_COLUMN 42000 - cze "Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'" - dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'" - nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren" - eng "The used storage engine can't index column '%-.64s'" - est "Tabelihandler ei oska indekseerida tulpa '%-.64s'" - fre "Le handler de la table ne peut indexé la 
colonne '%-.64s'" - ger "Die verwendete Speicher-Engine kann die Spalte '%-.64s' nicht indizieren" - greek "The used table handler can't index column '%-.64s'" - hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni" - ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'" - jpn "The used table handler can't index column '%-.64s'" - kor "The used table handler can't index column '%-.64s'" - nor "The used table handler can't index column '%-.64s'" - norwegian-ny "The used table handler can't index column '%-.64s'" - pol "The used table handler can't index column '%-.64s'" - por "O manipulador de tabela usado não pode indexar a coluna '%-.64s'" - rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'" - rus "éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'" - serbian "Handler tabele ne može da indeksira kolonu '%-.64s'" - slo "The used table handler can't index column '%-.64s'" - spa "El manipulador de tabla usado no puede indexar columna '%-.64s'" - swe "Den använda tabelltypen kan inte indexera kolumn '%-.64s'" - ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'" + cze "Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.192s'" + dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.192s'" + nla "De gebruikte tabel 'handler' kan kolom '%-.192s' niet indexeren" + eng "The used storage engine can't index column '%-.192s'" + est "Tabelihandler ei oska indekseerida tulpa '%-.192s'" + fre "Le handler de la table ne peut indexé la colonne '%-.192s'" + ger "Die verwendete Speicher-Engine kann die Spalte '%-.192s' nicht indizieren" + greek "The used table handler can't index column '%-.192s'" + hun "A hasznalt tablakezelo nem tudja a '%-.192s' mezot indexelni" + ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.192s'" + jpn "The used table handler can't index column '%-.192s'" + kor "The used table handler can't index column '%-.192s'" + nor "The used table handler can't index column '%-.192s'" + norwegian-ny "The used table handler can't index column '%-.192s'" + pol "The used table handler can't index column '%-.192s'" + por "O manipulador de tabela usado não pode indexar a coluna '%-.192s'" + rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.192s'" + rus "éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.192s'" + serbian "Handler tabele ne može da indeksira kolonu '%-.192s'" + slo "The used table handler can't index column '%-.192s'" + spa "El manipulador de tabla usado no puede indexar columna '%-.192s'" + swe "Den använda tabelltypen kan inte indexera kolumn '%-.192s'" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.192s'" ER_WRONG_MRG_TABLE cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì" dan "Tabellerne i MERGE er ikke defineret ens" @@ -3811,46 +3811,46 @@ ER_WRONG_MRG_TABLE swe "Tabellerna i MERGE-tabellen är inte identiskt definierade" ukr "ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ" ER_DUP_UNIQUE 23000 - cze "Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'" - dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler" - nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking" - eng "Can't write, because of unique constraint, to table '%-.64s'" - est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust" - fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'" - ger "Schreiben in Tabelle '%-.64s' nicht möglich wegen einer 
Eindeutigkeitsbeschränkung (unique constraint)" - hun "A '%-.64s' nem irhato, az egyedi mezok miatt" - ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`" - por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'" - rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'" - rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ" - serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'" - spa "No puedo escribir, debido al único constraint, para tabla '%-.64s'" - swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test" - ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦" + cze "Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.192s'" + dan "Kan ikke skrive til tabellen '%-.192s' fordi det vil bryde CONSTRAINT regler" + nla "Kan niet opslaan naar table '%-.192s' vanwege 'unique' beperking" + eng "Can't write, because of unique constraint, to table '%-.192s'" + est "Ei suuda kirjutada tabelisse '%-.192s', kuna see rikub ühesuse kitsendust" + fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.192s'" + ger "Schreiben in Tabelle '%-.192s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)" + hun "A '%-.192s' nem irhato, az egyedi mezok miatt" + ita "Impossibile scrivere nella tabella '%-.192s' per limitazione di unicita`" + por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.192s'" + rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.192s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.192s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ" + serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.192s'" + spa "No puedo escribir, debido al único constraint, para tabla '%-.192s'" + swe "Kan inte skriva till tabell '%-.192s'; UNIQUE-test" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.192s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦" ER_BLOB_KEY_WITHOUT_LENGTH 42000 - cze "BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky" - dan "BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde" - nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" - eng "BLOB/TEXT column '%-.64s' used in key specification without a key length" - est "BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata" - fre "La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index" - ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet" - greek "BLOB column '%-.64s' used in key specification without a key length" - hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" - ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" - jpn "BLOB column '%-.64s' used in key specification without a key length" - kor "BLOB column '%-.64s' used in key specification without a key length" - nor "BLOB column '%-.64s' used in key specification without a key length" - norwegian-ny "BLOB column '%-.64s' used in key specification without a key length" - pol "BLOB column '%-.64s' used in key specification without a key length" - por "Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave" - rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita" - rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ 
ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ" - serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa" - slo "BLOB column '%-.64s' used in key specification without a key length" - spa "Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave" - swe "Du har inte angett någon nyckellängd för BLOB '%-.64s'" - ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ" + cze "BLOB sloupec '%-.192s' je pou-B¾it ve specifikaci klíèe bez délky" + dan "BLOB kolonnen '%-.192s' brugt i nøglespecifikation uden nøglelængde" + nla "BLOB kolom '%-.192s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" + eng "BLOB/TEXT column '%-.192s' used in key specification without a key length" + est "BLOB-tüüpi tulp '%-.192s' on kasutusel võtmes ilma pikkust määratlemata" + fre "La colonne '%-.192s' de type BLOB est utilisée dans une définition d'index sans longueur d'index" + ger "BLOB- oder TEXT-Spalte '%-.192s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet" + greek "BLOB column '%-.192s' used in key specification without a key length" + hun "BLOB mezo '%-.192s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" + ita "La colonna '%-.192s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" + jpn "BLOB column '%-.192s' used in key specification without a key length" + kor "BLOB column '%-.192s' used in key specification without a key length" + nor "BLOB column '%-.192s' used in key specification without a key length" + norwegian-ny "BLOB column '%-.192s' used in key specification without a key length" + pol "BLOB column '%-.192s' used in key specification without a key length" + por "Coluna BLOB '%-.192s' usada na especificação de chave sem o comprimento da chave" + rum "Coloana BLOB '%-.192s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita" + rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.192s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ" + serbian "BLOB kolona '%-.192s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa" + slo "BLOB column '%-.192s' used in key specification without a key length" + spa "Columna BLOB column '%-.192s' usada en especificación de clave sin tamaño de la clave" + swe "Du har inte angett någon nyckellängd för BLOB '%-.192s'" + ukr "óÔÏ×ÂÅÃØ BLOB '%-.192s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ" ER_PRIMARY_CANT_HAVE_NULL 42000 cze "V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE" dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet" @@ -3936,21 +3936,21 @@ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel" ukr "÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ" ER_KEY_DOES_NOT_EXITS 42000 S1009 - cze "Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje" - dan "Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'" - nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'" - eng "Key '%-.64s' doesn't exist in table '%-.64s'" - est "Võti '%-.64s' ei eksisteeri tabelis '%-.64s'" - fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'" - ger "Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht" - hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban" - ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'" - 
por "Chave '%-.64s' não existe na tabela '%-.64s'" - rus "ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'" - serbian "Kljuè '%-.64s' ne postoji u tabeli '%-.64s'" - spa "Clave '%-.64s' no existe en la tabla '%-.64s'" - swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'" - ukr "ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'" + cze "Kl-Bíè '%-.192s' v tabulce '%-.192s' neexistuje" + dan "Nøglen '%-.192s' eksisterer ikke i tabellen '%-.192s'" + nla "Zoeksleutel '%-.192s' bestaat niet in tabel '%-.192s'" + eng "Key '%-.192s' doesn't exist in table '%-.192s'" + est "Võti '%-.192s' ei eksisteeri tabelis '%-.192s'" + fre "L'index '%-.192s' n'existe pas sur la table '%-.192s'" + ger "Schlüssel '%-.192s' existiert in der Tabelle '%-.192s' nicht" + hun "A '%-.192s' kulcs nem letezik a '%-.192s' tablaban" + ita "La chiave '%-.192s' non esiste nella tabella '%-.192s'" + por "Chave '%-.192s' não existe na tabela '%-.192s'" + rus "ëÌÀÞ '%-.192s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.192s'" + serbian "Kljuè '%-.192s' ne postoji u tabeli '%-.192s'" + spa "Clave '%-.192s' no existe en la tabla '%-.192s'" + swe "Nyckel '%-.192s' finns inte in tabell '%-.192s'" + ukr "ëÌÀÞ '%-.192s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.192s'" ER_CHECK_NO_SUCH_TABLE 42000 cze "Nemohu otev-Bøít tabulku" dan "Kan ikke åbne tabellen" @@ -4072,20 +4072,20 @@ ER_ERROR_DURING_CHECKPOINT swe "Fick fel %d vid CHECKPOINT" ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT" ER_NEW_ABORTING_CONNECTION 08S01 - cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno" - dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: '%-.64s' (%-.64s)" - nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)" - eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)" - est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)" - fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: '%-.64s' (%-.64s)" - ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)" - ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)" - por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')" - rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)" - serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)" - spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)" - swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)" - ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: '%-.64s' (%-.64s)" + cze "Spojen-Bí %ld do databáze: '%-.192s' u¾ivatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno" + dan "Afbrød forbindelsen %ld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)" + eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)" + est "Ühendus katkestatud %ld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s' (%-.64s)" + fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. 
Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)" + ita "Interrotta la connessione %ld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)" + por "Conexão %ld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')" + rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.192s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.48s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)" + spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)" + swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)" + ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.192s' ËÏÒÉÓÔÕ×ÁÞ: '%-.48s' ÈÏÓÔ: '%-.64s' (%-.64s)" ER_DUMP_NOT_IMPLEMENTED cze "Handler tabulky nepodporuje bin-Bární dump" dan "Denne tabeltype unserstøtter ikke binært tabeldump" @@ -4110,20 +4110,20 @@ ER_FLUSH_MASTER_BINLOG_CLOSED serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'" ukr "òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER" ER_INDEX_REBUILD - cze "P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né" - dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'" - nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'" - eng "Failed rebuilding the index of dumped table '%-.64s'" - fre "La reconstruction de l'index de la table copiée '%-.64s' a échoué" - ger "Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen" - greek "Failed rebuilding the index of dumped table '%-.64s'" - hun "Failed rebuilding the index of dumped table '%-.64s'" - ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'" - por "Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'" - rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'" - serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela" - spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'" - ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'" + cze "P-Bøebudování indexu dumpnuté tabulky '%-.192s' nebylo úspì¹né" + dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.192s'" + nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.192s'" + eng "Failed rebuilding the index of dumped table '%-.192s'" + fre "La reconstruction de l'index de la table copiée '%-.192s' a échoué" + ger "Neuerstellung des Index der Dump-Tabelle '%-.192s' fehlgeschlagen" + greek "Failed rebuilding the index of dumped table '%-.192s'" + hun "Failed rebuilding the index of dumped table '%-.192s'" + ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.192s'" + por "Falhou na reconstrução do índice da tabela 'dumped' '%-.192s'" + rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.192s'" + serbian "Izgradnja indeksa dump-ovane tabele '%-.192s' nije uspela" + spa "Falla reconstruyendo el indice de la tabla dumped '%-.192s'" + ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.192s'" ER_MASTER cze "Chyba masteru: '%-.64s'" dan "Fejl fra master: '%-.64s'" @@ -4212,35 +4212,35 @@ ER_UNKNOWN_SYSTEM_VARIABLE swe "Okänd systemvariabel: '%-.64s'" ukr "îÅצÄÏÍÁ ÓÉÓÔÅÍÎÁ ÚͦÎÎÁ '%-.64s'" ER_CRASHED_ON_USAGE - cze "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a mìla by být opravena" - dan "Tabellen '%-.64s' er markeret med fejl og bør repareres" - nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" - eng "Table '%-.64s' is marked as crashed and should be repaired" - est "Tabel '%-.64s' on märgitud 
vigaseks ja tuleb parandada" - fre "La table '%-.64s' est marquée 'crashed' et devrait être réparée" - ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden" - ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata" - por "Tabela '%-.64s' está marcada como danificada e deve ser reparada" - rus "ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÄÏÌÖÎÁ ÐÒÏÊÔÉ ÐÒÏ×ÅÒËÕ É ÒÅÍÏÎÔ" - serbian "Tabela '%-.64s' je markirana kao ošteæena i trebala bi biti popravljena" - spa "Tabla '%-.64s' está marcada como crashed y debe ser reparada" - swe "Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE" - ukr "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ §§ ÐÏÔÒ¦ÂÎÏ ×¦ÄÎÏ×ÉÔÉ" + cze "Tabulka '%-.192s' je ozna-Bèena jako poru¹ená a mìla by být opravena" + dan "Tabellen '%-.192s' er markeret med fejl og bør repareres" + nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" + eng "Table '%-.192s' is marked as crashed and should be repaired" + est "Tabel '%-.192s' on märgitud vigaseks ja tuleb parandada" + fre "La table '%-.192s' est marquée 'crashed' et devrait être réparée" + ger "Tabelle '%-.192s' ist als defekt markiert und sollte repariert werden" + ita "La tabella '%-.192s' e` segnalata come corrotta e deve essere riparata" + por "Tabela '%-.192s' está marcada como danificada e deve ser reparada" + rus "ôÁÂÌÉÃÁ '%-.192s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÄÏÌÖÎÁ ÐÒÏÊÔÉ ÐÒÏ×ÅÒËÕ É ÒÅÍÏÎÔ" + serbian "Tabela '%-.192s' je markirana kao ošteæena i trebala bi biti popravljena" + spa "Tabla '%-.192s' está marcada como crashed y debe ser reparada" + swe "Tabell '%-.192s' är trasig och bör repareras med REPAIR TABLE" + ukr "ôÁÂÌÉÃÀ '%-.192s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ §§ ÐÏÔÒ¦ÂÎÏ ×¦ÄÎÏ×ÉÔÉ" ER_CRASHED_ON_REPAIR - cze "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a poslední (automatická?) oprava se nezdaøila" - dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" - nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" - eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed" - est "Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus" - fre "La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué" - ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" - ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" - por "Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou" - rus "ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÐÏÓÌÅÄÎÉÊ (Á×ÔÏÍÁÔÉÞÅÓËÉÊ?) ÒÅÍÏÎÔ ÎÅ ÂÙÌ ÕÓÐÅÛÎÙÍ" - serbian "Tabela '%-.64s' je markirana kao ošteæena, a zadnja (automatska?) popravka je bila neuspela" - spa "Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló" - swe "Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades" - ukr "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ" + cze "Tabulka '%-.192s' je ozna-Bèena jako poru¹ená a poslední (automatická?) oprava se nezdaøila" + dan "Tabellen '%-.192s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" + nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" + eng "Table '%-.192s' is marked as crashed and last (automatic?) 
repair failed" + est "Tabel '%-.192s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus" + fre "La table '%-.192s' est marquée 'crashed' et le dernier 'repair' a échoué" + ger "Tabelle '%-.192s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" + ita "La tabella '%-.192s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" + por "Tabela '%-.192s' está marcada como danificada e a última reparação (automática?) falhou" + rus "ôÁÂÌÉÃÁ '%-.192s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÐÏÓÌÅÄÎÉÊ (Á×ÔÏÍÁÔÉÞÅÓËÉÊ?) ÒÅÍÏÎÔ ÎÅ ÂÙÌ ÕÓÐÅÛÎÙÍ" + serbian "Tabela '%-.192s' je markirana kao ošteæena, a zadnja (automatska?) popravka je bila neuspela" + spa "Tabla '%-.192s' está marcada como crashed y la última reparación (automactica?) falló" + swe "Tabell '%-.192s' är trasig och senast (automatiska?) reparation misslyckades" + ukr "ôÁÂÌÉÃÀ '%-.192s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ" ER_WARNING_NOT_COMPLETE_ROLLBACK dan "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles" nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen" @@ -4438,18 +4438,18 @@ ER_WRONG_ARGUMENTS swe "Felaktiga argument till %s" ukr "èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s" ER_NO_PERMISSION_TO_CREATE_USER 42000 - nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren" - eng "'%-.32s'@'%-.64s' is not allowed to create new users" - est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid" - fre "'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs" - ger "'%-.32s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen" - ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti" - por "Não é permitido a '%-.32s'@'%-.64s' criar novos usuários" - rus "'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ" - serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike" - spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios" - swe "'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare" - ukr "ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×" + nla "'%-.48s'@'%-.64s' mag geen nieuwe gebruikers creeren" + eng "'%-.48s'@'%-.64s' is not allowed to create new users" + est "Kasutajal '%-.48s'@'%-.64s' ei ole lubatud luua uusi kasutajaid" + fre "'%-.48s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs" + ger "'%-.48s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen" + ita "A '%-.48s'@'%-.64s' non e' permesso creare nuovi utenti" + por "Não é permitido a '%-.48s'@'%-.64s' criar novos usuários" + rus "'%-.48s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ" + serbian "Korisniku '%-.48s'@'%-.64s' nije dozvoljeno da kreira nove korisnike" + spa "'%-.48s`@`%-.64s` no es permitido para crear nuevos usuarios" + swe "'%-.48s'@'%-.64s' har inte rättighet att skapa nya användare" + ukr "ëÏÒÉÓÔÕ×ÁÞÕ '%-.48s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×" ER_UNION_TABLES_IN_DIFFERENT_DIR nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren" eng "Incorrect table definition; all MERGE tables must be in the same database" @@ -4715,19 +4715,19 @@ ER_SLAVE_IGNORED_TABLE spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla" swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel" ER_INCORRECT_GLOBAL_LOCAL_VAR - eng "Variable '%-.64s' is a %s variable" - serbian "Incorrect foreign key 
definition for '%-.64s': %s" - ger "Variable '%-.64s' ist eine %s-Variable" - nla "Variabele '%-.64s' is geen %s variabele" - spa "Variable '%-.64s' es una %s variable" - swe "Variabel '%-.64s' är av typ %s" + eng "Variable '%-.192s' is a %s variable" + serbian "Incorrect foreign key definition for '%-.192s': %s" + ger "Variable '%-.192s' ist eine %s-Variable" + nla "Variabele '%-.192s' is geen %s variabele" + spa "Variable '%-.192s' es una %s variable" + swe "Variabel '%-.192s' är av typ %s" ER_WRONG_FK_DEF 42000 - eng "Incorrect foreign key definition for '%-.64s': %s" - ger "Falsche Fremdschlüssel-Definition für '%-.64s': %s" - nla "Incorrecte foreign key definitie voor '%-.64s': %s" - por "Definição errada da chave estrangeira para '%-.64s': %s" - spa "Equivocada definición de llave extranjera para '%-.64s': %s" - swe "Felaktig FOREIGN KEY-definition för '%-.64s': %s" + eng "Incorrect foreign key definition for '%-.192s': %s" + ger "Falsche Fremdschlüssel-Definition für '%-.192s': %s" + nla "Incorrecte foreign key definitie voor '%-.192s': %s" + por "Definição errada da chave estrangeira para '%-.192s': %s" + spa "Equivocada definición de llave extranjera para '%-.192s': %s" + swe "Felaktig FOREIGN KEY-definition för '%-.192s': %s" ER_KEY_REF_DO_NOT_MATCH_TABLE_REF eng "Key reference and table reference don't match" ger "Schlüssel- und Tabellenverweis passen nicht zusammen" @@ -4811,12 +4811,12 @@ ER_SELECT_REDUCED 01000 swe "Select %u reducerades vid optimiering" ukr "Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii" ER_TABLENAME_NOT_ALLOWED_HERE 42000 - eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s" - ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" - nla "Tabel '%-.64s' uit een van de SELECTS kan niet in %-.32s gebruikt worden" - por "Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s" - spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s" - swe "Tabell '%-.64s' från en SELECT kan inte användas i %-.32s" + eng "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s" + ger "Tabelle '%-.192s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" + nla "Tabel '%-.192s' uit een van de SELECTS kan niet in %-.32s gebruikt worden" + por "Tabela '%-.192s' de um dos SELECTs não pode ser usada em %-.32s" + spa "Tabla '%-.192s' de uno de los SELECT no puede ser usada en %-.32s" + swe "Tabell '%-.192s' från en SELECT kan inte användas i %-.32s" ER_NOT_SUPPORTED_AUTH_MODE 08004 eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client" ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client" @@ -4959,12 +4959,12 @@ ER_SERVER_IS_IN_SECURE_AUTH_MODE rus "óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ" spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" ER_WARN_FIELD_RESOLVED - eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d" - ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. 
%d aufgelöst" - por "Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d" - rus "ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d" - spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d" - ukr "óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d" + eng "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d" + ger "Feld oder Verweis '%-.192s%s%-.192s%s%-.192s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst" + por "Campo ou referência '%-.192s%s%-.192s%s%-.192s' de SELECT #%d foi resolvido em SELECT #%d" + rus "ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.192s%s%-.192s%s%-.192s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d" + spa "Campo o referencia '%-.192s%s%-.192s%s%-.192s' de SELECT #%d fue resolvido en SELECT #%d" + ukr "óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.192s%s%-.192s%s%-.192s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d" ER_BAD_SLAVE_UNTIL_COND eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL" ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL" @@ -5001,11 +5001,11 @@ ER_WARN_QC_RESIZE swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu" ukr "ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu" ER_BAD_FT_COLUMN - eng "Column '%-.64s' cannot be part of FULLTEXT index" - ger "Feld '%-.64s' kann nicht Teil eines FULLTEXT-Index sein" - por "Coluna '%-.64s' não pode ser parte de índice FULLTEXT" - spa "Columna '%-.64s' no puede ser parte de FULLTEXT index" - swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index" + eng "Column '%-.192s' cannot be part of FULLTEXT index" + ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein" + por "Coluna '%-.192s' não pode ser parte de índice FULLTEXT" + spa "Columna '%-.192s' no puede ser parte de FULLTEXT index" + swe "Kolumn '%-.192s' kan inte vara del av ett FULLTEXT index" ER_UNKNOWN_KEY_CACHE eng "Unknown key cache '%-.100s'" ger "Unbekannter Schlüssel-Cache '%-.100s'" @@ -5063,10 +5063,10 @@ ER_TOO_MUCH_AUTO_TIMESTAMP_COLS por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" ER_INVALID_ON_UPDATE - eng "Invalid ON UPDATE clause for '%-.64s' column" - ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.64s'" - por "Inválida cláusula ON UPDATE para campo '%-.64s'" - spa "Inválido ON UPDATE cláusula para campo '%-.64s'" + eng "Invalid ON UPDATE clause for '%-.192s' column" + ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.192s'" + por "Inválida cláusula ON UPDATE para campo '%-.192s'" + spa "Inválido ON UPDATE cláusula para campo '%-.192s'" ER_UNSUPPORTED_PS eng "This command is not supported in the prepared statement protocol yet" ger "Dieser Befehl wird im Protokoll für vorbereitete Anweisungen noch nicht unterstützt" @@ -5209,50 +5209,50 @@ ER_SP_CASE_NOT_FOUND 20000 eng "Case not found for CASE statement" ger "Fall für CASE-Anweisung nicht gefunden" ER_FPARSER_TOO_BIG_FILE - eng "Configuration file '%-.64s' is too big" - ger "Konfigurationsdatei '%-.64s' ist zu groß" - rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.64s'" - ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎƦÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'" + eng "Configuration file '%-.192s' is too big" + ger "Konfigurationsdatei '%-.192s' ist 
zu groß" + rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.192s'" + ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎƦÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.192s'" ER_FPARSER_BAD_HEADER - eng "Malformed file type header in file '%-.64s'" - ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.64s'" - rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'" - ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'" + eng "Malformed file type header in file '%-.192s'" + ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.192s'" + rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.192s'" + ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.192s'" ER_FPARSER_EOF_IN_COMMENT eng "Unexpected end of file while parsing comment '%-.200s'" ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.200s'" rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.200s'" ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.200s'" ER_FPARSER_ERROR_IN_PARAMETER - eng "Error while parsing parameter '%-.64s' (line: '%-.64s')" - ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')" - rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.64s' (ÓÔÒÏËÁ: '%-.64s')" - ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')" + eng "Error while parsing parameter '%-.192s' (line: '%-.192s')" + ger "Fehler beim Parsen des Parameters '%-.192s' (Zeile: '%-.192s')" + rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.192s' (ÓÔÒÏËÁ: '%-.192s')" + ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.192s' (ÒÑÄÏË: '%-.192s')" ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER - eng "Unexpected end of file while skipping unknown parameter '%-.64s'" - ger "Unerwartetes Dateiende beim Überspringen des unbekannten Parameters '%-.64s'" - rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.64s'" - ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'" + eng "Unexpected end of file while skipping unknown parameter '%-.192s'" + ger "Unerwartetes Dateiende beim Überspringen des unbekannten Parameters '%-.192s'" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.192s'" + ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.192s'" ER_VIEW_NO_EXPLAIN eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" ger "EXPLAIN/SHOW kann nicht verlangt werden. 
Rechte für zugrunde liegende Tabelle fehlen" rus "EXPLAIN/SHOW ÎÅ ÍÏÖÅÔ ÂÙÔØ ×ÙÐÏÌÎÅÎÎÏ; ÎÅÄÏÓÔÁÔÏÞÎÏ ÐÒÁ× ÎÁ ÔÁËÂÌÉÃÙ ÚÁÐÒÏÓÁ" ukr "EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ" ER_FRM_UNKNOWN_TYPE - eng "File '%-.64s' has unknown type '%-.64s' in its header" - ger "Datei '%-.64s' hat unbekannten Typ '%-.64s' im Header" - rus "æÁÊÌ '%-.64s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ" - ukr "æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ" + eng "File '%-.192s' has unknown type '%-.64s' in its header" + ger "Datei '%-.192s' hat unbekannten Typ '%-.64s' im Header" + rus "æÁÊÌ '%-.192s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ" + ukr "æÁÊÌ '%-.192s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ" ER_WRONG_OBJECT - eng "'%-.64s.%-.64s' is not %s" - ger "'%-.64s.%-.64s' ist nicht %s" - rus "'%-.64s.%-.64s' - ÎÅ %s" - ukr "'%-.64s.%-.64s' ÎÅ ¤ %s" + eng "'%-.192s.%-.192s' is not %s" + ger "'%-.192s.%-.192s' ist nicht %s" + rus "'%-.192s.%-.192s' - ÎÅ %s" + ukr "'%-.192s.%-.192s' ÎÅ ¤ %s" ER_NONUPDATEABLE_COLUMN - eng "Column '%-.64s' is not updatable" - ger "Feld '%-.64s' ist nicht aktualisierbar" - rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ" - ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ" + eng "Column '%-.192s' is not updatable" + ger "Feld '%-.192s' ist nicht aktualisierbar" + rus "óÔÏÌÂÅà '%-.192s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ" + ukr "óÔÏ×ÂÅÃØ '%-.192s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ" ER_VIEW_SELECT_DERIVED eng "View's SELECT contains a subquery in the FROM clause" ger "SELECT der View enthält eine Subquery in der FROM-Klausel" @@ -5269,10 +5269,10 @@ ER_VIEW_SELECT_VARIABLE rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÅÒÅÍÅÎÎÕÀ ÉÌÉ ÐÁÒÁÍÅÔÒ" ukr "View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ" ER_VIEW_SELECT_TMPTABLE - eng "View's SELECT refers to a temporary table '%-.64s'" - ger "SELECT der View verweist auf eine temporäre Tabelle '%-.64s'" - rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.64s'" - ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'" + eng "View's SELECT refers to a temporary table '%-.192s'" + ger "SELECT der View verweist auf eine temporäre Tabelle '%-.192s'" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.192s'" + ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.192s'" ER_VIEW_WRONG_LIST eng "View's SELECT and view's field list have different column counts" ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten" @@ -5289,7 +5289,7 @@ ER_WARN_VIEW_WITHOUT_KEY rus "ïÂÎÏ×ÌÑÅÍÙÊ view ÎÅ ÓÏÄÅÒÖÉÔ ËÌÀÞÁ ÉÓÐÏÌØÚÏ×ÁÎÎÙÈ(ÏÊ) × ÎÅÍ ÔÁÂÌÉÃ(Ù)" ukr "View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ" ER_VIEW_INVALID - eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" + eng "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" ER_SP_NO_DROP_SP eng "Can't drop or alter a %s from within another stored routine" ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine löschen oder ändern" @@ -5303,8 +5303,8 @@ ER_TRG_DOES_NOT_EXIST eng "Trigger does not exist" ger "Trigger existiert nicht" ER_TRG_ON_VIEW_OR_TEMP_TABLE - eng "Trigger's '%-.64s' is view or temporary table" - ger "'%-.64s' des Triggers ist View oder temporäre Tabelle" + eng "Trigger's '%-.192s' is view or temporary table" + ger "'%-.192s' des Triggers ist View oder temporäre Tabelle" ER_TRG_CANT_CHANGE_ROW eng "Updating of %s row is not allowed in %strigger" ger "Aktualisieren einer %s-Zeile ist in 
einem %s-Trigger nicht erlaubt" @@ -5312,30 +5312,30 @@ ER_TRG_NO_SUCH_ROW_IN_TRG eng "There is no %s row in %s trigger" ger "Es gibt keine %s-Zeile im %s-Trigger" ER_NO_DEFAULT_FOR_FIELD - eng "Field '%-.64s' doesn't have a default value" - ger "Feld '%-.64s' hat keinen Vorgabewert" + eng "Field '%-.192s' doesn't have a default value" + ger "Feld '%-.192s' hat keinen Vorgabewert" ER_DIVISION_BY_ZERO 22012 eng "Division by 0" ger "Division durch 0" ER_TRUNCATED_WRONG_VALUE_FOR_FIELD - eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld" - ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.64s' in Zeile %ld" + eng "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld" + ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.192s' in Zeile %ld" ER_ILLEGAL_VALUE_FOR_TYPE 22007 - eng "Illegal %s '%-.64s' value found during parsing" - ger "Nicht zulässiger %s-Wert '%-.64s' beim Parsen gefunden" + eng "Illegal %s '%-.192s' value found during parsing" + ger "Nicht zulässiger %s-Wert '%-.192s' beim Parsen gefunden" ER_VIEW_NONUPD_CHECK - eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'" - ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.64s.%-.64s'" - rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.64s.%-.64s'" - ukr "CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ" + eng "CHECK OPTION on non-updatable view '%-.192s.%-.192s'" + ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.192s.%-.192s'" + rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.192s.%-.192s'" + ukr "CHECK OPTION ÄÌÑ VIEW '%-.192s.%-.192s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ" ER_VIEW_CHECK_FAILED - eng "CHECK OPTION failed '%-.64s.%-.64s'" - ger "CHECK OPTION fehlgeschlagen: '%-.64s.%-.64s'" - rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÐÒÏ×ÁÌÉÌÁÓØ" - ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ" + eng "CHECK OPTION failed '%-.192s.%-.192s'" + ger "CHECK OPTION fehlgeschlagen: '%-.192s.%-.192s'" + rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.192s.%-.192s' ÐÒÏ×ÁÌÉÌÁÓØ" + ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.192s.%-.192s' ÎÅ ÐÒÏÊÛÌÁ" ER_PROCACCESS_DENIED_ERROR 42000 - eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'" - ger "Befehl %-.16s nicht zulässig für Benutzer '%-.32s'@'%-.64s' in Routine '%-.64s'" + eng "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'" + ger "Befehl %-.16s nicht zulässig für Benutzer '%-.48s'@'%-.64s' in Routine '%-.192s'" ER_RELAY_LOG_FAIL eng "Failed purging old relay logs: %s" ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s" @@ -5397,28 +5397,28 @@ ER_PS_MANY_PARAM eng "Prepared statement contains too many placeholders" ger "Vorbereitete Anweisung enthält zu viele Platzhalter" ER_KEY_PART_0 - eng "Key part '%-.64s' length cannot be 0" - ger "Länge des Schlüsselteils '%-.64s' kann nicht 0 sein" + eng "Key part '%-.192s' length cannot be 0" + ger "Länge des Schlüsselteils '%-.192s' kann nicht 0 sein" ER_VIEW_CHECKSUM eng "View text checksum failed" ger "View-Text-Prüfsumme fehlgeschlagen" rus "ðÒÏ×ÅÒËÁ ËÏÎÔÒÏÌØÎÏÊ ÓÕÍÍÙ ÔÅËÓÔÁ VIEW ÐÒÏ×ÁÌÉÌÁÓØ" ukr "ðÅÒÅצÒËÁ ËÏÎÔÒÏÌØÎϧ ÓÕÍÉ ÔÅËÓÔÕ VIEW ÎÅ ÐÒÏÊÛÌÁ" ER_VIEW_MULTIUPDATE - eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'" - ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.64s.%-.64s' ändern" - rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s'" - ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ" 
+ eng "Can not modify more than one base table through a join view '%-.192s.%-.192s'" + ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.192s.%-.192s' ändern" + rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.192s.%-.192s'" + ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.192s.%-.192s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ" ER_VIEW_NO_INSERT_FIELD_LIST - eng "Can not insert into join view '%-.64s.%-.64s' without fields list" - ger "Kann nicht ohne Feldliste in Join-View '%-.64s.%-.64s' einfügen" - rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ" - ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×" + eng "Can not insert into join view '%-.192s.%-.192s' without fields list" + ger "Kann nicht ohne Feldliste in Join-View '%-.192s.%-.192s' einfügen" + rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.192s.%-.192s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ" + ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.192s.%-.192s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×" ER_VIEW_DELETE_MERGE_VIEW - eng "Can not delete from join view '%-.64s.%-.64s'" - ger "Kann nicht aus Join-View '%-.64s.%-.64s' löschen" - rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.64s.%-.64s'" - ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ" + eng "Can not delete from join view '%-.192s.%-.192s'" + ger "Kann nicht aus Join-View '%-.192s.%-.192s' löschen" + rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.192s.%-.192s'" + ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.192s.%-.192s', ÝÏ Í¦ÓÔÉÔØ ÄÅ˦ÌØËÁ ÔÁÂÌÉÃØ" ER_CANNOT_USER eng "Operation %s failed for %.256s" ger "Operation %s schlug fehl für %.256s" @@ -5443,8 +5443,8 @@ ER_XA_RBROLLBACK XA100 eng "XA_RBROLLBACK: Transaction branch was rolled back" ger "XA_RBROLLBACK: Transaktionszweig wurde zurückgerollt" ER_NONEXISTING_PROC_GRANT 42000 - eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'" - ger "Es gibt diese Berechtigung für Benutzer '%-.32s' auf Host '%-.64s' für Routine '%-.64s' nicht" + eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'" + ger "Es gibt diese Berechtigung für Benutzer '%-.48s' auf Host '%-.64s' für Routine '%-.192s' nicht" ER_PROC_AUTO_GRANT_FAIL eng "Failed to grant EXECUTE and ALTER ROUTINE privileges" ger "Gewährung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen" @@ -5503,23 +5503,22 @@ ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG eng "Explicit or implicit commit is not allowed in stored function or trigger." ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt" ER_NO_DEFAULT_FOR_VIEW_FIELD - eng "Field of view '%-.64s.%-.64s' underlying table doesn't have a default value" - ger "Ein Feld der dem View '%-.64s.%-.64s' zugrundeliegenden Tabelle hat keinen Vorgabewert" + eng "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value" + ger "Ein Feld der dem View '%-.192s.%-.192s' zugrundeliegenden Tabelle hat keinen Vorgabewert" ER_SP_NO_RECURSION eng "Recursive stored functions and triggers are not allowed." ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt" ER_TOO_BIG_SCALE 42000 S1009 - eng "Too big scale %d specified for column '%-.64s'. Maximum is %d." - ger "Zu großer Skalierungsfaktor %d für Feld '%-.64s' angegeben. Maximum ist %d" + eng "Too big scale %d specified for column '%-.192s'. Maximum is %d." 
+ ger "Zu großer Skalierungsfaktor %d für Feld '%-.192s' angegeben. Maximum ist %d" ER_TOO_BIG_PRECISION 42000 S1009 - eng "Too big precision %d specified for column '%-.64s'. Maximum is %d." - ger "Zu große Genauigkeit %d für Feld '%-.64s' angegeben. Maximum ist %d" + eng "Too big precision %d specified for column '%-.192s'. Maximum is %d." + ger "Zu große Genauigkeit %d für Feld '%-.192s' angegeben. Maximum ist %d" ER_M_BIGGER_THAN_D 42000 S1009 - eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.64s')." - ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')" + eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s')." + ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.192s')" ER_WRONG_LOCK_OF_SYSTEM_TABLE - eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables" - ger "Sie können Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren" + eng "You can't combine write-locking of system tables with other tables or lock types" ER_CONNECT_TO_FOREIGN_DATA_SOURCE eng "Unable to connect to foreign data source: %.64s" ger "Kann nicht mit Fremddatenquelle verbinden: %.64s" @@ -5551,8 +5550,8 @@ ER_WARN_CANT_DROP_DEFAULT_KEYCACHE eng "Cannot drop default keycache" ger "Der vorgabemäßige Schlüssel-Cache kann nicht gelöscht werden" ER_TOO_BIG_DISPLAYWIDTH 42000 S1009 - eng "Display width out of range for column '%-.64s' (max = %d)" - ger "Anzeigebreite außerhalb des zulässigen Bereichs für Spalte '%-.64s' (Maximum: %d)" + eng "Display width out of range for column '%-.192s' (max = %d)" + ger "Anzeigebreite außerhalb des zulässigen Bereichs für Spalte '%-.192s' (Maximum: %d)" ER_XAER_DUPID XAE08 eng "XAER_DUPID: The XID already exists" ger "XAER_DUPID: Die XID existiert bereits" @@ -5560,11 +5559,11 @@ ER_DATETIME_FUNCTION_OVERFLOW 22008 eng "Datetime function: %-.32s field overflow" ger "Datetime-Funktion: %-.32s Feldüberlauf" ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG - eng "Can't update table '%-.64s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger." - ger "Kann Tabelle '%-.64s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief" + eng "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger." + ger "Kann Tabelle '%-.192s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief" ER_VIEW_PREVENT_UPDATE - eng "The definition of table '%-.64s' prevents operation %.64s on table '%-.64s'." - ger "Die Definition der Tabelle '%-.64s' verhindert die Operation %.64s auf Tabelle '%-.64s'" + eng "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'." + ger "Die Definition der Tabelle '%-.192s' verhindert die Operation %.192s auf Tabelle '%-.192s'" ER_PS_NO_RECURSION eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner" ger "Die vorbereitete Anweisung enthält einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. 
Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszuführen" @@ -5575,17 +5574,17 @@ ER_MALFORMED_DEFINER eng "Definer is not fully qualified" ger "Definierer des View ist nicht vollständig spezifiziert" ER_VIEW_FRM_NO_USER - eng "View '%-.64s'.'%-.64s' has no definer information (old table format). Current user is used as definer. Please recreate the view!" - ger "View '%-.64s'.'%-.64s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu" + eng "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!" + ger "View '%-.192s'.'%-.192s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu" ER_VIEW_OTHER_USER - eng "You need the SUPER privilege for creation view with '%-.64s'@'%-.64s' definer" - ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.64s'@'%-.64s' zu erzeugen" + eng "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer" + ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.192s'@'%-.192s' zu erzeugen" ER_NO_SUCH_USER eng "There is no '%-.64s'@'%-.64s' registered" ger "'%-.64s'@'%-.64s' ist nicht registriert" ER_FORBID_SCHEMA_CHANGE - eng "Changing schema from '%-.64s' to '%-.64s' is not allowed." - ger "Wechsel des Schemas von '%-.64s' auf '%-.64s' ist nicht erlaubt" + eng "Changing schema from '%-.192s' to '%-.192s' is not allowed." + ger "Wechsel des Schemas von '%-.192s' auf '%-.192s' ist nicht erlaubt" ER_ROW_IS_REFERENCED_2 23000 eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)" ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)" @@ -5596,22 +5595,22 @@ ER_SP_BAD_VAR_SHADOW 42000 eng "Variable '%-.64s' must be quoted with `...`, or renamed" ger "Variable '%-.64s' muss mit `...` geschützt oder aber umbenannt werden" ER_TRG_NO_DEFINER - eng "No definer attribute for trigger '%-.64s'.'%-.64s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger." - ger "Kein Definierer-Attribut für Trigger '%-.64s'.'%-.64s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an." + eng "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger." + ger "Kein Definierer-Attribut für Trigger '%-.192s'.'%-.192s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an." 
ER_OLD_FILE_FORMAT - eng "'%-.64s' has an old format, you should re-create the '%s' object(s)" - ger "'%-.64s' hat altes Format, Sie sollten die '%s'-Objekt(e) neu erzeugen" + eng "'%-.192s' has an old format, you should re-create the '%s' object(s)" + ger "'%-.192s' hat altes Format, Sie sollten die '%s'-Objekt(e) neu erzeugen" ER_SP_RECURSION_LIMIT - eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s" - ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten" + eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.192s" + ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.192s überschritten" ER_SP_PROC_TABLE_CORRUPT - eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" - ger "Routine %-.64s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)" + eng "Failed to load routine %-.192s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" + ger "Routine %-.192s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)" ER_FOREIGN_SERVER_EXISTS eng "The foreign server, %s, you are trying to create already exists." ER_SP_WRONG_NAME 42000 - eng "Incorrect routine name '%-.64s'" - ger "Ungültiger Routinenname '%-.64s'" + eng "Incorrect routine name '%-.192s'" + ger "Ungültiger Routinenname '%-.192s'" ER_TABLE_NEEDS_UPGRADE eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!" ger "Tabellenaktualisierung erforderlich. Bitte zum Reparieren \"REPAIR TABLE `%-.32s`\" eingeben!" 
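Note on the errmsg.txt hunks above and below: the recurring change is purely a widening of the printf precision on identifier placeholders from %-.64s to %-.192s (and on user names from %-.32s to %-.48s). The precision is a byte limit, so the larger values presumably leave room for 64-character identifiers that can occupy up to three bytes per character under utf8. A minimal standalone illustration of how that precision truncates (the identifier value here is made up):

    #include <cstdio>
    #include <cstring>

    int main() {
      // 200 'x' bytes: longer than either precision limit.
      char name[201];
      std::memset(name, 'x', 200);
      name[200] = '\0';

      char buf[256];
      // The digits after the '.' are a byte limit; the '-' only controls
      // padding (left justification), not how much of the string survives.
      std::snprintf(buf, sizeof(buf), "%-.64s", name);
      std::printf("%%-.64s keeps %zu bytes\n", std::strlen(buf));   // prints 64
      std::snprintf(buf, sizeof(buf), "%-.192s", name);
      std::printf("%%-.192s keeps %zu bytes\n", std::strlen(buf));  // prints 192
      return 0;
    }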
@@ -5622,11 +5621,11 @@ ER_MAX_PREPARED_STMT_COUNT_REACHED 42000 eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)" ger "Kann nicht mehr Anweisungen als max_prepared_stmt_count erzeugen (aktueller Wert: %lu)" ER_VIEW_RECURSIVE - eng "`%-.64s`.`%-.64s` contains view recursion" - ger "`%-.64s`.`%-.64s` enthält View-Rekursion" + eng "`%-.192s`.`%-.192s` contains view recursion" + ger "`%-.192s`.`%-.192s` enthält View-Rekursion" ER_NON_GROUPING_FIELD_USED 42000 - eng "non-grouping field '%-.64s' is used in %-.64s clause" - ger "In der %-.64s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet" + eng "non-grouping field '%-.192s' is used in %-.64s clause" + ger "In der %-.192s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet" ER_TABLE_CANT_HANDLE_SPKEYS eng "The used table type doesn't support SPATIAL indexes" ger "Der verwendete Tabellentyp unterstützt keine SPATIAL-Indizes" @@ -5682,9 +5681,9 @@ ER_INCONSISTENT_PARTITION_INFO_ERROR ger "Die Partitionierungsinformationen in der frm-Datei stimmen nicht mit dem überein, was in die frm-Datei geschrieben werden kann" swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen" ER_PARTITION_FUNC_NOT_ALLOWED_ERROR - eng "The %-.64s function returns the wrong type" - ger "Die %-.64s-Funktion gibt einen falschen Typ zurück" - swe "%-.64s-funktionen returnerar felaktig typ" + eng "The %-.192s function returns the wrong type" + ger "Die %-.192s-Funktion gibt einen falschen Typ zurück" + swe "%-.192s-funktionen returnerar felaktig typ" ER_PARTITIONS_MUST_BE_DEFINED_ERROR eng "For %-.64s partitions each partition must be defined" ger "Für %-.64s-Partitionen muss jede Partition definiert sein" @@ -5730,7 +5729,7 @@ ER_BLOB_FIELD_IN_PART_FUNC_ERROR ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt" swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner" ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF - eng "A %-.64s must include all columns in the table's partitioning function" + eng "A %-.192s must include all columns in the table's partitioning function" ER_NO_PARTS_ERROR eng "Number of %-.64s = 0 is not an allowed value" ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert" @@ -5784,9 +5783,9 @@ ER_REORG_PARTITION_NOT_EXIST ger "Es wurde versucht, mehr Partitionen als vorhanden zu reorganisieren" swe "Fler partitioner att reorganisera än det finns partitioner" ER_SAME_NAME_PARTITION - eng "Duplicate partition name %-.64s" - ger "Doppelter Partitionsname: %-.64s" - swe "Duplicerat partitionsnamn %-.64s" + eng "Duplicate partition name %-.192s" + ger "Doppelter Partitionsname: %-.192s" + swe "Duplicerat partitionsnamn %-.192s" ER_NO_BINLOG_ERROR eng "It is not allowed to shut off binlog on this command" ger "Es es nicht erlaubt, bei diesem Befehl binlog abzuschalten" @@ -5811,8 +5810,8 @@ ER_LIMITED_PART_RANGE ger "Der Handler %-.64s unterstützt in VALUES nur 32-Bit-Integers" swe "%-.64s stödjer endast 32 bitar i integers i VALUES" ER_PLUGIN_IS_NOT_LOADED - eng "Plugin '%-.64s' is not loaded" - ger "Plugin '%-.64s' ist nicht geladen" + eng "Plugin '%-.192s' is not loaded" + ger "Plugin '%-.192s' ist nicht geladen" ER_WRONG_VALUE eng "Incorrect %-.32s value: '%-.128s'" ger "Falscher %-.32s-Wert: '%-.128s'" @@ -5853,17 +5852,17 @@ ER_FOREIGN_SERVER_DOESNT_EXIST eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s" ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. 
Datenquellenfehlermeldung: %-.64s" ER_EVENT_ALREADY_EXISTS - eng "Event '%-.64s' already exists" - ger "Event '%-.64s' existiert bereits" + eng "Event '%-.192s' already exists" + ger "Event '%-.192s' existiert bereits" ER_EVENT_STORE_FAILED eng "Failed to store event %s. Error code %d from storage engine." ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %d" ER_EVENT_DOES_NOT_EXIST - eng "Unknown event '%-.64s'" - ger "Unbekanntes Event '%-.64s'" + eng "Unknown event '%-.192s'" + ger "Unbekanntes Event '%-.192s'" ER_EVENT_CANT_ALTER - eng "Failed to alter event '%-.64s'" - ger "Ändern des Events '%-.64s' fehlgeschlagen" + eng "Failed to alter event '%-.192s'" + ger "Ändern des Events '%-.192s' fehlgeschlagen" ER_EVENT_DROP_FAILED eng "Failed to drop %s" ger "Löschen von %s fehlgeschlagen" @@ -5874,8 +5873,7 @@ ER_EVENT_ENDS_BEFORE_STARTS eng "ENDS is either invalid or before STARTS" ger "ENDS ist entweder ungültig oder liegt vor STARTS" ER_EVENT_EXEC_TIME_IN_THE_PAST - eng "Activation (AT) time is in the past" - ger "Aktivierungszeit (AT) liegt in der Vergangenheit" + eng "Event execution time is in the past. Event has been disabled" ER_EVENT_OPEN_TABLE_FAILED eng "Failed to open mysql.event" ger "Öffnen von mysql.event fehlgeschlagen" @@ -5883,11 +5881,11 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT eng "No datetime expression provided" ger "Kein DATETIME-Ausdruck angegeben" ER_COL_COUNT_DOESNT_MATCH_CORRUPTED - eng "Column count of mysql.%s is wrong. Expected %d, found %d. Table probably corrupted" + eng "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted" ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt" ER_CANNOT_LOAD_FROM_TABLE - eng "Cannot load from mysql.%s. Table probably corrupted. See error log." - ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt, siehe Fehlerlog" + eng "Cannot load from mysql.%s. The table is probably corrupted" + ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt" ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" ger "Löschen des Events aus mysql.event fehlgeschlagen" @@ -5901,8 +5899,8 @@ ER_EVENT_DATA_TOO_LONG eng "Data for column '%s' too long" ger "Daten der Spalte '%s' zu lang" ER_DROP_INDEX_FK - eng "Cannot drop index '%-.64s': needed in a foreign key constraint" - ger "Kann Index '%-.64s' nicht löschen: wird für einen Fremdschlüssel benötigt" + eng "Cannot drop index '%-.192s': needed in a foreign key constraint" + ger "Kann Index '%-.192s' nicht löschen: wird für einen Fremdschlüssel benötigt" ER_WARN_DEPRECATED_SYNTAX eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead" ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt. Bitte benutzen Sie statt dessen %s" @@ -5913,8 +5911,8 @@ ER_CANT_READ_LOCK_LOG_TABLE eng "You can't use usual read lock with log tables. Try READ LOCAL instead" ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. 
Verwenden Sie statt dessen READ LOCAL" ER_FOREIGN_DUPLICATE_KEY 23000 S1009 - eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry" - ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.64s', Eintrag '%-.64s', Schlüssel %d würde zu einem doppelten Eintrag führen" + eng "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry" + ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use scripts/mysql_fix_privilege_tables" ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie scripts/mysql_fix_privilege_tables, um den Fehler zu beheben" @@ -5956,8 +5954,8 @@ ER_CANT_CHANGE_TX_ISOLATION 25001 eng "Transaction isolation level can't be changed while a transaction is in progress" ger "Transaktionsisolationsebene kann während einer laufenden Transaktion nicht geändert werden" ER_DUP_ENTRY_AUTOINCREMENT_CASE - eng "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.64s' for key '%-.64s'" - ger "ALTER TABLE führt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.64s' für Schlüssel '%-.64s' auftritt" + eng "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.192s' for key '%-.192s'" + ger "ALTER TABLE führt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.192s' für Schlüssel '%-.192s' auftritt" ER_EVENT_MODIFY_QUEUE_ERROR eng "Internal scheduler error %d" ger "Interner Scheduler-Fehler %d" @@ -5980,11 +5978,11 @@ ER_BASE64_DECODE_ERROR ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA eng "Triggers can not be created on system tables" ger "Trigger können nicht auf Systemtabellen erzeugt werden" -ER_EVENT_RECURSIVITY_FORBIDDEN - eng "Recursivity of EVENT DDL statements is forbidden when body is present" +ER_EVENT_RECURSION_FORBIDDEN + eng "Recursion of EVENT DDL statements is forbidden when body is present" ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert" ER_EVENTS_DB_ERROR - eng "Cannot proceed because the tables used by events were found damaged at server start" + eng "Cannot proceed because system tables used by Event Scheduler were found damaged at server start" ger "Kann nicht weitermachen, weil die Tabellen, die von Events verwendet werden, beim Serverstart als beschädigt markiert wurden" ER_ONLY_INTEGERS_ALLOWED eng "Only integers allowed as number here" @@ -6014,42 +6012,50 @@ ER_CANT_RENAME_LOG_TABLE eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'" ger "Kann '%s' nicht umbenennen. 
Wenn Loggen angeschaltet ist, müssen beim Umbenennen zu/von einer Logtabelle zwei Tabellen angegeben werden: die Logtabelle zu einer Archivtabelle und eine weitere Tabelle zurück zu '%s'" ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT 42000 - eng "Incorrect parameter count in the call to native function '%-.64s'" - ger "Falsche Anzahl von Parametern beim Aufruf der nativen Funktion '%-.64s'" + eng "Incorrect parameter count in the call to native function '%-.192s'" + ger "Falsche Anzahl von Parametern beim Aufruf der nativen Funktion '%-.192s'" ER_WRONG_PARAMETERS_TO_NATIVE_FCT 42000 - eng "Incorrect parameters in the call to native function '%-.64s'" - ger "Falscher Parameter beim Aufruf der nativen Funktion '%-.64s'" + eng "Incorrect parameters in the call to native function '%-.192s'" + ger "Falscher Parameter beim Aufruf der nativen Funktion '%-.192s'" ER_WRONG_PARAMETERS_TO_STORED_FCT 42000 - eng "Incorrect parameters in the call to stored function '%-.64s'" - ger "Falsche Parameter beim Aufruf der gespeicherten Funktion '%-.64s'" + eng "Incorrect parameters in the call to stored function '%-.192s'" + ger "Falsche Parameter beim Aufruf der gespeicherten Funktion '%-.192s'" ER_NATIVE_FCT_NAME_COLLISION - eng "This function '%-.64s' has the same name as a native function" - ger "Die Funktion '%-.64s' hat denselben Namen wie eine native Funktion" + eng "This function '%-.192s' has the same name as a native function" + ger "Die Funktion '%-.192s' hat denselben Namen wie eine native Funktion" ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009 - cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe '%-.64s')" - dan "Ens værdier '%-.64s' for indeks '%-.64s'" - nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.64s'" - eng "Duplicate entry '%-.64s' for key '%-.64s'" - jps "'%-.64s' ‚Í key '%-.64s' ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", - est "Kattuv väärtus '%-.64s' võtmele '%-.64s'" - fre "Duplicata du champ '%-.64s' pour la clef '%-.64s'" - ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.64s'" - greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß '%-.64s'" - hun "Duplikalt bejegyzes '%-.64s' a '%-.64s' kulcs szerint." - ita "Valore duplicato '%-.64s' per la chiave '%-.64s'" - jpn "'%-.64s' ¤Ï key '%-.64s' ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" - kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key '%-.64s'" - nor "Like verdier '%-.64s' for nøkkel '%-.64s'" - norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.64s'" - pol "Powtórzone wyst?pienie '%-.64s' dla klucza '%-.64s'" - por "Entrada '%-.64s' duplicada para a chave '%-.64s'" - rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.64s'" - rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ '%-.64s'" - serbian "Dupliran unos '%-.64s' za kljuè '%-.64s'" - slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa '%-.64s')" - spa "Entrada duplicada '%-.64s' para la clave '%-.64s'" - swe "Dubbel nyckel '%-.64s' för nyckel '%-.64s'" - ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ '%-.64s'" + cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe '%-.192s')" + dan "Ens værdier '%-.64s' for indeks '%-.192s'" + nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.192s'" + eng "Duplicate entry '%-.64s' for key '%-.192s'" + jps "'%-.64s' ‚Í key '%-.192s' ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", + est "Kattuv väärtus '%-.64s' võtmele '%-.192s'" + fre "Duplicata du champ '%-.64s' pour la clef '%-.192s'" + ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.192s'" + greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß '%-.192s'" + hun "Duplikalt bejegyzes '%-.64s' a '%-.192s' kulcs szerint." 
+ ita "Valore duplicato '%-.64s' per la chiave '%-.192s'" + jpn "'%-.64s' ¤Ï key '%-.192s' ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key '%-.192s'" + nor "Like verdier '%-.64s' for nøkkel '%-.192s'" + norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.192s'" + pol "Powtórzone wyst?pienie '%-.64s' dla klucza '%-.192s'" + por "Entrada '%-.64s' duplicada para a chave '%-.192s'" + rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.192s'" + rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ '%-.192s'" + serbian "Dupliran unos '%-.64s' za kljuè '%-.192s'" + slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa '%-.192s')" + spa "Entrada duplicada '%-.64s' para la clave '%-.192s'" + swe "Dubbel nyckel '%-.64s' för nyckel '%-.192s'" + ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ '%-.192s'" ER_BINLOG_PURGE_EMFILE eng "Too many files opened, please execute the command again" ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus" +ER_EVENT_CANNOT_CREATE_IN_THE_PAST + eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been created" +ER_EVENT_CANNOT_ALTER_IN_THE_PAST + eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. Event has not been altered" +ER_SLAVE_INCIDENT + eng "The incident %s occured on the master. Message: %-.64s" +ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT + eng "Table has no partition for some existing values" diff --git a/sql/slave.cc b/sql/slave.cc index 6f62f74647a..f1b9ea8476b 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -17,20 +17,24 @@ #include <mysql.h> #include <myisam.h> -#include "rpl_rli.h" #include "slave.h" +#include "rpl_mi.h" +#include "rpl_rli.h" #include "sql_repl.h" #include "rpl_filter.h" #include "repl_failsafe.h" #include <thr_alarm.h> #include <my_dir.h> #include <sql_common.h> +#include <errmsg.h> #ifdef HAVE_REPLICATION #include "rpl_tblmap.h" int queue_event(MASTER_INFO* mi,const char* buf,ulong event_len); +static Log_event* next_event(RELAY_LOG_INFO* rli); + #define FLAGSTR(V,F) ((V)&(F)?#F" ":"") @@ -519,11 +523,11 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli) really one minute of idleness, we don't timeout if the slave SQL thread is actively working. */ - if (!rli->unsafe_to_stop_at) + if (rli->last_event_start_time == 0) DBUG_RETURN(1); DBUG_PRINT("info", ("Slave SQL thread is in an unsafe situation, giving " "it some grace period")); - if (difftime(time(0), rli->unsafe_to_stop_at) > 60) + if (difftime(time(0), rli->last_event_start_time) > 60) { slave_print_msg(ERROR_LEVEL, rli, 0, "SQL thread had to stop in an unsafe situation, in " @@ -557,7 +561,7 @@ static bool sql_slave_killed(THD* thd, RELAY_LOG_INFO* rli) void */ -void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, +void slave_print_msg(enum loglevel level, RELAY_LOG_INFO const *rli, int err_code, const char* msg, ...) { void (*report_function)(const char *, ...); @@ -579,9 +583,9 @@ void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, It's an error, it must be reported in Last_error and Last_errno in SHOW SLAVE STATUS. 
*/ - pbuff= rli->last_slave_error; + pbuff= const_cast<RELAY_LOG_INFO*>(rli)->last_slave_error; pbuffsize= sizeof(rli->last_slave_error); - rli->last_slave_errno = err_code; + const_cast<RELAY_LOG_INFO*>(rli)->last_slave_errno = err_code; report_function= sql_print_error; break; case WARNING_LEVEL: @@ -791,8 +795,10 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) else { mi->clock_diff_with_master= 0; /* The "most sensible" value */ - sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, \ -do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); + sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, " + "do not trust column Seconds_Behind_Master of SHOW " + "SLAVE STATUS. Error: %s (%d)", + mysql_error(mysql), mysql_errno(mysql)); } if (master_res) mysql_free_result(master_res); @@ -813,7 +819,7 @@ do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); { if ((master_row= mysql_fetch_row(master_res)) && (::server_id == strtoul(master_row[1], 0, 10)) && - !replicate_same_server_id) + !mi->rli.replicate_same_server_id) errmsg= "The slave I/O thread stops because master and slave have equal \ MySQL server ids; these ids must be different for replication to work (or \ the --replicate-same-server-id option must be used on slave but this does \ @@ -1249,6 +1255,8 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) sizeof(mi->ssl_key))); field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10, MYSQL_TYPE_LONGLONG)); + field_list.push_back(new Item_empty_string("Master_SSL_Verify_Server_Cert", + 3)); if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) @@ -1328,8 +1336,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) if ((mi->slave_running == MYSQL_SLAVE_RUN_CONNECT) && mi->rli.slave_running) { - long time_diff= ((long)((time_t)time((time_t*) 0) - - mi->rli.last_master_timestamp) + long time_diff= ((long)(time(0) - mi->rli.last_master_timestamp) - mi->clock_diff_with_master); /* Apparently on some systems time_diff can be <0. Here are possible @@ -1355,7 +1362,10 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) max(0, time_diff) : 0)); } else + { protocol->store_null(); + } + protocol->store(mi->ssl_verify_server_cert? "Yes":"No", &my_charset_bin); pthread_mutex_unlock(&mi->rli.data_lock); pthread_mutex_unlock(&mi->data_lock); @@ -1390,7 +1400,7 @@ void set_slave_thread_options(THD* thd) DBUG_VOID_RETURN; } -void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) +void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO const *rli) { DBUG_ENTER("set_slave_thread_default_charset"); @@ -1401,7 +1411,14 @@ void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) thd->variables.collation_server= global_system_variables.collation_server; thd->update_charset(); - rli->cached_charset_invalidate(); + + /* + We use a const cast here since the conceptual (and externally + visible) behavior of the function is to set the default charset of + the thread. That the cache has to be invalidated is a secondary + effect. 
+ */ + const_cast<RELAY_LOG_INFO*>(rli)->cached_charset_invalidate(); DBUG_VOID_RETURN; } @@ -1609,13 +1626,15 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) } -int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int expected_error) +int check_expected_error(THD* thd, RELAY_LOG_INFO const *rli, + int expected_error) { DBUG_ENTER("check_expected_error"); switch (expected_error) { case ER_NET_READ_ERROR: case ER_NET_ERROR_ON_WRITE: + case ER_QUERY_INTERRUPTED: case ER_SERVER_SHUTDOWN: case ER_NEW_ABORTING_CONNECTION: DBUG_RETURN(1); @@ -1714,78 +1733,44 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) } if (ev) { - int type_code = ev->get_type_code(); - int exec_res; + int const type_code= ev->get_type_code(); + int exec_res= 0; /* - Queries originating from this server must be skipped. - Low-level events (Format_desc, Rotate, Stop) from this server - must also be skipped. But for those we don't want to modify - group_master_log_pos, because these events did not exist on the master. - Format_desc is not completely skipped. - Skip queries specified by the user in slave_skip_counter. - We can't however skip events that has something to do with the - log files themselves. - Filtering on own server id is extremely important, to ignore execution of - events created by the creation/rotation of the relay log (remember that - now the relay log starts with its Format_desc, has a Rotate etc). */ - DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + DBUG_PRINT("exec_event",("%s(type_code: %d; server_id: %d)", + ev->get_type_str(), type_code, ev->server_id)); + DBUG_PRINT("info", ("thd->options: %s%s; rli->last_event_start_time: %lu", + FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), + FLAGSTR(thd->options, OPTION_BEGIN), + rli->last_event_start_time)); - if ((ev->server_id == (uint32) ::server_id && - !replicate_same_server_id && - type_code != FORMAT_DESCRIPTION_EVENT) || - (rli->slave_skip_counter && - type_code != ROTATE_EVENT && type_code != STOP_EVENT && - type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT)) - { - DBUG_PRINT("info", ("event skipped")); - /* - We only skip the event here and do not increase the group log - position. In the event that we have to restart, this means - that we might have to skip the event again, but that is a - minor issue. - - If we were to increase the group log position when skipping an - event, it might be that we are restarting at the wrong - position and have events before that we should have executed, - so not increasing the group log position is a sure bet in this - case. - - In this way, we just step the group log position when we - *know* that we are at the end of a group. - */ - rli->inc_event_relay_log_pos(); - /* - Protect against common user error of setting the counter to 1 - instead of 2 while recovering from an insert which used auto_increment, - rand or user var. - */ - if (rli->slave_skip_counter && - !((type_code == INTVAR_EVENT || - type_code == RAND_EVENT || - type_code == USER_VAR_EVENT) && - rli->slave_skip_counter == 1) && - /* - The events from ourselves which have something to do with the relay - log itself must be skipped, true, but they mustn't decrement - rli->slave_skip_counter, because the user is supposed to not see - these events (they are not in the master's binlog) and if we - decremented, START SLAVE would for example decrement when it sees - the Rotate, so the event which the user probably wanted to skip - would not be skipped. 
- */ - !(ev->server_id == (uint32) ::server_id && - (type_code == ROTATE_EVENT || type_code == STOP_EVENT || - type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) - --rli->slave_skip_counter; - pthread_mutex_unlock(&rli->data_lock); - delete ev; - DBUG_RETURN(0); // avoid infinite update loops - } - pthread_mutex_unlock(&rli->data_lock); + + /* + Execute the event to change the database and update the binary + log coordinates, but first we set some data that is needed for + the thread. + + The event will be executed unless it is supposed to be skipped. + + Queries originating from this server must be skipped. Low-level + events (Format_description_log_event, Rotate_log_event, + Stop_log_event) from this server must also be skipped. But for + those we don't want to modify 'group_master_log_pos', because + these events did not exist on the master. + Format_description_log_event is not completely skipped. + + Skip queries specified by the user in 'slave_skip_counter'. We + can't however skip events that has something to do with the log + files themselves. + + Filtering on own server id is extremely important, to ignore + execution of events created by the creation/rotation of the relay + log (remember that now the relay log starts with its Format_desc, + has a Rotate etc). + */ thd->server_id = ev->server_id; // use the original server id for logging thd->set_time(); // time the query @@ -1793,19 +1778,69 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) if (!ev->when) ev->when = time(NULL); ev->thd = thd; // because up to this point, ev->thd == 0 - DBUG_PRINT("info", ("thd->options={ %s%s}", - FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT), - FLAGSTR(thd->options, OPTION_BEGIN))); - exec_res = ev->exec_event(rli); - DBUG_PRINT("info", ("exec_event result: %d", exec_res)); - DBUG_ASSERT(rli->sql_thd==thd); + int reason= ev->shall_skip(rli); + if (reason == Log_event::EVENT_SKIP_COUNT) + --rli->slave_skip_counter; + pthread_mutex_unlock(&rli->data_lock); + if (reason == Log_event::EVENT_SKIP_NOT) + exec_res= ev->apply_event(rli); +#ifndef DBUG_OFF + /* + This only prints information to the debug trace. + + TODO: Print an informational message to the error log? + */ + static const char *const explain[] = { + // EVENT_SKIP_NOT, + "not skipped", + // EVENT_SKIP_IGNORE, + "skipped because event originated from this server", + // EVENT_SKIP_COUNT + "skipped because event skip counter was non-zero" + }; + DBUG_PRINT("skip_event", ("%s event was %s", + ev->get_type_str(), explain[reason])); +#endif + + DBUG_PRINT("info", ("apply_event error = %d", exec_res)); + if (exec_res == 0) + { + int error= ev->update_pos(rli); + char buf[22]; + DBUG_PRINT("info", ("update_pos error = %d", error)); + DBUG_PRINT("info", ("group %s %s", + llstr(rli->group_relay_log_pos, buf), + rli->group_relay_log_name)); + DBUG_PRINT("info", ("event %s %s", + llstr(rli->event_relay_log_pos, buf), + rli->event_relay_log_name)); + /* + The update should not fail, so print an error message and + return an error code. + + TODO: Replace this with a decent error message when merged + with BUG#24954 (which adds several new error message). + */ + if (error) + { + slave_print_msg(ERROR_LEVEL, rli, ER_UNKNOWN_ERROR, + "It was not possible to update the positions" + " of the relay log information: the slave may" + " be in an inconsistent state." 
+ " Stopped in %s position %s", + rli->group_relay_log_name, + llstr(rli->group_relay_log_pos, buf)); + DBUG_RETURN(1); + } + } + /* Format_description_log_event should not be deleted because it will be used to read info about the relay log's format; it will be deleted when the SQL thread does not need it, i.e. when this thread terminates. */ - if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) + if (type_code != FORMAT_DESCRIPTION_EVENT) { DBUG_PRINT("info", ("Deleting the event after it has been executed")); delete ev; @@ -2066,7 +2101,7 @@ after reconnect"); if (event_len == packet_error) { uint mysql_error_number= mysql_errno(mysql); - if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) + if (mysql_error_number == CR_NET_PACKET_TOO_LARGE) { sql_print_error("\ Log entry on master is longer than max_allowed_packet (%ld) on \ @@ -2366,13 +2401,17 @@ Slave SQL thread aborted. Can't execute init_slave query"); THD_CHECK_SENTRY(thd); if (exec_relay_log_event(thd,rli)) { + DBUG_PRINT("info", ("exec_relay_log_event() failed")); // do not scare the user if SQL thread was simply killed or stopped if (!sql_slave_killed(thd,rli)) { /* - retrieve as much info as possible from the thd and, error codes and warnings - and print this to the error log as to allow the user to locate the error + retrieve as much info as possible from the thd and, error + codes and warnings and print this to the error log as to + allow the user to locate the error */ + DBUG_PRINT("info", ("thd->net.last_errno=%d; rli->last_slave_errno=%d", + thd->net.last_errno, rli->last_slave_errno)); if (thd->net.last_errno != 0) { if (rli->last_slave_errno == 0) @@ -2393,10 +2432,25 @@ Slave SQL thread aborted. Can't execute init_slave query"); /* Print any warnings issued */ List_iterator_fast<MYSQL_ERROR> it(thd->warn_list); MYSQL_ERROR *err; + /* + Added controlled slave thread cancel for replication + of user-defined variables. + */ + bool udf_error = false; while ((err= it++)) + { + if (err->code == ER_CANT_OPEN_LIBRARY) + udf_error = true; sql_print_warning("Slave: %s Error_code: %d",err->msg, err->code); - - sql_print_error("\ + } + if (udf_error) + sql_print_error("Error loading user-defined library, slave SQL " + "thread aborted. Install the missing library, and restart the " + "slave SQL thread with \"SLAVE START\". We stopped at log '%s' " + "position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, + llbuff)); + else + sql_print_error("\ Error running query, slave SQL thread aborted. Fix the problem, and restart \ the slave SQL thread with \"SLAVE START\". We stopped at log \ '%s' position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos, llbuff)); @@ -2699,6 +2753,7 @@ static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN(1); } + pthread_mutex_lock(&mi->data_lock); ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */ switch (ev->get_type_code()) { @@ -2962,7 +3017,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) pthread_mutex_lock(log_lock); if ((uint4korr(buf + SERVER_ID_OFFSET) == ::server_id) && - !replicate_same_server_id) + !mi->rli.replicate_same_server_id) { /* Do not write it to the relay log. 
@@ -3096,12 +3151,16 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi, #ifdef HAVE_OPENSSL if (mi->ssl) + { mysql_ssl_set(mysql, mi->ssl_key[0]?mi->ssl_key:0, mi->ssl_cert[0]?mi->ssl_cert:0, mi->ssl_ca[0]?mi->ssl_ca:0, mi->ssl_capath[0]?mi->ssl_capath:0, mi->ssl_cipher[0]?mi->ssl_cipher:0); + mysql_options(mysql, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, + &mi->ssl_verify_server_cert); + } #endif mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname); diff --git a/sql/slave.h b/sql/slave.h index f21266bbee4..cb880c8125d 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -22,13 +22,18 @@ #include "my_list.h" #include "rpl_filter.h" #include "rpl_tblmap.h" -#include "rpl_rli.h" -#include "rpl_mi.h" #define SLAVE_NET_TIMEOUT 3600 #define MAX_SLAVE_ERROR 2000 + +// Forward declarations +struct st_relay_log_info; +typedef st_relay_log_info RELAY_LOG_INFO; + +class MASTER_INFO; + /***************************************************************************** MySQL Replication @@ -162,9 +167,9 @@ bool show_binlog_info(THD* thd); bool rpl_master_has_bug(RELAY_LOG_INFO *rli, uint bug_id); const char *print_slave_db_safe(const char *db); -int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); +int check_expected_error(THD* thd, RELAY_LOG_INFO const *rli, int error_code); void skip_load_data_infile(NET* net); -void slave_print_msg(enum loglevel level, RELAY_LOG_INFO* rli, +void slave_print_msg(enum loglevel level, RELAY_LOG_INFO const *rli, int err_code, const char* msg, ...) ATTRIBUTE_FORMAT(printf, 4, 5); @@ -182,7 +187,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,ulonglong pos, int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, const char** errmsg); void set_slave_thread_options(THD* thd); -void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli); +void set_slave_thread_default_charset(THD *thd, RELAY_LOG_INFO const *rli); void rotate_relay_log(MASTER_INFO* mi); pthread_handler_t handle_slave_io(void *arg); diff --git a/sql/sp.cc b/sql/sp.cc index 0c0bc8e8869..49b8b304e76 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -69,24 +69,6 @@ enum /* - Close mysql.proc, opened with open_proc_table_for_read(). - - SYNOPSIS - close_proc_table() - thd Thread context - backup Pointer to Open_tables_state instance which holds - information about tables which were open before we - decided to access mysql.proc. -*/ - -void close_proc_table(THD *thd, Open_tables_state *backup) -{ - close_thread_tables(thd); - thd->restore_backup_open_tables_state(backup); -} - - -/* Open the mysql.proc table for read. SYNOPSIS @@ -96,13 +78,6 @@ void close_proc_table(THD *thd, Open_tables_state *backup) currently open tables will be saved, and from which will be restored when we will end work with mysql.proc. - NOTES - Thanks to restrictions which we put on opening and locking of - this table for writing, we can open and lock it for reading - even when we already have some other tables open and locked. - One must call close_proc_table() to close table opened with - this call. 
- RETURN 0 Error # Pointer to TABLE object of mysql.proc @@ -110,38 +85,18 @@ void close_proc_table(THD *thd, Open_tables_state *backup) TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) { - TABLE_LIST tables; - TABLE *table; - bool not_used; - DBUG_ENTER("open_proc_table"); - - thd->reset_n_backup_open_tables_state(backup); + DBUG_ENTER("open_proc_table_for_read"); - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*)"proc"; - if (!(table= open_table(thd, &tables, thd->mem_root, ¬_used, - MYSQL_LOCK_IGNORE_FLUSH))) - { - thd->restore_backup_open_tables_state(backup); - DBUG_RETURN(0); - } - table->use_all_columns(); - - DBUG_ASSERT(table->s->system_table); + TABLE_LIST table; + bzero((char*) &table, sizeof(table)); + table.db= (char*) "mysql"; + table.table_name= table.alias= (char*)"proc"; + table.lock_type= TL_READ; - table->reginfo.lock_type= TL_READ; - /* - We have to ensure we are not blocked by a flush tables, as this - could lead to a deadlock if we have other tables opened. - */ - if (!(thd->lock= mysql_lock_tables(thd, &table, 1, - MYSQL_LOCK_IGNORE_FLUSH, ¬_used))) - { - close_proc_table(thd, backup); + if (!open_system_tables_for_read(thd, &table, backup)) + DBUG_RETURN(table.table); + else DBUG_RETURN(0); - } - DBUG_RETURN(table); } @@ -162,20 +117,15 @@ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) static TABLE *open_proc_table_for_update(THD *thd) { - TABLE_LIST tables; - TABLE *table; - DBUG_ENTER("open_proc_table"); - - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.table_name= tables.alias= (char*)"proc"; - tables.lock_type= TL_WRITE; + DBUG_ENTER("open_proc_table_for_update"); - table= open_ltable(thd, &tables, TL_WRITE); - if (table) - table->use_all_columns(); + TABLE_LIST table; + bzero((char*) &table, sizeof(table)); + table.db= (char*) "mysql"; + table.table_name= table.alias= (char*)"proc"; + table.lock_type= TL_WRITE; - DBUG_RETURN(table); + DBUG_RETURN(open_system_table_for_update(thd, &table)); } @@ -218,8 +168,7 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table) key_copy(key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) DBUG_RETURN(SP_KEY_NOT_FOUND); @@ -364,7 +313,7 @@ db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) chistics.comment.str= ptr; chistics.comment.length= length; - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); table= 0; ret= db_load_routine(thd, type, name, sphp, @@ -373,7 +322,7 @@ db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) done: if (table) - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(ret); } @@ -435,21 +384,21 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged))) goto end; - lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); + lex_start(thd, defstr.c_ptr(), defstr.length()); thd->spcont= 0; if (MYSQLparse(thd) || thd->is_fatal_error || newlex.sphead == NULL) { sp_head *sp= newlex.sphead; - if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) + if (dbchanged && (ret= mysql_change_db(thd, &old_db, TRUE))) goto end; delete sp; 
ret= SP_PARSE_ERROR; } else { - if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) + if (dbchanged && (ret= mysql_change_db(thd, &old_db, TRUE))) goto end; *sphp= newlex.sphead; (*sphp)->set_definer(&definer_user_name, &definer_host_name); @@ -733,15 +682,15 @@ struct st_used_field static struct st_used_field init_fields[]= { - { "Db", NAME_LEN, MYSQL_TYPE_STRING, 0}, - { "Name", NAME_LEN, MYSQL_TYPE_STRING, 0}, - { "Type", 9, MYSQL_TYPE_STRING, 0}, - { "Definer", 77, MYSQL_TYPE_STRING, 0}, - { "Modified", 0, MYSQL_TYPE_TIMESTAMP, 0}, - { "Created", 0, MYSQL_TYPE_TIMESTAMP, 0}, - { "Security_type", 1, MYSQL_TYPE_STRING, 0}, - { "Comment", NAME_LEN, MYSQL_TYPE_STRING, 0}, - { 0, 0, MYSQL_TYPE_STRING, 0} + { "Db", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0}, + { "Name", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0}, + { "Type", 9, MYSQL_TYPE_STRING, 0}, + { "Definer", 77, MYSQL_TYPE_STRING, 0}, + { "Modified", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Created", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Security_type", 1, MYSQL_TYPE_STRING, 0}, + { "Comment", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0}, + { 0, 0, MYSQL_TYPE_STRING, 0} }; @@ -775,7 +724,7 @@ print_field_values(THD *thd, TABLE *table, switch (used_field->field_type) { case MYSQL_TYPE_TIMESTAMP: { - TIME tmp_time; + MYSQL_TIME tmp_time; bzero((char *)&tmp_time, sizeof(tmp_time)); ((Field_timestamp *) used_field->field)->get_time(&tmp_time); @@ -925,7 +874,7 @@ sp_drop_db_routines(THD *thd, char *db) table->file->ha_index_init(0, 1); if (! table->file->index_read(table->record[0], (byte *)table->field[MYSQL_PROC_FIELD_DB]->ptr, - key_len, HA_READ_KEY_EXACT)) + (key_part_map)1, HA_READ_KEY_EXACT)) { int nxtres; bool deleted= FALSE; @@ -1092,7 +1041,7 @@ sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any, bool no_error) lex_name.length= strlen(routine->table_name); lex_db.str= thd->strmake(routine->db, lex_db.length); lex_name.str= thd->strmake(routine->table_name, lex_name.length); - name= new sp_name(lex_db, lex_name); + name= new sp_name(lex_db, lex_name, true); name->init_qname(thd); sp_object_found= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, name, &thd->sp_proc_cache, FALSE) != NULL || @@ -1147,7 +1096,7 @@ sp_routine_exists_in_table(THD *thd, int type, sp_name *name) { if ((ret= db_find_routine_aux(thd, type, name, table)) != SP_OK) ret= SP_KEY_NOT_FOUND; - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); } return ret; } @@ -1649,10 +1598,8 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex, rest of the server checks agains NAME_LEN bytes and not chars. Hence, the overrun happens only if the name is in length > 32 and uses multibyte (cyrillic, greek, etc.) - - !! Change 3 with SYSTEM_CHARSET_MBMAXLEN when it's defined. */ - char n[NAME_LEN*3*2+2]; + char n[NAME_LEN*2+2]; /* m_qname.str is not always \0 terminated */ memcpy(n, name.m_qname.str, name.m_qname.length); @@ -1914,7 +1861,7 @@ sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, DBUG_RETURN(0); } - ret= mysql_change_db(thd, new_db.str, no_access_check); + ret= mysql_change_db(thd, &new_db, no_access_check); *dbchangedp= ret == 0; DBUG_RETURN(ret); @@ -100,7 +100,6 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first); we already have some tables open and locked. 
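
The sp.cc hunks above drop the hand-written open/lock/restore logic from open_proc_table_for_read() and open_proc_table_for_update() in favour of the shared open_system_tables_for_read(), close_system_tables() and open_system_table_for_update() helpers, whose definitions appear in the sql_base.cc part of this diff. A rough sketch of the save/open/restore pattern those helpers follow, using hypothetical Session, Table and OpenTablesState types (illustration only; the server operates on THD and Open_tables_state):

#include <string>
#include <vector>

// Hypothetical stand-ins for THD, Open_tables_state and TABLE.
struct Table { std::string name; };
struct OpenTablesState { std::vector<Table> open_tables; };
struct Session { OpenTablesState state; };

// Back up the caller's open-table state, then open the requested system
// tables; on any failure, restore the backup before returning.
static bool open_system_tables_for_read(Session &s,
                                        const std::vector<std::string> &names,
                                        OpenTablesState &backup)
{
  backup= s.state;                  // remember what the caller had open
  s.state= OpenTablesState();       // start from a clean slate
  for (const std::string &n : names)
  {
    if (n.empty())                  // stand-in for an open_table() failure
    {
      s.state= backup;              // roll back on error
      return true;                  // true == error, matching the server's style
    }
    s.state.open_tables.push_back(Table{n});
  }
  return false;
}

// Close the system tables and give the caller its original state back.
static void close_system_tables(Session &s, const OpenTablesState &backup)
{
  s.state= backup;
}

int main()
{
  Session s;
  s.state.open_tables.push_back(Table{"app_table"});
  OpenTablesState backup;
  if (!open_system_tables_for_read(s, {"mysql.proc"}, backup))
  {
    // ... read from the system tables here ...
    close_system_tables(s, backup);
  }
  return 0;
}

The important invariant is that the caller's open-table state is backed up before any system table is touched and is restored either on failure or by the matching close call, exactly as db_find_routine() now does via close_system_tables() in the hunk above.
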
*/ TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup); -void close_proc_table(THD *thd, Open_tables_state *backup); /* diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 1e0986f6e82..04a7b2574a4 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -349,13 +349,13 @@ sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr) enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; bool save_abort_on_warning= thd->abort_on_warning; - bool save_no_trans_update= thd->no_trans_update; + bool save_no_trans_update_stmt= thd->no_trans_update.stmt; thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; thd->abort_on_warning= thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES); - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; /* Save the value in the field. Convert the value if needed. */ @@ -363,7 +363,7 @@ sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr) thd->count_cuted_fields= save_count_cuted_fields; thd->abort_on_warning= save_abort_on_warning; - thd->no_trans_update= save_no_trans_update; + thd->no_trans_update.stmt= save_no_trans_update_stmt; if (thd->net.report_error) { @@ -408,9 +408,22 @@ sp_name::init_qname(THD *thd) */ bool -check_routine_name(LEX_STRING ident) +check_routine_name(LEX_STRING *ident) { - return (!ident.str || !ident.str[0] || ident.str[ident.length-1] == ' '); + if (!ident || !ident->str || !ident->str[0] || + ident->str[ident->length-1] == ' ') + { + my_error(ER_SP_WRONG_NAME, MYF(0), ident->str); + return TRUE; + } + if (check_string_char_length(ident, "", NAME_CHAR_LEN, + system_charset_info, 1)) + { + my_error(ER_TOO_LONG_IDENT, MYF(0), ident->str); + return TRUE; + } + + return FALSE; } /* ------------------------------------------------------------------ */ @@ -490,7 +503,7 @@ sp_head::init(LEX *lex) { DBUG_ENTER("sp_head::init"); - lex->spcont= m_pcont= new sp_pcontext(NULL); + lex->spcont= m_pcont= new sp_pcontext(); /* Altough trg_table_fields list is used only in triggers we init for all @@ -541,15 +554,14 @@ void sp_head::init_strings(THD *thd, LEX *lex) { DBUG_ENTER("sp_head::init_strings"); - const uchar *endp; /* Used to trim the end */ + const char *endp; /* Used to trim the end */ /* During parsing, we must use thd->mem_root */ MEM_ROOT *root= thd->mem_root; if (m_param_begin && m_param_end) { m_params.length= m_param_end - m_param_begin; - m_params.str= strmake_root(root, - (char *)m_param_begin, m_params.length); + m_params.str= strmake_root(root, m_param_begin, m_params.length); } /* If ptr has overrun end_of_query then end_of_query is the end */ @@ -561,9 +573,9 @@ sp_head::init_strings(THD *thd, LEX *lex) endp= skip_rear_comments(m_body_begin, endp); m_body.length= endp - m_body_begin; - m_body.str= strmake_root(root, (char *)m_body_begin, m_body.length); + m_body.str= strmake_root(root, m_body_begin, m_body.length); m_defstr.length= endp - lex->buf; - m_defstr.str= strmake_root(root, (char *)lex->buf, m_defstr.length); + m_defstr.str= strmake_root(root, lex->buf, m_defstr.length); DBUG_VOID_RETURN; } @@ -993,6 +1005,12 @@ sp_head::execute(THD *thd) m_first_instance->m_last_cached_sp == this) || (m_recursion_level + 1 == m_next_cached_sp->m_recursion_level)); + /* + NOTE: The SQL Standard does not specify the context that should be + preserved for stored routines. However, at SAP/Walldorf meeting it was + decided that current database should be preserved. 
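
The note above is the reason sp_head::execute() switches to the routine's database with sp_use_new_db() and switches back with mysql_change_db() on the way out, as the following hunk shows. A small RAII-flavoured sketch of that save-and-restore idea, with a hypothetical Session type; the server itself uses explicit calls rather than a guard object (illustration only):

#include <iostream>
#include <string>

struct Session { std::string current_db; };   // hypothetical stand-in for THD

// Switch to the routine's database for the duration of a scope and
// restore the caller's database afterwards, even on early return.
class Scoped_db_switch
{
  Session &s_;
  std::string saved_;
public:
  Scoped_db_switch(Session &s, const std::string &routine_db)
    : s_(s), saved_(s.current_db)
  {
    if (!routine_db.empty())
      s_.current_db= routine_db;
  }
  ~Scoped_db_switch() { s_.current_db= saved_; }
};

int main()
{
  Session s{"app"};
  {
    Scoped_db_switch guard(s, "mysql");   // routine defined in `mysql`
    std::cout << "inside routine: " << s.current_db << '\n';
  }
  std::cout << "after routine:  " << s.current_db << '\n';  // back to `app`
  return 0;
}
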
+ */ + if (m_db.length && (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged))) goto done; @@ -1117,7 +1135,7 @@ sp_head::execute(THD *thd) case SP_HANDLER_CONTINUE: thd->restore_active_arena(&execute_arena, &backup_arena); thd->set_n_backup_active_arena(&execute_arena, &backup_arena); - ctx->push_hstack(ip); + ctx->push_hstack(i->get_cont_dest()); // Fall through default: ip= hip; @@ -1127,6 +1145,7 @@ sp_head::execute(THD *thd) thd->clear_error(); thd->is_fatal_error= 0; thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; continue; } } @@ -1170,7 +1189,7 @@ sp_head::execute(THD *thd) (It would generate an error from mysql_change_db() when old_db=="") */ if (! thd->killed) - err_status|= mysql_change_db(thd, old_db.str, 1); + err_status|= mysql_change_db(thd, &old_db, TRUE); } m_flags&= ~IS_INVOKED; DBUG_PRINT("info", @@ -1226,7 +1245,11 @@ set_routine_security_ctx(THD *thd, sp_head *sp, bool is_proc, Security_context **save_ctx) { *save_ctx= 0; - if (sp_change_security_context(thd, sp, save_ctx)) + if (sp->m_chistics->suid != SP_IS_NOT_SUID && + sp->m_security_ctx.change_security_context(thd, &sp->m_definer_user, + &sp->m_definer_host, + &sp->m_db, + save_ctx)) return TRUE; /* @@ -1243,7 +1266,7 @@ set_routine_security_ctx(THD *thd, sp_head *sp, bool is_proc, check_routine_access(thd, EXECUTE_ACL, sp->m_db.str, sp->m_name.str, is_proc, FALSE)) { - sp_restore_security_context(thd, *save_ctx); + sp->m_security_ctx.restore_security_context(thd, *save_ctx); *save_ctx= 0; return TRUE; } @@ -1554,7 +1577,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, } #ifndef NO_EMBEDDED_ACCESS_CHECKS - sp_restore_security_context(thd, save_security_ctx); + m_security_ctx.restore_security_context(thd, save_security_ctx); #endif err_with_cleanup: @@ -1772,7 +1795,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) #ifndef NO_EMBEDDED_ACCESS_CHECKS if (save_security_ctx) - sp_restore_security_context(thd, save_security_ctx); + m_security_ctx.restore_security_context(thd, save_security_ctx); #endif if (!save_spcont) @@ -2104,24 +2127,18 @@ sp_head::show_create_procedure(THD *thd) String buffer(buff, sizeof(buff), system_charset_info); int res; List<Item> field_list; - byte *sql_mode_str; - ulong sql_mode_len; + LEX_STRING sql_mode; bool full_access; DBUG_ENTER("sp_head::show_create_procedure"); DBUG_PRINT("info", ("procedure %s", m_name.str)); - LINT_INIT(sql_mode_str); - LINT_INIT(sql_mode_len); - if (check_show_routine_access(thd, this, &full_access)) DBUG_RETURN(1); - sql_mode_str= - sys_var_thd_sql_mode::symbolic_mode_representation(thd, - m_sql_mode, - &sql_mode_len); - field_list.push_back(new Item_empty_string("Procedure", NAME_LEN)); - field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + sys_var_thd_sql_mode::symbolic_mode_representation(thd, m_sql_mode, + &sql_mode); + field_list.push_back(new Item_empty_string("Procedure", NAME_CHAR_LEN)); + field_list.push_back(new Item_empty_string("sql_mode", sql_mode.length)); // 1024 is for not to confuse old clients Item_empty_string *definition= new Item_empty_string("Create Procedure", max(buffer.length(),1024)); @@ -2133,7 +2150,7 @@ sp_head::show_create_procedure(THD *thd) DBUG_RETURN(1); protocol->prepare_for_resend(); protocol->store(m_name.str, m_name.length, system_charset_info); - protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + protocol->store((char*) sql_mode.str, sql_mode.length, system_charset_info); if (full_access) protocol->store(m_defstr.str, 
m_defstr.length, system_charset_info); else @@ -2176,23 +2193,18 @@ sp_head::show_create_function(THD *thd) String buffer(buff, sizeof(buff), system_charset_info); int res; List<Item> field_list; - byte *sql_mode_str; - ulong sql_mode_len; + LEX_STRING sql_mode; bool full_access; DBUG_ENTER("sp_head::show_create_function"); DBUG_PRINT("info", ("procedure %s", m_name.str)); - LINT_INIT(sql_mode_str); - LINT_INIT(sql_mode_len); if (check_show_routine_access(thd, this, &full_access)) DBUG_RETURN(1); - sql_mode_str= - sys_var_thd_sql_mode::symbolic_mode_representation(thd, - m_sql_mode, - &sql_mode_len); - field_list.push_back(new Item_empty_string("Function",NAME_LEN)); - field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + sys_var_thd_sql_mode::symbolic_mode_representation(thd, m_sql_mode, + &sql_mode); + field_list.push_back(new Item_empty_string("Function",NAME_CHAR_LEN)); + field_list.push_back(new Item_empty_string("sql_mode", sql_mode.length)); Item_empty_string *definition= new Item_empty_string("Create Function", max(buffer.length(),1024)); definition->maybe_null= TRUE; @@ -2203,7 +2215,7 @@ sp_head::show_create_function(THD *thd) DBUG_RETURN(1); protocol->prepare_for_resend(); protocol->store(m_name.str, m_name.length, system_charset_info); - protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + protocol->store(sql_mode.str, sql_mode.length, system_charset_info); if (full_access) protocol->store(m_defstr.str, m_defstr.length, system_charset_info); else @@ -2448,7 +2460,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, reinit_stmt_before_use(thd, m_lex); if (open_tables) - res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables, nextp); + res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables); if (!res) { @@ -2500,8 +2512,7 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, sp_instr class functions */ -int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables, - uint *nextp) +int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables) { int result; @@ -2511,19 +2522,16 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables, */ if (check_table_access(thd, SELECT_ACL, tables, 0) || open_and_lock_tables(thd, tables)) - { - get_cont_dest(nextp); result= -1; - } else result= 0; return result; } -void sp_instr::get_cont_dest(uint *nextp) +uint sp_instr::get_cont_dest() { - *nextp= m_ip+1; + return (m_ip+1); } @@ -2716,9 +2724,9 @@ sp_instr_set_trigger_field::print(String *str) sp_instr_opt_meta */ -void sp_instr_opt_meta::get_cont_dest(uint *nextp) +uint sp_instr_opt_meta::get_cont_dest() { - *nextp= m_cont_dest; + return m_cont_dest; } @@ -2810,7 +2818,6 @@ sp_instr_jump_if_not::exec_core(THD *thd, uint *nextp) if (! 
it) { res= -1; - *nextp = m_cont_dest; } else { @@ -3379,7 +3386,6 @@ sp_instr_set_case_expr::exec_core(THD *thd, uint *nextp) spcont->clear_handler(); thd->spcont= spcont; } - *nextp= m_cont_dest; /* For continue handler */ } else *nextp= m_ip+1; @@ -3429,44 +3435,6 @@ sp_instr_set_case_expr::opt_move(uint dst, List<sp_instr> *bp) /* ------------------------------------------------------------------ */ -/* - Security context swapping -*/ - -#ifndef NO_EMBEDDED_ACCESS_CHECKS -bool -sp_change_security_context(THD *thd, sp_head *sp, Security_context **backup) -{ - *backup= 0; - if (sp->m_chistics->suid != SP_IS_NOT_SUID && - (strcmp(sp->m_definer_user.str, - thd->security_ctx->priv_user) || - my_strcasecmp(system_charset_info, sp->m_definer_host.str, - thd->security_ctx->priv_host))) - { - if (acl_getroot_no_password(&sp->m_security_ctx, sp->m_definer_user.str, - sp->m_definer_host.str, - sp->m_definer_host.str, - sp->m_db.str)) - { - my_error(ER_NO_SUCH_USER, MYF(0), sp->m_definer_user.str, - sp->m_definer_host.str); - return TRUE; - } - *backup= thd->security_ctx; - thd->security_ctx= &sp->m_security_ctx; - } - return FALSE; -} - -void -sp_restore_security_context(THD *thd, Security_context *backup) -{ - if (backup) - thd->security_ctx= backup; -} - -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ /* Structure that represent all instances of one table diff --git a/sql/sp_head.h b/sql/sp_head.h index 46ad3dd96d8..551707fa7bd 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -59,9 +59,10 @@ public: calling set_routine_type(). */ LEX_STRING m_sroutines_key; + bool m_explicit_name; /**< Prepend the db name? */ - sp_name(LEX_STRING db, LEX_STRING name) - : m_db(db), m_name(name) + sp_name(LEX_STRING db, LEX_STRING name, bool use_explicit_name) + : m_db(db), m_name(name), m_explicit_name(use_explicit_name) { m_qname.str= m_sroutines_key.str= 0; m_qname.length= m_sroutines_key.length= 0; @@ -79,6 +80,7 @@ public: m_name.length= m_qname.length= key_len - 1; m_db.str= 0; m_db.length= 0; + m_explicit_name= false; } // Init. the qualified name from the db and name. @@ -95,7 +97,7 @@ public: bool -check_routine_name(LEX_STRING name); +check_routine_name(LEX_STRING *ident); class sp_head :private Query_arena { @@ -107,8 +109,6 @@ public: /* Possible values of m_flags */ enum { HAS_RETURN= 1, // For FUNCTIONs only: is set if has RETURN - IN_SIMPLE_CASE= 2, // Is set if parsing a simple CASE - IN_HANDLER= 4, // Is set if the parser is in a handler body MULTI_RESULTS= 8, // Is set if a procedure with SELECT(s) CONTAINS_DYNAMIC_SQL= 16, // Is set if a procedure with PREPARE/EXECUTE IS_INVOKED= 32, // Is set if this sp_head is being used @@ -128,7 +128,7 @@ public: create_field m_return_field_def; /* This is used for FUNCTIONs only. */ - const uchar *m_tmp_query; // Temporary pointer to sub query string + const char *m_tmp_query; // Temporary pointer to sub query string st_sp_chistics *m_chistics; ulong m_sql_mode; // For SHOW CREATE and execution LEX_STRING m_qname; // db.name @@ -176,7 +176,7 @@ public: */ HASH m_sroutines; // Pointers set during parsing - const uchar *m_param_begin, *m_param_end, *m_body_begin; + const char *m_param_begin, *m_param_end, *m_body_begin; /* Security context for stored routine which should be run under @@ -468,13 +468,15 @@ public: thd Thread handle nextp OUT index of the next instruction to execute. (For most instructions this will be the instruction following this - one). - - RETURN - 0 on success, - other if some error occured + one). 
Note that this parameter is undefined in case of + errors, use get_cont_dest() to find the continuation + instruction for CONTINUE error handlers. + + RETURN + 0 on success, + other if some error occurred */ - + virtual int execute(THD *thd, uint *nextp) = 0; /** @@ -482,22 +484,17 @@ public: Open and lock the tables used by this statement, as a pre-requisite to execute the core logic of this instruction with <code>exec_core()</code>. - If this statement fails, the next instruction to execute is also returned. - This is useful when a user defined SQL continue handler needs to be - executed. @param thd the current thread @param tables the list of tables to open and lock - @param nextp the continuation instruction, returned to the caller if this - method fails. @return zero on success, non zero on failure. */ - int exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables, uint *nextp); + int exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables); /** Get the continuation destination of this instruction. - @param nextp the continuation destination (output) + @return the continuation destination */ - virtual void get_cont_dest(uint *nextp); + virtual uint get_cont_dest(); /* Execute core function of instruction after all preparations (e.g. @@ -763,7 +760,7 @@ public: virtual void set_destination(uint old_dest, uint new_dest) = 0; - virtual void get_cont_dest(uint *nextp); + virtual uint get_cont_dest(); protected: diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc index 6229cf14604..780243cc79f 100644 --- a/sql/sp_pcontext.cc +++ b/sql/sp_pcontext.cc @@ -25,6 +25,11 @@ #include "sp_pcontext.h" #include "sp_head.h" +/* Initial size for the dynamic arrays in sp_pcontext */ +#define PCONTEXT_ARRAY_INIT_ALLOC 16 +/* Increment size for the dynamic arrays in sp_pcontext */ +#define PCONTEXT_ARRAY_INCREMENT_ALLOC 8 + /* Sanity check for SQLSTATEs. Will not check if it's really an existing state (there are just too many), but will check length and bad characters. 
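
The execute()/get_cont_dest() documentation above describes the new contract: *nextp is undefined on error, and a CONTINUE handler instead resumes at the address returned by get_cont_dest(), which is m_ip + 1 for ordinary instructions and an explicit m_cont_dest for instructions such as jumps and case expressions. A compact sketch of that interface with hypothetical class names (illustration only):

#include <cstdio>

// Hypothetical miniature of the sp_instr hierarchy.
class Instr
{
protected:
  unsigned m_ip;                       // this instruction's address
public:
  explicit Instr(unsigned ip) : m_ip(ip) {}
  virtual ~Instr() {}
  // Where execution continues after a CONTINUE handler has run.
  // By default that is simply the next instruction.
  virtual unsigned get_cont_dest() const { return m_ip + 1; }
};

// Instructions with an optional destination (jumps, case expressions, ...)
// carry an explicit continuation address instead.
class Instr_with_cont : public Instr
{
  unsigned m_cont_dest;
public:
  Instr_with_cont(unsigned ip, unsigned cont_dest)
    : Instr(ip), m_cont_dest(cont_dest) {}
  unsigned get_cont_dest() const override { return m_cont_dest; }
};

int main()
{
  Instr plain(4);
  Instr_with_cont jump(7, 12);
  std::printf("plain continues at %u, jump continues at %u\n",
              plain.get_cont_dest(), jump.get_cont_dest());
  return 0;
}
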
@@ -49,28 +54,61 @@ sp_cond_check(LEX_STRING *sqlstate) return TRUE; } -sp_pcontext::sp_pcontext(sp_pcontext *prev) - :Sql_alloc(), m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), - m_context_handlers(0), m_parent(prev), m_pboundary(0) +sp_pcontext::sp_pcontext() + : Sql_alloc(), + m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), + m_context_handlers(0), m_parent(NULL), m_pboundary(0), + m_label_scope(LABEL_DEFAULT_SCOPE) { - VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), 16, 8)); - VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), 16, 8)); - VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), 16, 8)); - VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), 16, 8)); - VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), 16, 8)); + VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); m_label.empty(); m_children.empty(); - if (!prev) - { - m_var_offset= m_cursor_offset= 0; - m_num_case_exprs= 0; - } - else - { - m_var_offset= prev->m_var_offset + prev->m_max_var_index; - m_cursor_offset= prev->current_cursor_count(); - m_num_case_exprs= prev->get_num_case_exprs(); - } + + m_var_offset= m_cursor_offset= 0; + m_num_case_exprs= 0; +} + +sp_pcontext::sp_pcontext(sp_pcontext *prev, label_scope_type label_scope) + : Sql_alloc(), + m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), + m_context_handlers(0), m_parent(prev), m_pboundary(0), + m_label_scope(label_scope) +{ + VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + m_label.empty(); + m_children.empty(); + + m_var_offset= prev->m_var_offset + prev->m_max_var_index; + m_cursor_offset= prev->current_cursor_count(); + m_num_case_exprs= prev->get_num_case_exprs(); } void @@ -92,9 +130,9 @@ sp_pcontext::destroy() } sp_pcontext * -sp_pcontext::push_context() +sp_pcontext::push_context(label_scope_type label_scope) { - sp_pcontext *child= new sp_pcontext(this); + sp_pcontext *child= new sp_pcontext(this, label_scope); if (child) m_children.push_back(child); @@ -257,7 +295,15 @@ sp_pcontext::find_label(char *name) if (my_strcasecmp(system_charset_info, name, lab->name) == 0) return lab; - if (m_parent) + /* + Note about exception handlers. + See SQL:2003 SQL/PSM (ISO/IEC 9075-4:2003), + section 13.1 <compound statement>, + syntax rule 4. 
+ In short, a DECLARE HANDLER block can not refer + to labels from the parent context, as they are out of scope. + */ + if (m_parent && (m_label_scope == LABEL_DEFAULT_SCOPE)) return m_parent->find_label(name); return NULL; } diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h index b2cdd5e689c..5bffda79f98 100644 --- a/sql/sp_pcontext.h +++ b/sql/sp_pcontext.h @@ -88,16 +88,33 @@ typedef struct sp_cond sp_cond_type_t *val; } sp_cond_t; +/** + The scope of a label in Stored Procedures, + for name resolution of labels in a parsing context. +*/ +enum label_scope_type +{ + /** + The labels declared in a parent context are in scope. + */ + LABEL_DEFAULT_SCOPE, + /** + The labels declared in a parent context are not in scope. + */ + LABEL_HANDLER_SCOPE +}; -/* - The parse-time context, used to keep track on declared variables/parameters, +/** + The parse-time context, used to keep track of declared variables/parameters, conditions, handlers, cursors and labels, during parsing. sp_contexts are organized as a tree, with one object for each begin-end - block, plus a root-context for the parameters. + block, one object for each exception handler, + plus a root-context for the parameters. This is used during parsing for looking up defined names (e.g. declared variables and visible labels), for error checking, and to calculate offsets to be used at runtime. (During execution variable values, active handlers and cursors, etc, are referred to by an index in a stack.) + Parsing contexts for exception handlers limit the visibility of labels. The pcontext tree is also kept during execution and is used for error checking (e.g. correct number of parameters), and in the future, used by the debugger. @@ -105,21 +122,30 @@ typedef struct sp_cond class sp_pcontext : public Sql_alloc { - sp_pcontext(const sp_pcontext &); /* Prevent use of these */ - void operator=(sp_pcontext &); +public: - public: - - sp_pcontext(sp_pcontext *prev); + /** + Constructor. + Builds a parsing context root node. + */ + sp_pcontext(); // Free memory void destroy(); + /** + Create and push a new context in the tree. + @param label_scope label scope for the new parsing context + @return the node created + */ sp_pcontext * - push_context(); + push_context(label_scope_type label_scope); - // Returns the previous context, not the one we pop + /** + Pop a node from the parsing context tree. + @return the parent node + */ sp_pcontext * pop_context(); @@ -363,6 +389,13 @@ class sp_pcontext : public Sql_alloc protected: + /** + Constructor for a tree node. + @param prev the parent parsing context + @param label_scope label_scope for this parsing context + */ + sp_pcontext(sp_pcontext *prev, label_scope_type label_scope); + /* m_max_var_index -- number of variables (including all types of arguments) in this context including all children contexts. @@ -416,6 +449,14 @@ private: List<sp_pcontext> m_children; // Children contexts, used for destruction + /** + Scope of labels for this parsing context. 
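
As the find_label() comment above explains, label lookup walks up the chain of parsing contexts but must stop at a context opened for an exception handler, so a DECLARE ... HANDLER body cannot see labels of the enclosing blocks. A simplified, case-sensitive sketch of that lookup with hypothetical types (the real sp_pcontext uses my_strcasecmp and its own containers; illustration only):

#include <cstdio>
#include <cstring>
#include <vector>

enum label_scope_type { LABEL_DEFAULT_SCOPE, LABEL_HANDLER_SCOPE };

// Hypothetical miniature of sp_pcontext: a tree node carrying labels.
struct Context
{
  Context *parent;
  label_scope_type scope;
  std::vector<const char*> labels;

  const char *find_label(const char *name) const
  {
    for (const char *lab : labels)
      if (std::strcmp(lab, name) == 0)
        return lab;
    // Labels of enclosing blocks are visible from ordinary blocks,
    // but not from a DECLARE ... HANDLER body (SQL:2003, 13.1, SR 4).
    if (parent && scope == LABEL_DEFAULT_SCOPE)
      return parent->find_label(name);
    return nullptr;
  }
};

int main()
{
  Context outer{nullptr, LABEL_DEFAULT_SCOPE, {"retry"}};
  Context block{&outer, LABEL_DEFAULT_SCOPE, {}};
  Context handler{&outer, LABEL_HANDLER_SCOPE, {}};

  std::printf("from block:   %s\n", block.find_label("retry")   ? "found" : "not found");
  std::printf("from handler: %s\n", handler.find_label("retry") ? "found" : "not found");
  return 0;
}
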
+ */ + label_scope_type m_label_scope; + +private: + sp_pcontext(const sp_pcontext &); /* Prevent use of these */ + void operator=(sp_pcontext &); }; // class sp_pcontext : public Sql_alloc diff --git a/sql/spatial.h b/sql/spatial.h index 0c0452b5abc..f806861290e 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -144,15 +144,46 @@ struct MBR return (xmin<x) && (xmax>x) && (ymin<y) && (ymax>y); } + /** + The dimension maps to an integer as: + - Polygon -> 2 + - Horizontal or vertical line -> 1 + - Point -> 0 + - Invalid MBR -> -1 + */ + int dimension() const + { + int d= 0; + + if (xmin > xmax) + return -1; + else if (xmin < xmax) + d++; + + if (ymin > ymax) + return -1; + else if (ymin < ymax) + d++; + + return d; + } + int overlaps(const MBR *mbr) { - int lb= mbr->inner_point(xmin, ymin); - int rb= mbr->inner_point(xmax, ymin); - int rt= mbr->inner_point(xmax, ymax); - int lt= mbr->inner_point(xmin, ymax); + /* + overlaps() requires that some point inside *this is also inside + *mbr, and that both geometries and their intersection are of the + same dimension. + */ + int d = dimension(); + + if (d != mbr->dimension() || d <= 0 || contains(mbr) || within(mbr)) + return 0; + + MBR intersection(max(xmin, mbr->xmin), max(ymin, mbr->ymin), + min(xmax, mbr->xmax), min(ymax, mbr->ymax)); - int a = lb+rb+rt+lt; - return (a>0) && (a<4) && (!within(mbr)); + return (d == intersection.dimension()); } }; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ee7127fcd8d..ba1ec66fb02 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1813,8 +1813,7 @@ static bool update_user_table(THD *thd, TABLE *table, table->key_info->key_length); if (table->file->index_read_idx(table->record[0], 0, - (byte *) user_key, - table->key_info->key_length, + (byte *) user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), @@ -1905,8 +1904,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - user_key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* what == 'N' means revoke */ @@ -2123,8 +2121,7 @@ static int replace_db_table(TABLE *table, const char *db, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0],0, - user_key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0],0, user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { if (what == 'N') @@ -2341,9 +2338,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) col_privs->field[4]->store("",0, &my_charset_latin1); col_privs->file->ha_index_init(0, 1); - if (col_privs->file->index_read(col_privs->record[0], - (byte*) key, - key_prefix_len, HA_READ_KEY_EXACT)) + if (col_privs->file->index_read(col_privs->record[0], (byte*) key, + (key_part_map)15, HA_READ_KEY_EXACT)) { cols = 0; /* purecov: deadcode */ col_privs->file->ha_index_end(); @@ -2479,7 +2475,7 @@ static int replace_column_table(GRANT_TABLE *g_t, table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); - /* Get length of 3 first key parts */ + /* Get length of 4 first key parts */ key_prefix_length= (key_part[0].store_length + key_part[1].store_length + key_part[2].store_length + key_part[3].store_length); key_copy(key, table->record[0], table->key_info, key_prefix_length); @@ -2505,8 
+2501,7 @@ static int replace_column_table(GRANT_TABLE *g_t, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read(table->record[0], user_key, - table->key_info->key_length, + if (table->file->index_read(table->record[0], user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { if (revoke_grant) @@ -2582,8 +2577,7 @@ static int replace_column_table(GRANT_TABLE *g_t, key_copy(user_key, table->record[0], table->key_info, key_prefix_length); - if (table->file->index_read(table->record[0], user_key, - key_prefix_length, + if (table->file->index_read(table->record[0], user_key, (key_part_map)15, HA_READ_KEY_EXACT)) goto end; @@ -2684,8 +2678,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - user_key, table->key_info->key_length, + if (table->file->index_read_idx(table->record[0], 0, user_key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* @@ -2803,13 +2796,13 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name, table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); table->field[3]->store(routine_name,(uint) strlen(routine_name), &my_charset_latin1); - table->field[4]->store((longlong)(is_proc ? + table->field[4]->store((longlong)(is_proc ? TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION), TRUE); store_record(table,record[1]); // store at pos 1 - if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr,0, + if (table->file->index_read_idx(table->record[0], 0, + (byte*) table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* @@ -4059,6 +4052,26 @@ err2: } +static bool check_grant_db_routine(THD *thd, const char *db, HASH *hash) +{ + Security_context *sctx= thd->security_ctx; + + for (uint idx= 0; idx < hash->records; ++idx) + { + GRANT_NAME *item= (GRANT_NAME*) hash_element(hash, idx); + + if (strcmp(item->user, sctx->priv_user) == 0 && + strcmp(item->db, db) == 0 && + compare_hostname(&item->host, sctx->host, sctx->ip)) + { + return FALSE; + } + } + + return TRUE; +} + + /* Check if a user has the right to access a database Access is accepted if he has a grant for any table/routine in the database @@ -4070,9 +4083,10 @@ bool check_grant_db(THD *thd,const char *db) Security_context *sctx= thd->security_ctx; char helping [NAME_LEN+USERNAME_LENGTH+2]; uint len; - bool error= 1; + bool error= TRUE; len= (uint) (strmov(strmov(helping, sctx->priv_user) + 1, db) - helping) + 1; + rw_rdlock(&LOCK_grant); for (uint idx=0 ; idx < column_priv_hash.records ; idx++) @@ -4083,11 +4097,17 @@ bool check_grant_db(THD *thd,const char *db) !memcmp(grant_table->hash_key,helping,len) && compare_hostname(&grant_table->host, sctx->host, sctx->ip)) { - error=0; // Found match + error= FALSE; /* Found match. 
*/ break; } } + + if (error) + error= check_grant_db_routine(thd, db, &proc_priv_hash) && + check_grant_db_routine(thd, db, &func_priv_hash); + rw_unlock(&LOCK_grant); + return error; } @@ -5008,7 +5028,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, key_copy(user_key, table->record[0], table->key_info, key_prefix_length); if ((error= table->file->index_read_idx(table->record[0], 0, - user_key, key_prefix_length, + user_key, (key_part_map)3, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) @@ -6086,6 +6106,8 @@ int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond) char *curr_host= thd->security_ctx->priv_host_name(); DBUG_ENTER("fill_schema_user_privileges"); + if (!initialized) + DBUG_RETURN(0); pthread_mutex_lock(&acl_cache->lock); for (counter=0 ; counter < acl_users.elements ; counter++) @@ -6145,6 +6167,8 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond) char *curr_host= thd->security_ctx->priv_host_name(); DBUG_ENTER("fill_schema_schema_privileges"); + if (!initialized) + DBUG_RETURN(0); pthread_mutex_lock(&acl_cache->lock); for (counter=0 ; counter < acl_dbs.elements ; counter++) diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 86d2cabc703..cba283ec65b 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -47,8 +47,7 @@ don't forget to update 1. static struct show_privileges_st sys_privileges[] 2. static const char *command_array[] and static uint command_lengths[] - 3. mysql_create_system_tables.sh, mysql_fix_privilege_tables.sql - and mysql-test/lib/init_db.sql + 3. mysql_system_tables.sql and mysql_system_tables_fix.sql 4. acl_init() or whatever - to define behaviour for old privilege tables 5. sql_yacc.yy - for GRANT/REVOKE to work */ diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 44325fe7b12..0f428da1fdb 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -858,6 +858,7 @@ void free_io_cache(TABLE *table) DBUG_VOID_RETURN; } + /* Close all tables which aren't in use by any thread @@ -970,6 +971,71 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, /* + Close all tables which match specified connection string or + if specified string is NULL, then any table with a connection string. 
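
check_grant_db_routine() above admits a user into a database when any procedure or function grant matches, complementing the table-grant scan in check_grant_db(). A rough sketch of the same idea over a flat in-memory list, with hypothetical types; the server walks its HASH structures and uses compare_hostname() for wildcard host matching (illustration only):

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical flattening of GRANT_NAME entries.
struct RoutineGrant { std::string user, host, db; };

// Returns false ("no error") when at least one routine grant in `grants`
// matches the user and the database being accessed.
static bool check_grant_db_routine(const std::string &user,
                                   const std::string &host,
                                   const std::string &db,
                                   const std::vector<RoutineGrant> &grants)
{
  for (const RoutineGrant &g : grants)
    if (g.user == user && g.db == db && g.host == host)
      return false;          // found a grant: access to the db is allowed
  return true;               // no routine grant found
}

int main()
{
  std::vector<RoutineGrant> proc_grants= {{"app", "localhost", "shop"}};
  bool denied= check_grant_db_routine("app", "localhost", "shop", proc_grants);
  std::printf("access %s\n", denied ? "denied" : "granted");
  return 0;
}

Note the inverted convention carried over from the hunk above: FALSE means "no error", i.e. access is allowed.
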
+*/ + +bool close_cached_connection_tables(THD *thd, bool if_wait_for_refresh, + LEX_STRING *connection, bool have_lock) +{ + uint idx; + TABLE_LIST tmp, *tables= NULL; + bool result= FALSE; + DBUG_ENTER("close_cached_connections"); + DBUG_ASSERT(thd); + + bzero(&tmp, sizeof(TABLE_LIST)); + + if (!have_lock) + VOID(pthread_mutex_lock(&LOCK_open)); + + for (idx= 0; idx < table_def_cache.records; idx++) + { + TABLE_SHARE *share= (TABLE_SHARE *) hash_element(&table_def_cache, idx); + + /* Ignore if table is not open or does not have a connect_string */ + if (!share->connect_string.length || !share->ref_count) + continue; + + /* Compare the connection string */ + if (connection && + (connection->length > share->connect_string.length || + (connection->length < share->connect_string.length && + (share->connect_string.str[connection->length] != '/' && + share->connect_string.str[connection->length] != '\\')) || + strncasecmp(connection->str, share->connect_string.str, + connection->length))) + continue; + + /* close_cached_tables() only uses these elements */ + tmp.db= share->db.str; + tmp.table_name= share->table_name.str; + tmp.next_local= tables; + + tables= (TABLE_LIST *) memdup_root(thd->mem_root, (char*)&tmp, + sizeof(TABLE_LIST)); + } + + if (tables) + result= close_cached_tables(thd, FALSE, tables, TRUE); + + if (!have_lock) + VOID(pthread_mutex_unlock(&LOCK_open)); + + if (if_wait_for_refresh) + { + pthread_mutex_lock(&thd->mysys_var->mutex); + thd->mysys_var->current_mutex= 0; + thd->mysys_var->current_cond= 0; + thd->proc_info=0; + pthread_mutex_unlock(&thd->mysys_var->mutex); + } + + DBUG_RETURN(result); +} + + +/* Mark all tables in the list which were used by current substatement as free for reuse. @@ -1328,10 +1394,10 @@ void close_temporary_tables(THD *thd) due to special characters in the names */ append_identifier(thd, &s_query, table->s->db.str, strlen(table->s->db.str)); - s_query.q_append('.'); + s_query.append('.'); append_identifier(thd, &s_query, table->s->table_name.str, strlen(table->s->table_name.str)); - s_query.q_append(','); + s_query.append(','); next= table->next; close_temporary(table, 1, 1); } @@ -1410,6 +1476,7 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, thd thread handle table table which should be checked table_list list of tables + check_alias whether to check tables' aliases NOTE: to exclude derived tables from check we use following mechanism: a) during derived table processing set THD::derived_tables_processing @@ -1437,10 +1504,11 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, 0 if table is unique */ -TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list) +TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, + bool check_alias) { TABLE_LIST *res; - const char *d_name, *t_name; + const char *d_name, *t_name, *t_alias; DBUG_ENTER("unique_table"); DBUG_PRINT("enter", ("table alias: %s", table->alias)); @@ -1468,6 +1536,7 @@ TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list) } d_name= table->db; t_name= table->table_name; + t_alias= table->alias; DBUG_PRINT("info", ("real table: %s.%s", d_name, t_name)); for (;;) @@ -1475,6 +1544,9 @@ TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list) if (((! (res= find_table_in_global_list(table_list, d_name, t_name))) && (! (res= mysql_lock_have_duplicate(thd, table, table_list)))) || ((!res->table || res->table != table->table) && + (!check_alias || !(lower_case_table_names ? 
+ my_strcasecmp(files_charset_info, t_alias, res->alias) : + strcmp(t_alias, res->alias))) && res->select_lex && !res->select_lex->exclude_from_table_unique_test && !res->prelocking_placeholder)) break; @@ -1845,8 +1917,6 @@ bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) table->const_table=0; table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= share->keys_in_use; - table->used_keys= share->keys_for_keyread; DBUG_RETURN(FALSE); } @@ -2265,11 +2335,12 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->const_table=0; table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->s->keys_in_use; table->insert_values= 0; - table->used_keys= table->s->keys_for_keyread; table->fulltext_searched= 0; table->file->ft_handler= 0; + /* Catch wrong handling of the auto_increment_field_not_null. */ + DBUG_ASSERT(!table->auto_increment_field_not_null); + table->auto_increment_field_not_null= FALSE; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); table->pos_in_table_list= table_list; @@ -2352,8 +2423,6 @@ static bool reopen_table(TABLE *table) tmp.null_row= table->null_row; tmp.maybe_null= table->maybe_null; tmp.status= table->status; - tmp.keys_in_use_for_query= tmp.s->keys_in_use; - tmp.used_keys= tmp.s->keys_for_keyread; tmp.s->table_map_id= table->s->table_map_id; @@ -3746,7 +3815,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked share->tmp_table= (tmp_table->file->has_transactions() ? - TRANSACTIONAL_TMP_TABLE : TMP_TABLE); + TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE); if (link_in_list) { @@ -3814,7 +3883,7 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table) been set for all fields (for example for view). */ - table->used_keys.intersect(field->part_of_key); + table->covering_keys.intersect(field->part_of_key); table->merge_keys.merge(field->part_of_key); if (thd->mark_used_columns == MARK_COLUMNS_READ) @@ -4490,14 +4559,35 @@ find_field_in_tables(THD *thd, Item_ident *item, { Field *cur_field= find_field_in_table_ref(thd, cur_table, name, length, item->name, db, table_name, ref, - check_privileges, allow_rowid, + check_privileges, + allow_rowid, &(item->cached_field_index), register_tree_change, &actual_table); if (cur_field) { if (cur_field == WRONG_GRANT) - return (Field*) 0; + { + if (thd->lex->sql_command != SQLCOM_SHOW_FIELDS) + return (Field*) 0; + + thd->clear_error(); + cur_field= find_field_in_table_ref(thd, cur_table, name, length, + item->name, db, table_name, ref, + false, + allow_rowid, + &(item->cached_field_index), + register_tree_change, + &actual_table); + if (cur_field) + { + Field *nf=new Field_null(NULL,0,Field::NONE, + cur_field->field_name, + &my_charset_bin); + nf->init(cur_table->table); + cur_field= nf; + } + } /* Store the original table of the field, which may be different from @@ -4520,7 +4610,7 @@ find_field_in_tables(THD *thd, Item_ident *item, report_error == IGNORE_EXCEPT_NON_UNIQUE) my_error(ER_NON_UNIQ_ERROR, MYF(0), table_name ? 
item->full_name() : name, thd->where); - return (Field*) 0; + return (Field*) 0; } found= cur_field; } @@ -4577,10 +4667,13 @@ find_field_in_tables(THD *thd, Item_ident *item, return not_found_item, report other errors, return 0 IGNORE_ERRORS Do not report errors, return 0 if error - unaliased Set to true if item is field which was found - by original field name and not by its alias - in item list. Set to false otherwise. - + resolution Set to the resolution type if the item is found + (it says whether the item is resolved + against an alias name, + or as a field name without alias, + or as a field hidden by alias, + or ignoring alias) + RETURN VALUES 0 Item is not found or item is not unique, error message is reported @@ -4596,7 +4689,8 @@ Item **not_found_item= (Item**) 0x1; Item ** find_item_in_list(Item *find, List<Item> &items, uint *counter, - find_item_error_report_type report_error, bool *unaliased) + find_item_error_report_type report_error, + enum_resolution_type *resolution) { List_iterator<Item> li(items); Item **found=0, **found_unaliased= 0, *item; @@ -4610,10 +4704,9 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, */ bool is_ref_by_name= 0; uint unaliased_counter; - LINT_INIT(unaliased_counter); // Dependent on found_unaliased - *unaliased= FALSE; + *resolution= NOT_RESOLVED; is_ref_by_name= (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM); @@ -4680,63 +4773,77 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, } found_unaliased= li.ref(); unaliased_counter= i; + *resolution= RESOLVED_IGNORING_ALIAS; if (db_name) break; // Perfect match } } - else if (!my_strcasecmp(system_charset_info, item_field->name, - field_name)) - { - /* - If table name was not given we should scan through aliases - (or non-aliased fields) first. We are also checking unaliased - name of the field in then next else-if, to be able to find - instantly field (hidden by alias) if no suitable alias (or - non-aliased field) was found. - */ - if (found) - { - if ((*found)->eq(item, 0)) - continue; // Same field twice - if (report_error != IGNORE_ERRORS) - my_error(ER_NON_UNIQ_ERROR, MYF(0), - find->full_name(), current_thd->where); - return (Item**) 0; - } - found= li.ref(); - *counter= i; - } - else if (!my_strcasecmp(system_charset_info, item_field->field_name, - field_name)) + else { - /* - We will use un-aliased field or react on such ambiguities only if - we won't be able to find aliased field. - Again if we have ambiguity with field outside of select list - we should prefer fields from select list. - */ - if (found_unaliased) + int fname_cmp= my_strcasecmp(system_charset_info, + item_field->field_name, + field_name); + if (!my_strcasecmp(system_charset_info, + item_field->name,field_name)) { - if ((*found_unaliased)->eq(item, 0)) - continue; // Same field twice - found_unaliased_non_uniq= 1; + /* + If table name was not given we should scan through aliases + and non-aliased fields first. We are also checking unaliased + name of the field in then next else-if, to be able to find + instantly field (hidden by alias) if no suitable alias or + non-aliased field was found. + */ + if (found) + { + if ((*found)->eq(item, 0)) + continue; // Same field twice + if (report_error != IGNORE_ERRORS) + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); + return (Item**) 0; + } + found= li.ref(); + *counter= i; + *resolution= fname_cmp ? 
RESOLVED_AGAINST_ALIAS: + RESOLVED_WITH_NO_ALIAS; } - else + else if (!fname_cmp) { + /* + We will use non-aliased field or react on such ambiguities only if + we won't be able to find aliased field. + Again if we have ambiguity with field outside of select list + we should prefer fields from select list. + */ + if (found_unaliased) + { + if ((*found_unaliased)->eq(item, 0)) + continue; // Same field twice + found_unaliased_non_uniq= 1; + } found_unaliased= li.ref(); unaliased_counter= i; } } } - else if (!table_name && (find->eq(item,0) || - is_ref_by_name && find->name && item->name && - !my_strcasecmp(system_charset_info, - item->name,find->name))) - { - found= li.ref(); - *counter= i; - break; - } + else if (!table_name) + { + if (is_ref_by_name && find->name && item->name && + !my_strcasecmp(system_charset_info,item->name,find->name)) + { + found= li.ref(); + *counter= i; + *resolution= RESOLVED_AGAINST_ALIAS; + break; + } + else if (find->eq(item,0)) + { + found= li.ref(); + *counter= i; + *resolution= RESOLVED_IGNORING_ALIAS; + break; + } + } } if (!found) { @@ -4751,7 +4858,7 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, { found= found_unaliased; *counter= unaliased_counter; - *unaliased= TRUE; + *resolution= RESOLVED_BEHIND_ALIAS; } } if (found) @@ -5037,7 +5144,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, TABLE *table_1= nj_col_1->table_ref->table; /* Mark field_1 used for table cache. */ bitmap_set_bit(table_1->read_set, field_1->field_index); - table_1->used_keys.intersect(field_1->part_of_key); + table_1->covering_keys.intersect(field_1->part_of_key); table_1->merge_keys.merge(field_1->part_of_key); } if (field_2) @@ -5045,7 +5152,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, TABLE *table_2= nj_col_2->table_ref->table; /* Mark field_2 used for table cache. */ bitmap_set_bit(table_2->read_set, field_2->field_index); - table_2->used_keys.intersect(field_2->part_of_key); + table_2->covering_keys.intersect(field_2->part_of_key); table_2->merge_keys.merge(field_2->part_of_key); } @@ -5486,7 +5593,8 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, Item_int do not need fix_fields() because it is basic constant. 
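
The find_item_in_list() rewrite above replaces the old boolean "unaliased" output with enum_resolution_type, recording whether a name matched a select-list alias, an unaliased field, or only a field hidden behind an alias (the fallback case). A toy resolver showing that preference order, with hypothetical item and enum names modelled on the diff (illustration only):

#include <cstdio>
#include <string>
#include <vector>

enum Resolution
{
  NOT_RESOLVED,
  RESOLVED_AGAINST_ALIAS,     // matched the select-list alias
  RESOLVED_WITH_NO_ALIAS,     // matched an unaliased field name
  RESOLVED_BEHIND_ALIAS       // matched a field name hidden by an alias
};

struct SelectItem { std::string field_name, alias; };

// Prefer alias (or unaliased field) matches; fall back to a field hidden
// behind an alias only if nothing else matched.
static int resolve(const std::string &name,
                   const std::vector<SelectItem> &items, Resolution *res)
{
  int behind_alias= -1;
  for (size_t i= 0; i < items.size(); i++)
  {
    const SelectItem &it= items[i];
    bool alias_match= (it.alias == name);
    bool field_match= (it.field_name == name);
    if (alias_match || (field_match && it.alias.empty()))
    {
      *res= (alias_match && !field_match) ? RESOLVED_AGAINST_ALIAS
                                          : RESOLVED_WITH_NO_ALIAS;
      return (int) i;
    }
    if (field_match && behind_alias < 0)
      behind_alias= (int) i;
  }
  if (behind_alias >= 0)
  {
    *res= RESOLVED_BEHIND_ALIAS;
    return behind_alias;
  }
  *res= NOT_RESOLVED;
  return -1;
}

int main()
{
  std::vector<SelectItem> items= {{"price", "p"}, {"qty", ""}};
  Resolution r;
  int idx= resolve("price", items, &r);   // hidden behind alias "p"
  std::printf("index=%d resolution=%d\n", idx, (int) r);
  return 0;
}
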
*/ - it.replace(new Item_int("Not_used", (longlong) 1, 21)); + it.replace(new Item_int("Not_used", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); } else if (insert_fields(thd, ((Item_field*) item)->context, ((Item_field*) item)->db_name, @@ -5533,6 +5641,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, enum_mark_columns save_mark_used_columns= thd->mark_used_columns; nesting_map save_allow_sum_func= thd->lex->allow_sum_func; List_iterator<Item> it(fields); + bool save_is_item_list_lookup; DBUG_ENTER("setup_fields"); thd->mark_used_columns= mark_used_columns; @@ -5540,6 +5649,8 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, if (allow_sum_func) thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; thd->where= THD::DEFAULT_WHERE; + save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; + thd->lex->current_select->is_item_list_lookup= 0; /* To prevent fail on forward lookup we fill it with zerows, @@ -5562,6 +5673,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, if (!item->fixed && item->fix_fields(thd, it.ref()) || (item= *(it.ref()))->check_cols(1)) { + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; thd->lex->allow_sum_func= save_allow_sum_func; thd->mark_used_columns= save_mark_used_columns; DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns)); @@ -5575,6 +5687,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, thd->used_tables|= item->used_tables(); thd->lex->current_select->cur_pos_in_select_list++; } + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS; thd->lex->allow_sum_func= save_allow_sum_func; @@ -5676,30 +5789,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context, tablenr= 0; } setup_table_map(table, table_list, tablenr); - table->used_keys= table->s->keys_for_keyread; - table->merge_keys.clear_all(); - if (table_list->use_index) - { - key_map map; - get_key_map_from_key_list(&map, table, table_list->use_index); - if (map.is_set_all()) - DBUG_RETURN(1); - /* - Don't introduce keys in keys_in_use_for_query that weren't there - before. FORCE/USE INDEX should not add keys, it should only remove - all keys except the key(s) specified in the hint. - */ - table->keys_in_use_for_query.intersect(map); - } - if (table_list->ignore_index) - { - key_map map; - get_key_map_from_key_list(&map, table, table_list->ignore_index); - if (map.is_set_all()) - DBUG_RETURN(1); - table->keys_in_use_for_query.subtract(map); - } - table->used_keys.intersect(table->keys_in_use_for_query); + if (table_list->process_index_hints(table)) + DBUG_RETURN(1); } if (tablenr > MAX_TABLES) { @@ -5775,13 +5866,14 @@ bool setup_tables_and_check_access(THD *thd, &leaves_tmp, select_insert)) return TRUE; - *leaves= leaves_tmp; + if (leaves) + *leaves= leaves_tmp; for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf) { if (leaves_tmp->belong_to_view && check_single_table_access(thd, first_table ? 
want_access_first : - want_access, leaves_tmp)) + want_access, leaves_tmp, FALSE)) { tables->hide_view_error(thd); return TRUE; @@ -5981,7 +6073,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, bitmap_set_bit(field->table->read_set, field->field_index); if (table) { - table->used_keys.intersect(field->part_of_key); + table->covering_keys.intersect(field->part_of_key); table->merge_keys.merge(field->part_of_key); } if (tables->is_natural_join) @@ -5999,7 +6091,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, if (field_table) { thd->used_tables|= field_table->map; - field_table->used_keys.intersect(field->part_of_key); + field_table->covering_keys.intersect(field->part_of_key); field_table->merge_keys.merge(field->part_of_key); field_table->used_fields++; } @@ -6068,6 +6160,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, */ bool it_is_update= (select_lex == &thd->lex->select_lex) && thd->lex->which_check_option_applicable(); + bool save_is_item_list_lookup= select_lex->is_item_list_lookup; + select_lex->is_item_list_lookup= 0; DBUG_ENTER("setup_conds"); if (select_lex->conds_processed_with_permanent_arena || @@ -6143,9 +6237,11 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, select_lex->where= *conds; select_lex->conds_processed_with_permanent_arena= 1; } + thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; DBUG_RETURN(test(thd->net.report_error)); err_no_arena: + select_lex->is_item_list_lookup= save_is_item_list_lookup; DBUG_RETURN(1); } @@ -6166,6 +6262,11 @@ err_no_arena: values values to fill with ignore_errors TRUE if we should ignore errors + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. + RETURN FALSE OK TRUE error occured @@ -6178,27 +6279,52 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values, List_iterator_fast<Item> f(fields),v(values); Item *value, *fld; Item_field *field; + TABLE *table= 0; DBUG_ENTER("fill_record"); + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (fields.elements) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. + */ + fld= (Item_field*)f++; + if (!(field= fld->filed_for_view_update())) + { + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); + goto err; + } + table= field->field->table; + table->auto_increment_field_not_null= FALSE; + f.rewind(); + } while ((fld= f++)) { if (!(field= fld->filed_for_view_update())) { my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); - DBUG_RETURN(TRUE); + goto err; } value=v++; Field *rfield= field->field; - TABLE *table= rfield->table; + table= rfield->table; if (rfield == table->next_number_field) table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) { my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0)); - DBUG_RETURN(TRUE); + goto err; } } DBUG_RETURN(thd->net.report_error); +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } @@ -6247,6 +6373,11 @@ fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields, values list of fields ignore_errors TRUE if we should ignore errors + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. 
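
The fill_record() notes above stress that table->auto_increment_field_not_null is only meaningful for a single row, so both fill_record() variants now clear it up front and again on every error path, leaving the reset after the last row to the caller. A stripped-down sketch of that discipline with hypothetical Table and Field structs (illustration only):

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for TABLE and Field.
struct Table { bool auto_increment_field_not_null= false; };
struct Field { Table *table; bool is_auto_increment; int value; };

// Fill one row worth of fields; the auto-increment flag never leaks
// from one row (or one failed row) into the next.
static bool fill_record(std::vector<Field> &fields, const std::vector<int> &values)
{
  Table *table= nullptr;
  if (!fields.empty())
  {
    table= fields[0].table;
    table->auto_increment_field_not_null= false;      // valid for one row only
  }
  for (size_t i= 0; i < fields.size(); i++)
  {
    Field &f= fields[i];
    if (f.is_auto_increment)
      f.table->auto_increment_field_not_null= true;
    if (i >= values.size())                           // stand-in for save_in_field() failure
    {
      if (table)
        table->auto_increment_field_not_null= false;  // reset on the error path too
      return true;                                    // true == error, as in the server
    }
    f.value= values[i];
  }
  return false;
}

int main()
{
  Table t;
  std::vector<Field> row= {{&t, true, 0}, {&t, false, 0}};
  bool err= fill_record(row, {42, 7});
  std::printf("err=%d auto_inc_not_null=%d\n", (int) err,
              (int) t.auto_increment_field_not_null);
  return 0;
}
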
+ RETURN FALSE OK TRUE error occured @@ -6257,19 +6388,38 @@ fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors) { List_iterator_fast<Item> v(values); Item *value; + TABLE *table= 0; DBUG_ENTER("fill_record"); Field *field; + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (*ptr) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. + */ + table= (*ptr)->table; + table->auto_increment_field_not_null= FALSE; + } while ((field = *ptr++)) { value=v++; - TABLE *table= field->table; + table= field->table; if (field == table->next_number_field) table->auto_increment_field_not_null= TRUE; - if (value->save_in_field(field, 0) == -1) - DBUG_RETURN(TRUE); + if (value->save_in_field(field, 0) < 0) + goto err; } DBUG_RETURN(thd->net.report_error); + +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } @@ -6874,3 +7024,122 @@ has_two_write_locked_tables_with_auto_increment(TABLE_LIST *tables) } return 0; } + + +/* + Open and lock system tables for read. + + SYNOPSIS + open_system_tables_for_read() + thd Thread context. + table_list List of tables to open. + backup Pointer to Open_tables_state instance where + information about currently open tables will be + saved, and from which will be restored when we will + end work with system tables. + + NOTES + Thanks to restrictions which we put on opening and locking of + system tables for writing, we can open and lock them for reading + even when we already have some other tables open and locked. One + must call close_system_tables() to close systems tables opened + with this call. + + RETURN + FALSE Success + TRUE Error +*/ + +bool +open_system_tables_for_read(THD *thd, TABLE_LIST *table_list, + Open_tables_state *backup) +{ + DBUG_ENTER("open_system_tables_for_read"); + + thd->reset_n_backup_open_tables_state(backup); + + uint count= 0; + bool not_used; + for (TABLE_LIST *tables= table_list; tables; tables= tables->next_global) + { + TABLE *table= open_table(thd, tables, thd->mem_root, ¬_used, + MYSQL_LOCK_IGNORE_FLUSH); + if (!table) + goto error; + + DBUG_ASSERT(table->s->system_table); + + table->use_all_columns(); + table->reginfo.lock_type= tables->lock_type; + tables->table= table; + count++; + } + + { + TABLE **list= (TABLE**) thd->alloc(sizeof(TABLE*) * count); + TABLE **ptr= list; + for (TABLE_LIST *tables= table_list; tables; tables= tables->next_global) + *(ptr++)= tables->table; + + thd->lock= mysql_lock_tables(thd, list, count, + MYSQL_LOCK_IGNORE_FLUSH, ¬_used); + } + if (thd->lock) + DBUG_RETURN(FALSE); + +error: + close_system_tables(thd, backup); + + DBUG_RETURN(TRUE); +} + + +/* + Close system tables, opened with open_system_tables_for_read(). + + SYNOPSIS + close_system_tables() + thd Thread context + backup Pointer to Open_tables_state instance which holds + information about tables which were open before we + decided to access system tables. +*/ + +void +close_system_tables(THD *thd, Open_tables_state *backup) +{ + close_thread_tables(thd); + thd->restore_backup_open_tables_state(backup); +} + + +/* + Open and lock one system table for update. + + SYNOPSIS + open_system_table_for_update() + thd Thread context. + one_table Table to open. + + NOTES + Table opened with this call should closed using close_thread_tables(). 
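(Schematic usage of the read-path helpers introduced above, mirroring how the sql_help.cc hunk further down in this same patch calls them; this is a fragment of server-internal code, not a standalone program: open_system_tables_for_read() snapshots the current open-tables state into the backup object, and close_system_tables() both closes the system tables and restores that snapshot.)

  Open_tables_state open_tables_state_backup;
  if (open_system_tables_for_read(thd, tables, &open_tables_state_backup))
    return TRUE;                        /* nothing was left open or locked */
  /* ... read from tables->table here ... */
  close_system_tables(thd, &open_tables_state_backup);  /* closes + restores */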
+ + RETURN + 0 Error + # Pointer to TABLE object of system table +*/ + +TABLE * +open_system_table_for_update(THD *thd, TABLE_LIST *one_table) +{ + DBUG_ENTER("open_system_table_for_update"); + + TABLE *table= open_ltable(thd, one_table, one_table->lock_type); + if (table) + { + DBUG_ASSERT(table->s->system_table); + table->use_all_columns(); + } + + DBUG_RETURN(table); +} diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index b0a54bec664..d3fdd95707c 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -14,6 +14,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysql_priv.h" +#include "rpl_rli.h" #include "base64.h" /* @@ -163,9 +164,17 @@ void mysql_client_binlog_statement(THD* thd) (ulong) uint4korr(bufptr+EVENT_LEN_OFFSET))); #endif ev->thd= thd; - if (IF_DBUG(int err= ) ev->exec_event(thd->rli_fake)) + /* + We go directly to the application phase, since we don't need + to check if the event shall be skipped or not. + + Neither do we have to update the log positions, since that is + not used at all: the rli_fake instance is used only for error + reporting. + */ + if (IF_DBUG(int err= ) ev->apply_event(thd->rli_fake)) { - DBUG_PRINT("error", ("exec_event() returned: %d", err)); + DBUG_PRINT("info", ("apply_event() returned: %d", err)); /* TODO: Maybe a better error message since the BINLOG statement now contains several events. diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index c06d7161ec1..8d8838d4585 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -377,7 +377,7 @@ inline Query_cache_block * Query_cache_block_table::block() void Query_cache_block::init(ulong block_length) { DBUG_ENTER("Query_cache_block::init"); - DBUG_PRINT("qcache", ("init block 0x%lx length: %lu", (ulong) this, + DBUG_PRINT("qcache", ("init block: 0x%lx length: %lu", (ulong) this, block_length)); length = block_length; used = 0; @@ -844,6 +844,12 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG); flags.client_protocol_41= test(thd->client_capabilities & CLIENT_PROTOCOL_41); + /* + Protocol influences result format, so statement results in the binary + protocol (COM_EXECUTE) cannot be served to statements asking for results + in the text protocol (COM_QUERY) and vice-versa. 
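(Why the new flag has to become part of the cache key, as a small standalone sketch with illustrative types rather than the real Query_cache: the protocol type is folded into the per-query flags, so a result produced for the binary protocol can never be handed to a text-protocol client, and vice versa.)

  #include <cstddef>
  #include <functional>
  #include <string>

  struct DemoQueryFlags
  {
    bool client_protocol_41;
    bool result_in_binary_protocol;   /* the field this patch adds */
    unsigned charset_client;
  };

  /* Fold the flags into the lookup key: identical SQL text sent over
     COM_QUERY and over the prepared-statement protocol hashes differently. */
  static std::size_t demo_cache_key(const std::string &query,
                                    const DemoQueryFlags &f)
  {
    std::size_t h= std::hash<std::string>()(query);
    h^= (std::size_t(f.client_protocol_41)        << 1) ^
        (std::size_t(f.result_in_binary_protocol) << 2) ^
        (std::size_t(f.charset_client)            << 3);
    return h;
  }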
+ */ + flags.result_in_binary_protocol= (unsigned int) thd->protocol->type(); flags.more_results_exists= test(thd->server_status & SERVER_MORE_RESULTS_EXISTS); flags.pkt_nr= net->pkt_nr; @@ -861,11 +867,13 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) flags.max_sort_length= thd->variables.max_sort_length; flags.lc_time_names= thd->variables.lc_time_names; flags.group_concat_max_len= thd->variables.group_concat_max_len; - DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \ + DBUG_PRINT("qcache", ("\ +long %d, 4.1: %d, bin_proto: %d, more results %d, pkt_nr: %d, \ CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", (int)flags.client_long_flag, (int)flags.client_protocol_41, + (int)flags.result_in_binary_protocol, (int)flags.more_results_exists, flags.pkt_nr, flags.character_set_client_num, @@ -1089,6 +1097,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG); flags.client_protocol_41= test(thd->client_capabilities & CLIENT_PROTOCOL_41); + flags.result_in_binary_protocol= (unsigned int)thd->protocol->type(); flags.more_results_exists= test(thd->server_status & SERVER_MORE_RESULTS_EXISTS); flags.pkt_nr= thd->net.pkt_nr; @@ -1104,11 +1113,13 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) flags.max_sort_length= thd->variables.max_sort_length; flags.group_concat_max_len= thd->variables.group_concat_max_len; flags.lc_time_names= thd->variables.lc_time_names; - DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \ + DBUG_PRINT("qcache", ("\ +long %d, 4.1: %d, bin_proto: %d, more results %d, pkt_nr: %d, \ CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ sql mode: 0x%lx, sort len: %lu, conncat len: %lu", (int)flags.client_long_flag, (int)flags.client_protocol_41, + (int)flags.result_in_binary_protocol, (int)flags.more_results_exists, flags.pkt_nr, flags.character_set_client_num, @@ -3048,11 +3059,10 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, TABLE_COUNTER_TYPE table_count; DBUG_ENTER("Query_cache::is_cacheable"); - if (lex->sql_command == SQLCOM_SELECT && + if (query_cache_is_cacheable_query(lex) && (thd->variables.query_cache_type == 1 || (thd->variables.query_cache_type == 2 && (lex->select_lex.options & - OPTION_TO_QUERY_CACHE))) && - lex->safe_to_cache_query) + OPTION_TO_QUERY_CACHE)))) { DBUG_PRINT("qcache", ("options: %lx %lx type: %u", (long) OPTION_TO_QUERY_CACHE, @@ -3675,7 +3685,7 @@ void Query_cache::queries_dump() Query_cache_query_flags flags; memcpy(&flags, str+len, QUERY_CACHE_FLAGS_SIZE); str[len]= 0; // make zero ending DB name - DBUG_PRINT("qcache", ("F:%u C:%u L:%lu T:'%s' (%u) '%s' '%s'", + DBUG_PRINT("qcache", ("F: %u C: %u L: %lu T: '%s' (%u) '%s' '%s'", flags.client_long_flag, flags.character_set_client_num, (ulong)flags.limit, @@ -3998,7 +4008,7 @@ my_bool Query_cache::check_integrity(bool locked) } while (block != bins[i].free_blocks); if (count != bins[i].number) { - DBUG_PRINT("error", ("bins[%d].number = %d, but bin have %d blocks", + DBUG_PRINT("error", ("bins[%d].number= %d, but bin have %d blocks", i, bins[i].number, count)); result = 1; } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 96a84d2fd46..fd44817811e 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -26,6 +26,8 @@ #endif #include "mysql_priv.h" +#include "rpl_rli.h" +#include 
"rpl_record.h" #include <my_bitmap.h> #include "log_event.h" #include <m_ctype.h> @@ -303,9 +305,9 @@ THD::THD() bzero((char*) &user_var_events, sizeof(user_var_events)); /* Protocol */ - protocol= &protocol_simple; // Default protocol - protocol_simple.init(this); - protocol_prep.init(this); + protocol= &protocol_text; // Default protocol + protocol_text.init(this); + protocol_binary.init(this); tablespace_op=FALSE; tmp= sql_rnd_with_mutex(); @@ -367,6 +369,7 @@ void THD::init(void) if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES; options= thd_startup_options; + no_trans_update.stmt= no_trans_update.all= FALSE; open_options=ha_open_options; update_lock_default= (variables.low_priority_updates ? TL_WRITE_LOW_PRIORITY : @@ -378,7 +381,6 @@ void THD::init(void) update_charset(); reset_current_stmt_binlog_row_based(); bzero((char *) &status_var, sizeof(status_var)); - variables.lc_time_names = &my_locale_en_US; } @@ -419,6 +421,7 @@ void THD::init_for_queries() void THD::change_user(void) { cleanup(); + killed= NOT_KILLED; cleanup_done= 0; init(); stmt_map.reset(); @@ -437,6 +440,7 @@ void THD::cleanup(void) DBUG_ENTER("THD::cleanup"); DBUG_ASSERT(cleanup_done == 0); + killed= KILL_CONNECTION; #ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE if (transaction.xid_state.xa_state == XA_PREPARED) { @@ -679,11 +683,22 @@ bool THD::store_globals() void THD::cleanup_after_query() { + /* + Reset rand_used so that detection of calls to rand() will save random + seeds if needed by the slave. + + Do not reset rand_used if inside a stored function or trigger because + only the call to these operations is logged. Thus only the calling + statement needs to detect rand() calls made by its substatements. These + substatements must not set rand_used to 0 because it would remove the + detection of rand() by the calling statement. 
+ */ if (!in_sub_stmt) /* stored functions and triggers are a special case */ { /* Forget those values, for next binlogger: */ stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + rand_used= 0; } if (first_successful_insert_id_in_cur_stmt > 0) { @@ -830,7 +845,8 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %ld %u", key_length, (*prev_changed)->key_length)); + ("key_length: %ld %u", key_length, + (*prev_changed)->key_length)); DBUG_VOID_RETURN; } else if (cmp == 0) @@ -840,7 +856,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %ld %u", key_length, + ("key_length: %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } @@ -852,7 +868,7 @@ void THD::add_changed_table(const char *key, long key_length) } } *prev_changed = changed_table_dup(key, key_length); - DBUG_PRINT("info", ("key_length %ld %u", key_length, + DBUG_PRINT("info", ("key_length: %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } @@ -887,7 +903,7 @@ int THD::send_explain_fields(select_result *result) CHARSET_INFO *cs= system_charset_info; field_list.push_back(new Item_return_int("id",3, MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("select_type", 19, cs)); - field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs)); + field_list.push_back(item= new Item_empty_string("table", NAME_CHAR_LEN, cs)); item->maybe_null= 1; if (lex->describe & DESCRIBE_PARTITIONS) { @@ -900,15 +916,16 @@ int THD::send_explain_fields(select_result *result) field_list.push_back(item= new Item_empty_string("type", 10, cs)); item->maybe_null= 1; field_list.push_back(item=new Item_empty_string("possible_keys", - NAME_LEN*MAX_KEY, cs)); + NAME_CHAR_LEN*MAX_KEY, cs)); item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("key", NAME_LEN, cs)); + field_list.push_back(item=new Item_empty_string("key", NAME_CHAR_LEN, cs)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("key_len", - NAME_LEN*MAX_KEY)); + NAME_CHAR_LEN*MAX_KEY)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("ref", - NAME_LEN*MAX_REF_PARTS, cs)); + NAME_CHAR_LEN*MAX_REF_PARTS, + cs)); item->maybe_null=1; field_list.push_back(item= new Item_return_int("rows", 10, MYSQL_TYPE_LONGLONG)); @@ -1322,7 +1339,6 @@ bool select_export::send_data(List<Item> &items) } row_count++; Item *item; - char *buff_ptr=buff; uint used_length=0,items_left=items.elements; List_iterator_fast<Item> li(items); @@ -1422,19 +1438,18 @@ bool select_export::send_data(List<Item> &items) goto err; } } - buff_ptr=buff; // Place separators here if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT)) { - memcpy(buff_ptr,exchange->enclosed->ptr(),exchange->enclosed->length()); - buff_ptr+=exchange->enclosed->length(); + if (my_b_write(&cache, (byte*) exchange->enclosed->ptr(), + exchange->enclosed->length())) + goto err; } if (--items_left) { - memcpy(buff_ptr,exchange->field_term->ptr(),field_term_length); - buff_ptr+=field_term_length; + if (my_b_write(&cache, (byte*) exchange->field_term->ptr(), + field_term_length)) + goto err; } - if (my_b_write(&cache,(byte*) buff,(uint) (buff_ptr-buff))) - goto err; } if (my_b_write(&cache,(byte*) exchange->line_term->ptr(), 
exchange->line_term->length())) @@ -2110,6 +2125,102 @@ bool Security_context::set_user(char *user_arg) return user == 0; } +#ifndef NO_EMBEDDED_ACCESS_CHECKS +/** + Initialize this security context from the passed in credentials + and activate it in the current thread. + + @param[out] backup Save a pointer to the current security context + in the thread. In case of success it points to the + saved old context, otherwise it points to NULL. + + + During execution of a statement, multiple security contexts may + be needed: + - the security context of the authenticated user, used as the + default security context for all top-level statements + - in case of a view or a stored program, possibly the security + context of the definer of the routine, if the object is + defined with SQL SECURITY DEFINER option. + + The currently "active" security context is parameterized in THD + member security_ctx. By default, after a connection is + established, this member points at the "main" security context + - the credentials of the authenticated user. + + Later, if we would like to execute some sub-statement or a part + of a statement under credentials of a different user, e.g. + definer of a procedure, we authenticate this user in a local + instance of Security_context by means of this method (and + ultimately by means of acl_getroot_no_password), and make the + local instance active in the thread by re-setting + thd->security_ctx pointer. + + Note, that the life cycle and memory management of the "main" and + temporary security contexts are different. + For the main security context, the memory for user/host/ip is + allocated on system heap, and the THD class frees this memory in + its destructor. The only case when contents of the main security + context may change during its life time is when someone issued + CHANGE USER command. + Memory management of a "temporary" security context is + responsibility of the module that creates it. + + @retval TRUE there is no user with the given credentials. The erro + is reported in the thread. + @retval FALSE success +*/ + +bool +Security_context:: +change_security_context(THD *thd, + LEX_STRING *definer_user, + LEX_STRING *definer_host, + LEX_STRING *db, + Security_context **backup) +{ + bool needs_change; + + DBUG_ENTER("Security_context::change_security_context"); + + DBUG_ASSERT(definer_user->str && definer_host->str); + + *backup= NULL; + /* + The current security context may have NULL members + if we have just started the thread and not authenticated + any user. This use case is currently in events worker thread. + */ + needs_change= (thd->security_ctx->priv_user == NULL || + strcmp(definer_user->str, thd->security_ctx->priv_user) || + thd->security_ctx->priv_host == NULL || + my_strcasecmp(system_charset_info, definer_host->str, + thd->security_ctx->priv_host)); + if (needs_change) + { + if (acl_getroot_no_password(this, definer_user->str, definer_host->str, + definer_host->str, db->str)) + { + my_error(ER_NO_SUCH_USER, MYF(0), definer_user->str, + definer_host->str); + DBUG_RETURN(TRUE); + } + *backup= thd->security_ctx; + thd->security_ctx= this; + } + + DBUG_RETURN(FALSE); +} + + +void +Security_context::restore_security_context(THD *thd, + Security_context *backup) +{ + if (backup) + thd->security_ctx= backup; +} +#endif /**************************************************************************** Handling of open and locked tables states. 
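(The backup/restore protocol just added boils down to a simple pointer discipline: remember the previously active context only when an actual switch happened, so the restore is a no-op in the common case. A minimal standalone sketch, with illustrative Demo* types instead of Security_context/THD:)

  #include <cstring>

  struct DemoCtx { const char *user; };
  struct DemoThd { DemoCtx *active_ctx; };

  /* Activate `definer` if it differs from the current user.  On return,
     *backup is the previous context, or NULL when no switch was needed. */
  static bool demo_change_ctx(DemoThd *thd, DemoCtx *definer, DemoCtx **backup)
  {
    *backup= 0;
    bool needs_change= thd->active_ctx == 0 ||
                       strcmp(definer->user, thd->active_ctx->user) != 0;
    if (needs_change)
    {
      /* the real code authenticates the definer here and may fail */
      *backup= thd->active_ctx;
      thd->active_ctx= definer;
    }
    return false;                 /* TRUE would mean "no such user" */
  }

  static void demo_restore_ctx(DemoThd *thd, DemoCtx *backup)
  {
    if (backup)                   /* NULL means we never switched */
      thd->active_ctx= backup;
  }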
@@ -2544,31 +2655,6 @@ my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const } -my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data, - const byte *record) const -{ - Field **p_field= table->field, *field; - int n_null_bytes= table->s->null_bytes; - byte *ptr; - uint i; - my_ptrdiff_t const rec_offset= record - table->record[0]; - my_ptrdiff_t const def_offset= table->s->default_values - table->record[0]; - memcpy(row_data, record, n_null_bytes); - ptr= row_data+n_null_bytes; - - for (i= 0 ; (field= *p_field) ; i++, p_field++) - { - if (bitmap_is_set(cols,i)) - { - my_ptrdiff_t const offset= - field->is_null((uint) rec_offset) ? def_offset : rec_offset; - field->move_field_offset(offset); - ptr= (byte*)field->pack((char *) ptr, field->ptr); - field->move_field_offset(-offset); - } - } - return (static_cast<my_size_t>(ptr - row_data)); -} namespace { @@ -2739,9 +2825,9 @@ int THD::binlog_update_row(TABLE* table, bool is_trans, byte *before_row= row_data.slot(0); byte *after_row= row_data.slot(1); - my_size_t const before_size= pack_row(table, cols, before_row, + my_size_t const before_size= pack_row(table, cols, before_row, before_record); - my_size_t const after_size= pack_row(table, cols, after_row, + my_size_t const after_size= pack_row(table, cols, after_row, after_record); /* diff --git a/sql/sql_class.h b/sql/sql_class.h index ffc8b2c46d1..48a88087fd3 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -21,9 +21,11 @@ #endif #include "log.h" -#include "rpl_rli.h" #include "rpl_tblmap.h" +struct st_relay_log_info; +typedef st_relay_log_info RELAY_LOG_INFO; + class Query_log_event; class Load_log_event; class Slave_log_event; @@ -243,6 +245,11 @@ struct system_variables my_bool low_priority_updates; my_bool new_mode; + /* + compatibility option: + - index usage hints (USE INDEX without a FOR clause) behave as in 5.0 + */ + my_bool old_mode; my_bool query_cache_wlock_invalidate; my_bool engine_condition_pushdown; my_bool innodb_table_locks; @@ -273,11 +280,12 @@ struct system_variables Time_zone *time_zone; - /* DATE, DATETIME and TIME formats */ + /* DATE, DATETIME and MYSQL_TIME formats */ DATE_TIME_FORMAT *date_format; DATE_TIME_FORMAT *datetime_format; DATE_TIME_FORMAT *time_format; my_bool sysdate_is_now; + }; @@ -650,6 +658,18 @@ public: } bool set_user(char *user_arg); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + bool + change_security_context(THD *thd, + LEX_STRING *definer_user, + LEX_STRING *definer_host, + LEX_STRING *db, + Security_context **backup); + + void + restore_security_context(THD *thd, Security_context *backup); +#endif }; @@ -904,8 +924,8 @@ public: NET net; // client connection descriptor MEM_ROOT warn_root; // For warnings and errors Protocol *protocol; // Current protocol - Protocol_simple protocol_simple; // Normal protocol - Protocol_prep protocol_prep; // Binary protocol + Protocol_text protocol_text; // Normal protocol + Protocol_binary protocol_binary; // Binary protocol HASH user_vars; // hash for user variables String packet; // dynamic buffer for network I/O String convert_buffer; // buffer for charset conversions @@ -1035,9 +1055,6 @@ public: return (length+max_row_length_blob(table,data)); } - my_size_t pack_row(TABLE* table, MY_BITMAP const* cols, byte *row_data, - const byte *data) const; - int binlog_flush_pending_rows_event(bool stmt_end); void binlog_delete_pending_rows_event(); @@ -1264,7 +1281,7 @@ public: return first_successful_insert_id_in_prev_stmt; } /* - Used by Intvar_log_event::exec_event() and by 
"SET INSERT_ID=#" + Used by Intvar_log_event::do_apply_event() and by "SET INSERT_ID=#" (mysqlbinlog). We'll soon add a variant which can take many intervals in argument. */ @@ -1350,7 +1367,11 @@ public: bool charset_is_system_charset, charset_is_collation_connection; bool charset_is_character_set_filesystem; bool enable_slow_log; /* enable slow log for current statement */ - bool no_trans_update, abort_on_warning; + struct { + bool all:1; + bool stmt:1; + } no_trans_update; + bool abort_on_warning; bool got_warning; /* Set on call to push_warning() */ bool no_warnings_for_error; /* no warnings on call to my_error() */ /* set during loop of derived table processing */ @@ -1405,7 +1426,7 @@ public: #ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *work_part_info; #endif - + THD(); ~THD(); @@ -1578,7 +1599,7 @@ public: inline bool really_abort_on_warning() { return (abort_on_warning && - (!no_trans_update || + (!no_trans_update.stmt || (variables.sql_mode & MODE_STRICT_ALL_TABLES))); } void set_status_var_init(); diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 09ee4962235..d03a17079d8 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -315,6 +315,7 @@ int check_user(THD *thd, enum enum_server_command command, bool check_count) { DBUG_ENTER("check_user"); + LEX_STRING db_str= { (char *) db, db ? strlen(db) : 0 }; #ifdef NO_EMBEDDED_ACCESS_CHECKS thd->main_security_ctx.master_access= GLOBAL_ACLS; // Full rights @@ -326,7 +327,7 @@ int check_user(THD *thd, enum enum_server_command command, function returns 0 */ thd->reset_db(NULL, 0); - if (mysql_change_db(thd, db, FALSE)) + if (mysql_change_db(thd, &db_str, FALSE)) { /* Send the error to the client */ net_send_error(thd); @@ -472,7 +473,7 @@ int check_user(THD *thd, enum enum_server_command command, /* Change database if necessary */ if (db && db[0]) { - if (mysql_change_db(thd, db, FALSE)) + if (mysql_change_db(thd, &db_str, FALSE)) { /* Send error to the client */ net_send_error(thd); diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 4fd35b7e6e8..cfd610638ce 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -22,6 +22,7 @@ #include "events.h" #include <my_dir.h> #include <m_ctype.h> +#include "log.h" #ifdef __WIN__ #include <direct.h> #endif @@ -577,7 +578,7 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, DBUG_ENTER("mysql_create_db"); /* do not create 'information_schema' db */ - if (!my_strcasecmp(system_charset_info, db, information_schema_name.str)) + if (!my_strcasecmp(system_charset_info, db, INFORMATION_SCHEMA_NAME.str)) { my_error(ER_DB_CREATE_EXISTS, MYF(0), db); DBUG_RETURN(-1); @@ -953,7 +954,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) exit: (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ - Events::get_instance()->drop_schema_events(thd, db); + Events::drop_schema_events(thd, db); /* If this database was the client's selected database, we silently change the client's selected database to nothing (to have an empty @@ -1256,155 +1257,251 @@ err: } -/* - Change the current database. +/** + @brief Internal implementation: switch current database to a valid one. - SYNOPSIS - mysql_change_db() - thd thread handle - name database name - no_access_check if TRUE, don't do access check. In this - case name may be "" + @param thd Thread context. + @param new_db_name Name of the database to switch to. The function will + take ownership of the name (the caller must not free + the allocated memory). If the name is NULL, we're + going to switch to NULL db. 
+ @param new_db_access Privileges of the new database. + @param new_db_charset Character set of the new database. +*/ - DESCRIPTION - Check that the database name corresponds to a valid and - existent database, check access rights (unless called with - no_access_check), and set the current database. This function - is called to change the current database upon user request - (COM_CHANGE_DB command) or temporarily, to execute a stored - routine. +static void mysql_change_db_impl(THD *thd, + LEX_STRING *new_db_name, + ulong new_db_access, + CHARSET_INFO *new_db_charset) +{ + /* 1. Change current database in THD. */ - NOTES - This function is not the only way to switch the database that - is currently employed. When the replication slave thread - switches the database before executing a query, it calls - thd->set_db directly. However, if the query, in turn, uses - a stored routine, the stored routine will use this function, - even if it's run on the slave. - - This function allocates the name of the database on the system - heap: this is necessary to be able to uniformly change the - database from any module of the server. Up to 5.0 different - modules were using different memory to store the name of the - database, and this led to memory corruption: a stack pointer - set by Stored Procedures was used by replication after the - stack address was long gone. - - This function does not send anything, including error - messages, to the client. If that should be sent to the client, - call net_send_error after this function. + if (new_db_name == NULL) + { + /* + THD::set_db() does all the job -- it frees previous database name and + sets the new one. + */ - RETURN VALUES - 0 OK - 1 error + thd->set_db(NULL, 0); + } + else if (new_db_name == &INFORMATION_SCHEMA_NAME) + { + /* + Here we must use THD::set_db(), because we want to copy + INFORMATION_SCHEMA_NAME constant. + */ + + thd->set_db(INFORMATION_SCHEMA_NAME.str, INFORMATION_SCHEMA_NAME.length); + } + else + { + /* + Here we already have a copy of database name to be used in THD. So, + we just call THD::reset_db(). Since THD::reset_db() does not releases + the previous database name, we should do it explicitly. + */ + + x_free(thd->db); + + thd->reset_db(new_db_name->str, new_db_name->length); + } + + /* 2. Update security context. */ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + thd->security_ctx->db_access= new_db_access; +#endif + + /* 3. Update db-charset environment variables. */ + + thd->db_charset= new_db_charset; + thd->variables.collation_database= new_db_charset; +} + + +/** + @brief Change the current database. + + @param thd thread handle + @param name database name + @param force_switch if this flag is set (TRUE), mysql_change_db() will + switch to NULL db if the specified database is not + available anymore. Corresponding warning will be + thrown in this case. This flag is used to change + database in stored-routine-execution code. + + @details Check that the database name corresponds to a valid and existent + database, check access rights (unless called with no_access_check), and + set the current database. This function is called to change the current + database upon user request (COM_CHANGE_DB command) or temporarily, to + execute a stored routine. + + This function is not the only way to switch the database that is + currently employed. When the replication slave thread switches the + database before executing a query, it calls thd->set_db directly. 
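(A standalone sketch of the force_switch contract described above, with illustrative types: when the requested database is unusable, force_switch selects "switch to no database and push a warning" instead of "fail with an error". Not the server code; the real function also performs access checks and charset selection.)

  #include <string>

  /* Returns true on error, mirroring mysql_change_db(). */
  static bool demo_change_db(const std::string &name, bool force_switch,
                             std::string *current_db)
  {
    bool unusable= name.empty();      /* stands in for "db gone / bad name" */
    if (unusable)
    {
      if (force_switch)
      {
        current_db->clear();          /* switch to NULL db, push a warning */
        return false;
      }
      return true;                    /* ER_NO_DB_ERROR back to the client */
    }
    *current_db= name;                /* normal path: become current db */
    return false;
  }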
+ However, if the query, in turn, uses a stored routine, the stored routine + will use this function, even if it's run on the slave. + + This function allocates the name of the database on the system heap: this + is necessary to be able to uniformly change the database from any module + of the server. Up to 5.0 different modules were using different memory to + store the name of the database, and this led to memory corruption: + a stack pointer set by Stored Procedures was used by replication after + the stack address was long gone. + + @return Operation status + @retval FALSE Success + @retval TRUE Error */ -bool mysql_change_db(THD *thd, const char *name, bool no_access_check) +bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) { - LEX_STRING db_name; - bool system_db= 0; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - ulong db_access; + LEX_STRING new_db_file_name; + Security_context *sctx= thd->security_ctx; - LINT_INIT(db_access); -#endif + ulong db_access= sctx->db_access; + DBUG_ENTER("mysql_change_db"); - DBUG_PRINT("enter",("name: '%s'",name)); + DBUG_PRINT("enter",("name: '%s'", new_db_name->str)); - if (name == NULL || name[0] == '\0' && no_access_check == FALSE) + if (new_db_name == NULL || + new_db_name->length == 0) { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - DBUG_RETURN(1); /* purecov: inspected */ + if (force_switch) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR)); + + /* Change db to NULL. */ + + mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); + + DBUG_RETURN(FALSE); + } + else + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + + DBUG_RETURN(TRUE); + } } - else if (name[0] == '\0') + + if (my_strcasecmp(system_charset_info, new_db_name->str, + INFORMATION_SCHEMA_NAME.str) == 0) { - /* Called from SP to restore the original database, which was NULL */ - DBUG_ASSERT(no_access_check); - system_db= 1; - db_name.str= NULL; - db_name.length= 0; - goto end; + /* Switch database to INFORMATION_SCHEMA. */ + + mysql_change_db_impl(thd, &INFORMATION_SCHEMA_NAME, SELECT_ACL, + system_charset_info); + + DBUG_RETURN(FALSE); } + /* Now we need to make a copy because check_db_name requires a - non-constant argument. TODO: fix check_db_name. + non-constant argument. Actually, it takes database file name. + + TODO: fix check_db_name(). + */ + + new_db_file_name.str= my_strndup(new_db_name->str, new_db_name->length, + MYF(MY_WME)); + new_db_file_name.length= new_db_name->length; + + if (new_db_file_name.str == NULL) + DBUG_RETURN(TRUE); /* the error is set */ + + /* + NOTE: if check_db_name() fails, we should throw an error in any case, + even if we are called from sp_head::execute(). + + It's next to impossible however to get this error when we are called + from sp_head::execute(). But let's switch database to NULL in this case + to be sure. */ - if ((db_name.str= my_strdup(name, MYF(MY_WME))) == NULL) - DBUG_RETURN(1); /* the error is set */ - db_name.length= strlen(db_name.str); - if (check_db_name(&db_name)) + + if (check_db_name(&new_db_file_name)) { - my_error(ER_WRONG_DB_NAME, MYF(0), db_name.str); - my_free(db_name.str, MYF(0)); - DBUG_RETURN(1); + my_error(ER_WRONG_DB_NAME, MYF(0), new_db_file_name.str); + my_free(new_db_file_name.str, MYF(0)); + + if (force_switch) + { + /* Change db to NULL. 
*/ + mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); + } + DBUG_RETURN(TRUE); } - DBUG_PRINT("info",("Use database: %s", db_name.str)); - if (!my_strcasecmp(system_charset_info, db_name.str, - information_schema_name.str)) - { - system_db= 1; + + DBUG_PRINT("info",("Use database: %s", new_db_file_name.str)); + #ifndef NO_EMBEDDED_ACCESS_CHECKS - db_access= SELECT_ACL; -#endif - goto end; + db_access= + test_all_bits(sctx->master_access, DB_ACLS) ? + DB_ACLS : + acl_get(sctx->host, + sctx->ip, + sctx->priv_user, + new_db_file_name.str, + FALSE) | sctx->master_access; + + if (!force_switch && + !(db_access & DB_ACLS) && + (!grant_option || check_grant_db(thd, new_db_file_name.str))) + { + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, + sctx->priv_host, + new_db_file_name.str); + general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), + sctx->priv_user, sctx->priv_host, new_db_file_name.str); + my_free(new_db_file_name.str, MYF(0)); + DBUG_RETURN(TRUE); } +#endif -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (!no_access_check) + if (check_db_dir_existence(new_db_file_name.str)) { - if (test_all_bits(sctx->master_access, DB_ACLS)) - db_access=DB_ACLS; + if (force_switch) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_BAD_DB_ERROR, ER(ER_BAD_DB_ERROR), + new_db_file_name.str); + + my_free(new_db_file_name.str, MYF(0)); + + /* Change db to NULL. */ + + mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); + + DBUG_RETURN(FALSE); + } else - db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, - db_name.str, 0) | - sctx->master_access); - if (!(db_access & DB_ACLS) && (!grant_option || - check_grant_db(thd, db_name.str))) { - my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), - sctx->priv_user, - sctx->priv_host, - db_name.str); - general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), - sctx->priv_user, sctx->priv_host, db_name.str); - my_free(db_name.str, MYF(0)); - DBUG_RETURN(1); + my_error(ER_BAD_DB_ERROR, MYF(0), new_db_file_name.str); + my_free(new_db_file_name.str, MYF(0)); + DBUG_RETURN(TRUE); } } -#endif - if (check_db_dir_existence(db_name.str)) - { - my_error(ER_BAD_DB_ERROR, MYF(0), db_name.str); - my_free(db_name.str, MYF(0)); - DBUG_RETURN(1); - } + /* + NOTE: in mysql_change_db_impl() new_db_file_name is assigned to THD + attributes and will be freed in THD::~THD(). + */ -end: - x_free(thd->db); - DBUG_ASSERT(db_name.str == NULL || db_name.str[0] != '\0'); - thd->reset_db(db_name.str, db_name.length); // THD::~THD will free this -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (!no_access_check) - sctx->db_access= db_access; -#endif - if (system_db) - { - thd->db_charset= system_charset_info; - thd->variables.collation_database= system_charset_info; - } - else { - HA_CREATE_INFO create; + HA_CREATE_INFO db_options; - load_db_opt_by_name(thd, db_name.str, &create); + load_db_opt_by_name(thd, new_db_name->str, &db_options); - thd->db_charset= create.default_table_charset ? - create.default_table_charset : - thd->variables.collation_server; - thd->variables.collation_database= thd->db_charset; + mysql_change_db_impl(thd, &new_db_file_name, db_access, + db_options.default_table_charset ? + db_options.default_table_charset : + thd->variables.collation_server); } - DBUG_RETURN(0); + + DBUG_RETURN(FALSE); } @@ -1581,8 +1678,8 @@ bool mysql_rename_db(THD *thd, LEX_STRING *old_db, LEX_STRING *new_db) Failed to move all tables from the old database to the new one. 
In the best case mysql_rename_tables() moved all tables back to the old database. In the worst case mysql_rename_tables() moved some tables - to the new database, then failed, then started to move the tables back, and - then failed again. In this situation we have some tables in the + to the new database, then failed, then started to move the tables back, + and then failed again. In this situation we have some tables in the old database and some tables in the new database. Let's delete the option file, and then the new database directory. If some tables were left in the new directory, rmdir() will fail. @@ -1703,7 +1800,7 @@ bool mysql_rename_db(THD *thd, LEX_STRING *old_db, LEX_STRING *new_db) /* Step9: Let's do "use newdb" if we renamed the current database */ if (change_to_newdb) - error|= mysql_change_db(thd, new_db->str, 0); + error|= mysql_change_db(thd, new_db, 0); exit: pthread_mutex_lock(&LOCK_lock_db); @@ -1718,6 +1815,8 @@ exit: DBUG_RETURN(error); } + + /* Check if there is directory for the database name. diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 3ab053d1741..92f38d57330 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -54,6 +54,27 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (mysql_prepare_delete(thd, table_list, &conds)) DBUG_RETURN(TRUE); + /* check ORDER BY even if it can be ignored */ + if (order && order->elements) + { + TABLE_LIST tables; + List<Item> fields; + List<Item> all_fields; + + bzero((char*) &tables,sizeof(tables)); + tables.table = table; + tables.alias = table_list->alias; + + if (select_lex->setup_ref_array(thd, order->elements) || + setup_order(thd, select_lex->ref_pointer_array, &tables, + fields, all_fields, (ORDER*) order->first)) + { + delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(TRUE); + } + } + const_cond= (!conds || conds->const_item()); safe_update=test(thd->options & OPTION_SAFE_UPDATES); if (safe_update && const_cond) @@ -116,7 +137,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, /* Update the table->file->stats.records number */ table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); - table->used_keys.clear_all(); + table->covering_keys.clear_all(); table->quick_keys.clear_all(); // Can't use 'only index' select=make_select(table, 0, 0, conds, 0, &error); if (error) @@ -155,23 +176,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, { uint length= 0; SORT_FIELD *sortorder; - TABLE_LIST tables; - List<Item> fields; - List<Item> all_fields; ha_rows examined_rows; - - bzero((char*) &tables,sizeof(tables)); - tables.table = table; - tables.alias = table_list->alias; - - if (select_lex->setup_ref_array(thd, order->elements) || - setup_order(thd, select_lex->ref_pointer_array, &tables, - fields, all_fields, (ORDER*) order->first)) - { - delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(TRUE); - } if ((!select || table->quick_keys.is_clear_all()) && limit != HA_POS_ERROR) usable_index= get_index_for_order(table, (ORDER*)(order->first), limit); @@ -216,7 +221,20 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, init_ftfuncs(thd, select_lex, 1); thd->proc_info="updating"; - will_batch= !table->file->start_bulk_delete(); + if (table->triggers && + table->triggers->has_triggers(TRG_EVENT_DELETE, + TRG_ACTION_AFTER)) + { + /* + The table has AFTER DELETE triggers that might access to subject table + and therefore might need delete to be done immediately. 
So we turn-off + the batching. + */ + (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH); + will_batch= FALSE; + } + else + will_batch= !table->file->start_bulk_delete(); table->mark_columns_needed_for_delete(); @@ -335,7 +353,7 @@ cleanup: } } if (!transactional_table) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } free_underlaid_joins(thd, select_lex); if (transactional_table) @@ -376,6 +394,8 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) { Item *fake_conds= 0; SELECT_LEX *select_lex= &thd->lex->select_lex; + const char *operation = thd->lex->sql_command == SQLCOM_TRUNCATE ? + "TRUNCATE" : "DELETE"; DBUG_ENTER("mysql_prepare_delete"); List<Item> all_fields; @@ -390,14 +410,14 @@ bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) DBUG_RETURN(TRUE); if (!table_list->updatable || check_key_in_view(thd, table_list)) { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE"); + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, operation); DBUG_RETURN(TRUE); } { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0))) { - update_non_unique_table_error(table_list, "DELETE", duplicate); + update_non_unique_table_error(table_list, operation, duplicate); DBUG_RETURN(TRUE); } } @@ -492,7 +512,7 @@ bool mysql_multi_delete_prepare(THD *thd) { TABLE_LIST *duplicate; if ((duplicate= unique_table(thd, target_tbl->correspondent_table, - lex->query_tables))) + lex->query_tables, 0))) { update_non_unique_table_error(target_tbl->correspondent_table, "DELETE", duplicate); @@ -553,11 +573,22 @@ multi_delete::initialize_tables(JOIN *join) tbl->no_keyread=1; /* Don't use record cache */ tbl->no_cache= 1; - tbl->used_keys.clear_all(); + tbl->covering_keys.clear_all(); if (tbl->file->has_transactions()) transactional_tables= 1; else normal_tables= 1; + if (tbl->triggers && + tbl->triggers->has_triggers(TRG_EVENT_DELETE, + TRG_ACTION_AFTER)) + { + /* + The table has AFTER DELETE triggers that might access to subject + table and therefore might need delete to be done immediately. + So we turn-off the batching. 
+ */ + (void) tbl->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH); + } tbl->prepare_for_position(); tbl->mark_columns_needed_for_delete(); } @@ -827,7 +858,7 @@ bool multi_delete::send_eof() } } if (!transactional_tables) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } /* Commit or rollback the current SQL statement */ if (transactional_tables) @@ -904,7 +935,8 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) if (!dont_send_ok) { enum legacy_db_type table_type; - mysql_frm_type(thd, path, &table_type); + if (mysql_frm_type(thd, path, &table_type) == FRMTYPE_VIEW) + goto trunc_by_del; if (table_type == DB_TYPE_UNKNOWN) { my_error(ER_NO_SUCH_TABLE, MYF(0), diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 5712fbceac8..89bd7958c86 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -179,7 +179,7 @@ exit: orig_table_list->table_name= table->s->table_name.str; orig_table_list->table_name_length= table->s->table_name.length; table->derived_select_number= first_select->select_number; - table->s->tmp_table= TMP_TABLE; + table->s->tmp_table= NON_TRANSACTIONAL_TMP_TABLE; #ifndef NO_EMBEDDED_ACCESS_CHECKS if (orig_table_list->referencing_view) table->grant= orig_table_list->grant; diff --git a/sql/sql_error.cc b/sql/sql_error.cc index 70882d8f4e8..61442c52de7 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -147,15 +147,9 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, if (thd->warn_list.elements < thd->variables.max_error_count) { - /* - The following code is here to change the allocation to not - use the thd->mem_root, which is freed after each query - */ - MEM_ROOT *old_root= thd->mem_root; - thd->mem_root= &thd->warn_root; - if ((err= new MYSQL_ERROR(thd, code, level, msg))) - thd->warn_list.push_back(err); - thd->mem_root= old_root; + /* We have to use warn_root, as mem_root is freed after each query */ + if ((err= new (&thd->warn_root) MYSQL_ERROR(thd, code, level, msg))) + thd->warn_list.push_back(err, &thd->warn_root); } thd->warn_count[(uint) level]++; thd->total_warn_count++; diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 91e61be0478..cd87330cedb 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -515,7 +515,8 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, } List_iterator<Item> it_ke(*key_expr); Item *item; - for (key_len=0 ; (item=it_ke++) ; key_part++) + key_part_map keypart_map; + for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) { my_bitmap_map *old_map; // 'item' can be changed by fix_fields() call @@ -532,6 +533,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, (void) item->save_in_field(key_part->field, 1); dbug_tmp_restore_column_map(table->write_set, old_map); key_len+=key_part->store_length; + keypart_map= (keypart_map << 1) | 1; } if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len)))) @@ -540,7 +542,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, table->file->ha_index_init(keyno, 1); key_copy(key, table->record[0], table->key_info + keyno, key_len); error= table->file->index_read(table->record[0], - key,key_len,ha_rkey_mode); + key, keypart_map, ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } diff --git a/sql/sql_help.cc b/sql/sql_help.cc index b8a904b0c6e..79d658c2a85 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -295,8 +295,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, rkey_id->store((longlong) key_id, TRUE); rkey_id->get_key_image(buff, rkey_id->pack_length(), 
Field::itRAW); int key_res= relations->file->index_read(relations->record[0], - (byte *) buff, - rkey_id->pack_length(), + (byte *) buff, (key_part_map)1, HA_READ_KEY_EXACT); for ( ; @@ -310,7 +309,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, field->get_key_image(topic_id_buff, field->pack_length(), Field::itRAW); if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff, - field->pack_length(), HA_READ_KEY_EXACT)) + (key_part_map)1, HA_READ_KEY_EXACT)) { memorize_variant_topic(thd,topics,count,find_fields, names,name,description,example); @@ -567,7 +566,7 @@ SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, cond->fix_fields(thd, &cond); // can never fail /* Assume that no indexes cover all required fields */ - table->used_keys.clear_all(); + table->covering_keys.clear_all(); SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error); if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) || @@ -655,8 +654,9 @@ bool mysqld_help(THD *thd, const char *mask) tables[3].lock_type= TL_READ; tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; - if (open_and_lock_tables(thd, tables)) - goto error; + Open_tables_state open_tables_state_backup; + if (open_system_tables_for_read(thd, tables, &open_tables_state_backup)) + goto error2; /* Init tables and fields to be usable from items @@ -781,8 +781,13 @@ bool mysqld_help(THD *thd, const char *mask) } send_eof(thd); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(FALSE); + error: + close_system_tables(thd, &open_tables_state_backup); + +error2: DBUG_RETURN(TRUE); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index f58d08b8dea..f6ae2df3750 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -60,6 +60,7 @@ #include "sql_select.h" #include "sql_show.h" #include "slave.h" +#include "rpl_mi.h" #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); @@ -341,6 +342,47 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list, return 0; } +/* + Prepare triggers for INSERT-like statement. + + SYNOPSIS + prepare_triggers_for_insert_stmt() + table Table to which insert will happen + + NOTE + Prepare triggers for INSERT-like statement by marking fields + used by triggers and inform handlers that batching of UPDATE/DELETE + cannot be done if there are BEFORE UPDATE/DELETE triggers. +*/ + +void prepare_triggers_for_insert_stmt(TABLE *table) +{ + if (table->triggers) + { + if (table->triggers->has_triggers(TRG_EVENT_DELETE, + TRG_ACTION_AFTER)) + { + /* + The table has AFTER DELETE triggers that might access to + subject table and therefore might need delete to be done + immediately. So we turn-off the batching. + */ + (void) table->file->extra(HA_EXTRA_DELETE_CANNOT_BATCH); + } + if (table->triggers->has_triggers(TRG_EVENT_UPDATE, + TRG_ACTION_AFTER)) + { + /* + The table has AFTER UPDATE triggers that might access to subject + table and therefore might need update to be done immediately. + So we turn-off the batching. 
+ */ + (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH); + } + } + table->mark_columns_needed_for_insert(); +} + bool mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, @@ -468,10 +510,15 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, thd->proc_info="init"; thd->used_tables=0; values= its++; + value_count= values->elements; if (mysql_prepare_insert(thd, table_list, table, fields, values, update_fields, update_values, duplic, &unused_conds, - FALSE)) + FALSE, + (fields.elements || !value_count), + !ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)))) goto abort; /* mysql_prepare_insert set table_list->table if it was not set */ @@ -497,7 +544,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table_list->next_local= 0; context->resolve_in_table_list_only(table_list); - value_count= values->elements; while ((values= its++)) { counter++; @@ -566,20 +612,13 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, if (lock_type != TL_WRITE_DELAYED && !thd->prelocked_mode) table->file->ha_start_bulk_insert(values_list.elements); - thd->no_trans_update= 0; - thd->abort_on_warning= (!ignore && - (thd->variables.sql_mode & - (MODE_STRICT_TRANS_TABLES | - MODE_STRICT_ALL_TABLES))); + thd->no_trans_update.stmt= FALSE; + thd->abort_on_warning= (!ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); - if ((fields.elements || !value_count) && - check_that_all_fields_are_given_values(thd, table, table_list)) - { - /* thd->net.report_error is now set, which will abort the next loop */ - error= 1; - } + prepare_triggers_for_insert_stmt(table); - table->mark_columns_needed_for_insert(); if (table_list->prepare_where(thd, 0, TRUE) || table_list->prepare_check_option(thd)) @@ -716,7 +755,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, } } if (!transactional_table) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } } if (transactional_table) @@ -757,6 +796,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->next_number_field->val_int() : 0)); table->next_number_field=0; thd->count_cuted_fields= CHECK_FIELD_IGNORE; + table->auto_increment_field_not_null= FALSE; if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); if (duplic == DUP_REPLACE && @@ -954,6 +994,10 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, be taken from table_list->table) where Where clause (for insert ... select) select_insert TRUE if INSERT ... SELECT statement + check_fields TRUE if need to check that all INSERT fields are + given values. + abort_on_warning whether to report if some INSERT field is not + assigned as an error (TRUE) or as a warning (FALSE). 
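(The two new parameters are applied by briefly overriding thd->abort_on_warning around the "all fields are given values" check and then restoring it, as the hunks below show. A standalone sketch of that save/override/restore shape, with illustrative types:)

  struct DemoThd { bool abort_on_warning; };

  /* Run `check` with the requested strictness, then restore the caller's
     setting regardless of what the check returned. */
  template <typename Check>
  static bool demo_check_with_strictness(DemoThd *thd, bool strict, Check check)
  {
    bool saved= thd->abort_on_warning;
    thd->abort_on_warning= strict;   /* strict SQL mode: missing value = error */
    bool res= check(thd);
    thd->abort_on_warning= saved;
    return res;
  }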
TODO (in far future) In cases of: @@ -974,7 +1018,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List<Item> &fields, List_item *values, List<Item> &update_fields, List<Item> &update_values, enum_duplicates duplic, - COND **where, bool select_insert) + COND **where, bool select_insert, + bool check_fields, bool abort_on_warning) { SELECT_LEX *select_lex= &thd->lex->select_lex; Name_resolution_context *context= &select_lex->context; @@ -1036,10 +1081,22 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, table_list->next_local= 0; context->resolve_in_table_list_only(table_list); - if (!(res= check_insert_fields(thd, context->table_list, fields, *values, - !insert_into_view, &map) || - setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0)) - && duplic == DUP_UPDATE) + res= check_insert_fields(thd, context->table_list, fields, *values, + !insert_into_view, &map) || + setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0); + + if (!res && check_fields) + { + bool saved_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= abort_on_warning; + res= check_that_all_fields_are_given_values(thd, + table ? table : + context->table_list->table, + context->table_list); + thd->abort_on_warning= saved_abort_on_warning; + } + + if (!res && duplic == DUP_UPDATE) { select_lex->no_wrap_view_item= TRUE; res= check_update_fields(thd, context->table_list, update_fields, &map); @@ -1063,7 +1120,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, { Item *fake_conds= 0; TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 1))) { update_non_unique_table_error(table_list, "INSERT", duplicate); DBUG_RETURN(TRUE); @@ -1106,7 +1163,7 @@ static int last_uniq_key(TABLE *table,uint keynr) then both on update triggers will work instead. Similarly both on delete triggers will be invoked if we will delete conflicting records. - Sets thd->no_trans_update if table which is updated didn't have + Sets thd->no_trans_update.stmt to TRUE if table which is updated didn't have transactions. RETURN VALUE @@ -1201,9 +1258,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } key_copy((byte*) key,table->record[0],table->key_info+key_nr,0); if ((error=(table->file->index_read_idx(table->record[1],key_nr, - (byte*) key, - table->key_info[key_nr]. 
- key_length, + (byte*) key, HA_WHOLE_KEY, HA_READ_KEY_EXACT)))) goto err; } @@ -1258,12 +1313,14 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) */ insert_id_for_cur_row= table->file->insert_id_for_cur_row= 0; if (table->next_number_field) - table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int()); + table->file->adjust_next_insert_id_after_explicit_value( + table->next_number_field->val_int()); trg_error= (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE)); info->copied++; } + goto ok_or_after_trg_err; } else /* DUP_REPLACE */ @@ -1309,7 +1366,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) goto err; info->deleted++; if (!table->file->has_transactions()) - thd->no_trans_update= 1; + thd->no_trans_update.stmt= TRUE; if (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_DELETE, TRG_ACTION_AFTER, TRUE)) @@ -1350,7 +1407,7 @@ ok_or_after_trg_err: if (key) my_safe_afree(key,table->s->max_unique_length,MAX_KEY_LENGTH); if (!table->file->has_transactions()) - thd->no_trans_update= 1; + thd->no_trans_update.stmt= TRUE; DBUG_RETURN(trg_error); err: @@ -2443,7 +2500,7 @@ bool mysql_insert_select_prepare(THD *thd) lex->query_tables->table, lex->field_list, 0, lex->update_list, lex->value_list, lex->duplicates, - &select_lex->where, TRUE)) + &select_lex->where, TRUE, FALSE, FALSE)) DBUG_RETURN(TRUE); /* @@ -2506,7 +2563,18 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) !insert_into_view, &map) || setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0); - if (info.handle_duplicates == DUP_UPDATE) + if (!res && fields->elements) + { + bool saved_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= !info.ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); + res= check_that_all_fields_are_given_values(thd, table_list->table, + table_list); + thd->abort_on_warning= saved_abort_on_warning; + } + + if (info.handle_duplicates == DUP_UPDATE && !res) { Name_resolution_context *context= &lex->select_lex.context; Name_resolution_context_state ctx_state; @@ -2576,7 +2644,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) query */ if (!(lex->current_select->options & OPTION_BUFFER_RESULT) && - unique_table(thd, table_list, table_list->next_global)) + unique_table(thd, table_list, table_list->next_global, 0)) { /* Using same table for INSERT and SELECT */ lex->current_select->options|= OPTION_BUFFER_RESULT; @@ -2612,18 +2680,16 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) if (info.handle_duplicates == DUP_REPLACE && (!table->triggers || !table->triggers->has_delete_triggers())) table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); - res= ((fields->elements && - check_that_all_fields_are_given_values(thd, table, table_list)) || - table_list->prepare_where(thd, 0, TRUE) || + res= (table_list->prepare_where(thd, 0, TRUE) || table_list->prepare_check_option(thd)); if (!res) - table->mark_columns_needed_for_insert(); + prepare_triggers_for_insert_stmt(table); DBUG_RETURN(res); } @@ -2667,6 +2733,7 @@ select_insert::~select_insert() if (table) { table->next_number_field=0; + table->auto_increment_field_not_null= FALSE; table->file->ha_reset(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; @@ -2794,7 +2861,7 
@@ void select_insert::send_error(uint errcode,const char *err) table->file->has_transactions(), FALSE); if (!thd->current_stmt_binlog_row_based && !table->s->tmp_table && !can_rollback_data()) - thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; query_cache_invalidate3(thd, table, 1); } } @@ -2835,7 +2902,7 @@ bool select_insert::send_eof() */ if (!trans_table && (!table->s->tmp_table || !thd->current_stmt_binlog_row_based)) - thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } /* @@ -3142,7 +3209,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); if (!thd->prelocked_mode) table->file->ha_start_bulk_insert((ha_rows) 0); - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index ef1f0592051..4a162478388 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -33,10 +33,10 @@ sys_var *trg_new_row_fake_var= (sys_var*) 0x01; /* Macros to look like lex */ -#define yyGet() *(lex->ptr++) -#define yyGetLast() lex->ptr[-1] -#define yyPeek() lex->ptr[0] -#define yyPeek2() lex->ptr[1] +#define yyGet() ((uchar) *(lex->ptr++)) +#define yyGetLast() ((uchar) lex->ptr[-1]) +#define yyPeek() ((uchar) lex->ptr[0]) +#define yyPeek2() ((uchar) lex->ptr[1]) #define yyUnget() lex->ptr-- #define yySkip() lex->ptr++ #define yyLength() ((uint) (lex->ptr - lex->tok_start)-1) @@ -69,6 +69,17 @@ static uchar to_upper_lex[]= 208,209,210,211,212,213,214,247,216,217,218,219,220,221,222,255 }; +/* + Names of the index hints (for error messages). Keep in sync with + index_hint_type +*/ + +const char * index_hint_type_name[] = +{ + "IGNORE INDEX", + "USE INDEX", + "FORCE INDEX" +}; inline int lex_casecmp(const char *s, const char *t, uint len) { @@ -116,7 +127,7 @@ st_parsing_options::reset() (We already do too much here) */ -void lex_start(THD *thd, const uchar *buf, uint length) +void lex_start(THD *thd, const char *buf, uint length) { LEX *lex= thd->lex; DBUG_ENTER("lex_start"); @@ -158,7 +169,6 @@ void lex_start(THD *thd, const uchar *buf, uint length) lex->lock_option= TL_READ; lex->found_semicolon= 0; lex->safe_to_cache_query= 1; - lex->time_zone_tables_used= 0; lex->leaf_tables_insert= 0; lex->parsing_options.reset(); lex->empty_field_list_on_rset= 0; @@ -228,9 +238,9 @@ void lex_end(LEX *lex) static int find_keyword(LEX *lex, uint len, bool function) { - const uchar *tok=lex->tok_start; + const char *tok= lex->tok_start; - SYMBOL *symbol= get_hash_symbol((const char *)tok,len,function); + SYMBOL *symbol= get_hash_symbol(tok, len, function); if (symbol) { lex->yylval->symbol.symbol=symbol; @@ -295,16 +305,16 @@ static LEX_STRING get_token(LEX *lex,uint length) static LEX_STRING get_quoted_token(LEX *lex,uint length, char quote) { LEX_STRING tmp; - const uchar *from, *end; - uchar *to; + const char *from, *end; + char *to; yyUnget(); // ptr points now after last token char tmp.length=lex->yytoklen=length; tmp.str=(char*) lex->thd->alloc(tmp.length+1); - for (from= lex->tok_start, to= (uchar*) tmp.str, end= to+length ; + for (from= lex->tok_start, to= tmp.str, end= to+length ; to != end ; ) { - if ((*to++= *from++) == (uchar) quote) + if ((*to++= *from++) == quote) from++; // Skip double quotes } *to= 0; // End null for safety @@ -331,9 +341,7 @@ static char *get_text(LEX *lex) { int l; if (use_mb(cs) && - (l = my_ismbchar(cs, - (const char 
*)lex->ptr-1, - (const char *)lex->end_of_query))) { + (l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query))) { lex->ptr += l-1; continue; } @@ -358,12 +366,12 @@ static char *get_text(LEX *lex) yyUnget(); /* Found end. Unescape and return string */ - const uchar *str, *end; - uchar *start; + const char *str, *end; + char *start; str=lex->tok_start+1; end=lex->ptr-1; - if (!(start=(uchar*) lex->thd->alloc((uint) (end-str)+1))) + if (!(start= (char*) lex->thd->alloc((uint) (end-str)+1))) return (char*) ""; // Sql_alloc has set error flag if (!found_escape) { @@ -373,15 +381,14 @@ static char *get_text(LEX *lex) } else { - uchar *to; + char *to; for (to=start ; str != end ; str++) { #ifdef USE_MB int l; if (use_mb(cs) && - (l = my_ismbchar(cs, - (const char *)str, (const char *)end))) { + (l = my_ismbchar(cs, str, end))) { while (l--) *to++ = *str++; str--; @@ -427,7 +434,7 @@ static char *get_text(LEX *lex) *to=0; lex->yytoklen=(uint) (to-start); } - return (char*) start; + return start; } } return 0; // unexpected end of query @@ -546,7 +553,6 @@ int MYSQLlex(void *arg, void *yythd) lex->yylval=yylval; // The global state - lex->tok_end_prev= lex->tok_end; lex->tok_start_prev= lex->tok_start; lex->tok_start=lex->tok_end=lex->ptr; @@ -630,16 +636,14 @@ int MYSQLlex(void *arg, void *yythd) break; } case MY_LEX_IDENT: - const uchar *start; + const char *start; #if defined(USE_MB) && defined(USE_MB_IDENT) if (use_mb(cs)) { result_state= IDENT_QUOTED; if (my_mbcharlen(cs, yyGetLast()) > 1) { - int l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query); + int l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query); if (l == 0) { state = MY_LEX_CHAR; continue; @@ -651,9 +655,7 @@ int MYSQLlex(void *arg, void *yythd) if (my_mbcharlen(cs, c) > 1) { int l; - if ((l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query)) == 0) + if ((l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query)) == 0) break; lex->ptr += l-1; } @@ -776,9 +778,7 @@ int MYSQLlex(void *arg, void *yythd) if (my_mbcharlen(cs, c) > 1) { int l; - if ((l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query)) == 0) + if ((l = my_ismbchar(cs, lex->ptr-1, lex->end_of_query)) == 0) break; lex->ptr += l-1; } @@ -1112,7 +1112,7 @@ int MYSQLlex(void *arg, void *yythd) Pointer to the last non-comment symbol of the statement. 
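/*
  Standalone sketch of why the yyGet()/yyPeek()/yyGetLast() macros above cast
  through uchar: with a plain signed "char", any byte >= 0x80 reads as a
  negative value, which would mis-index 256-entry lookup tables such as
  to_upper_lex[] and confuse multi-byte length checks.  Not MySQL code, just
  the effect being avoided.
*/
#include <cstdio>

int main()
{
  const char *ptr = "\xE9";                       /* latin1 'e with acute' */
  int as_char  = *ptr;                            /* may be -23 if char is signed */
  int as_uchar = (unsigned char) *ptr;            /* always 233 */
  std::printf("as char: %d  as uchar: %d\n", as_char, as_uchar);
  /* table[as_uchar] stays inside a 256-entry table; table[as_char] does not */
  return 0;
}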
*/ -const uchar *skip_rear_comments(const uchar *begin, const uchar *end) +const char *skip_rear_comments(const char *begin, const char *end) { while (begin < end && (end[-1] <= ' ' || end[-1] == '*' || end[-1] == '/' || end[-1] == ';')) @@ -1201,7 +1201,6 @@ void st_select_lex::init_select() group_list.empty(); type= db= 0; having= 0; - use_index_ptr= ignore_index_ptr= 0; table_join_options= 0; in_sum_expr= with_wild= 0; options= 0; @@ -1209,7 +1208,6 @@ void st_select_lex::init_select() braces= 0; expr_list.empty(); interval_list.empty(); - use_index.empty(); ftfunc_list_alloc.empty(); inner_sum_func_list= 0; ftfunc_list= &ftfunc_list_alloc; @@ -1224,6 +1222,7 @@ void st_select_lex::init_select() is_correlated= 0; cur_pos_in_select_list= UNDEF_POS; non_agg_fields.empty(); + cond_value= having_value= Item::COND_UNDEF; inner_refs_list.empty(); } @@ -1435,14 +1434,11 @@ bool st_select_lex_node::inc_in_sum_expr() { return 1; } uint st_select_lex_node::get_in_sum_expr() { return 0; } TABLE_LIST* st_select_lex_node::get_table_list() { return 0; } List<Item>* st_select_lex_node::get_item_list() { return 0; } -List<String>* st_select_lex_node::get_use_index() { return 0; } -List<String>* st_select_lex_node::get_ignore_index() { return 0; } -TABLE_LIST *st_select_lex_node::add_table_to_list(THD *thd, Table_ident *table, +TABLE_LIST *st_select_lex_node::add_table_to_list (THD *thd, Table_ident *table, LEX_STRING *alias, ulong table_join_options, thr_lock_type flags, - List<String> *use_index, - List<String> *ignore_index, + List<index_hint> *hints, LEX_STRING *option) { return 0; @@ -1549,19 +1545,6 @@ List<Item>* st_select_lex::get_item_list() return &item_list; } - -List<String>* st_select_lex::get_use_index() -{ - return use_index_ptr; -} - - -List<String>* st_select_lex::get_ignore_index() -{ - return ignore_index_ptr; -} - - ulong st_select_lex::get_table_join_options() { return table_join_options; @@ -1852,6 +1835,7 @@ bool st_lex::can_use_merged() case SQLCOM_UPDATE_MULTI: case SQLCOM_DELETE: case SQLCOM_DELETE_MULTI: + case SQLCOM_TRUNCATE: case SQLCOM_INSERT: case SQLCOM_INSERT_SELECT: case SQLCOM_REPLACE: @@ -2093,31 +2077,6 @@ void st_lex::first_lists_tables_same() /* - Add implicitly used time zone description tables to global table list - (if needed). - - SYNOPSYS - st_lex::add_time_zone_tables_to_query_tables() - thd - pointer to current thread context - - RETURN VALUE - TRUE - error - FALSE - success -*/ - -bool st_lex::add_time_zone_tables_to_query_tables(THD *thd_arg) -{ - /* We should not add these tables twice */ - if (!time_zone_tables_used) - { - time_zone_tables_used= my_tz_get_table_list(thd_arg, &query_tables_last); - if (time_zone_tables_used == &fake_time_zone_tables_list) - return TRUE; - } - return FALSE; -} - -/* Link table back that was unlinked with unlink_first_table() SYNOPSIS @@ -2186,7 +2145,6 @@ void st_lex::cleanup_after_one_table_open() /* remove underlying units (units of VIEW) subtree */ select_lex.cut_subtree(); } - time_zone_tables_used= 0; } @@ -2327,3 +2285,61 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds, are in sql_union.cc */ +/* + Sets the kind of hints to be added by the calls to add_index_hint(). + + SYNOPSIS + set_index_hint_type() + type the kind of hints to be added from now on. + clause the clause to use for hints to be added from now on. + + DESCRIPTION + Used in filling up the tagged hints list. 
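/*
  Sketch of how the tagged hints list described above is filled while parsing
  a table reference such as:

      t1 USE INDEX FOR ORDER BY (i1, i2) IGNORE INDEX (i3)

  The parser first fixes the current hint type and clause, then pushes one
  entry per index name.  The types below are simplified stand-ins, not the
  real st_select_lex / index_hint API.
*/
#include <cstdio>
#include <list>
#include <string>

enum hint_kind { HINT_IGNORE, HINT_USE, HINT_FORCE };

struct hint { hint_kind kind; unsigned clause; std::string key; };

struct select_ctx
{
  hint_kind cur_kind;
  unsigned  cur_clause;
  std::list<hint> hints;

  void set_hint_type(hint_kind k, unsigned c) { cur_kind = k; cur_clause = c; }
  void add_hint(const std::string &name)
  { hints.push_front(hint{ cur_kind, cur_clause, name }); }
};

int main()
{
  const unsigned FOR_ORDER = 1u << 2;            /* like INDEX_HINT_MASK_ORDER */
  const unsigned FOR_ALL   = 0x7;                /* like INDEX_HINT_MASK_ALL */

  select_ctx sel{};
  sel.set_hint_type(HINT_USE, FOR_ORDER);        /* USE INDEX FOR ORDER BY (...) */
  sel.add_hint("i1");
  sel.add_hint("i2");
  sel.set_hint_type(HINT_IGNORE, FOR_ALL);       /* IGNORE INDEX (...) */
  sel.add_hint("i3");

  std::printf("%u hints collected\n", (unsigned) sel.hints.size());
  return 0;
}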
+ This list is filled by first setting the kind of the hint as a + context variable and then adding hints of the current kind. + Then the context variable index_hint_type can be reset to the + next hint type. +*/ +void st_select_lex::set_index_hint_type(enum index_hint_type type, + index_clause_map clause) +{ + current_index_hint_type= type; + current_index_hint_clause= clause; +} + + +/* + Makes an array to store index usage hints (ADD/FORCE/IGNORE INDEX). + + SYNOPSIS + alloc_index_hints() + thd current thread. +*/ + +void st_select_lex::alloc_index_hints (THD *thd) +{ + index_hints= new (thd->mem_root) List<index_hint>(); +} + + + +/* + adds an element to the array storing index usage hints + (ADD/FORCE/IGNORE INDEX). + + SYNOPSIS + add_index_hint() + thd current thread. + str name of the index. + length number of characters in str. + + RETURN VALUE + 0 on success, non-zero otherwise +*/ +bool st_select_lex::add_index_hint (THD *thd, char *str, uint length) +{ + return index_hints->push_front (new (thd->mem_root) + index_hint(current_index_hint_type, + current_index_hint_clause, + str, length)); +} diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 33d028c829e..1175776ffd3 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -188,12 +188,12 @@ typedef struct st_lex_master_info uint port, connect_retry; ulonglong pos; ulong server_id; - /* - Variable for MASTER_SSL option. - MASTER_SSL=0 in CHANGE MASTER TO corresponds to SSL_DISABLE - MASTER_SSL=1 corresponds to SSL_ENABLE - */ - enum {SSL_UNCHANGED=0, SSL_DISABLE, SSL_ENABLE} ssl; + /* + Enum is used for making it possible to detect if the user + changed variable or if it should be left at old value + */ + enum {SSL_UNCHANGED, SSL_DISABLE, SSL_ENABLE} + ssl, ssl_verify_server_cert; char *ssl_key, *ssl_cert, *ssl_ca, *ssl_capath, *ssl_cipher; char *relay_log_name; ulong relay_log_pos; @@ -217,6 +217,47 @@ enum tablespace_op_type }; /* + String names used to print a statement with index hints. + Keep in sync with index_hint_type. +*/ +extern const char * index_hint_type_name[]; +typedef byte index_clause_map; + +/* + Bits in index_clause_map : one for each possible FOR clause in + USE/FORCE/IGNORE INDEX index hint specification +*/ +#define INDEX_HINT_MASK_JOIN (1) +#define INDEX_HINT_MASK_GROUP (1 << 1) +#define INDEX_HINT_MASK_ORDER (1 << 2) + +#define INDEX_HINT_MASK_ALL (INDEX_HINT_MASK_JOIN | INDEX_HINT_MASK_GROUP | \ + INDEX_HINT_MASK_ORDER) + +/* Single element of an USE/FORCE/IGNORE INDEX list specified as a SQL hint */ +class index_hint : public Sql_alloc +{ +public: + /* The type of the hint : USE/FORCE/IGNORE */ + enum index_hint_type type; + /* Where the hit applies to. A bitmask of INDEX_HINT_MASK_<place> values */ + index_clause_map clause; + /* + The index name. Empty (str=NULL) name represents an empty list + USE INDEX () clause + */ + LEX_STRING key_name; + + index_hint (enum index_hint_type type_arg, index_clause_map clause_arg, + char *str, uint length) : + type(type_arg), clause(clause_arg) + { + key_name.str= str; + key_name.length= length; + } +}; + +/* The state of the lex parsing for selects master and slaves are pointers to select_lex. 
@@ -394,15 +435,12 @@ public: virtual uint get_in_sum_expr(); virtual TABLE_LIST* get_table_list(); virtual List<Item>* get_item_list(); - virtual List<String>* get_use_index(); - virtual List<String>* get_ignore_index(); virtual ulong get_table_join_options(); virtual TABLE_LIST *add_table_to_list(THD *thd, Table_ident *table, LEX_STRING *alias, ulong table_options, thr_lock_type flags= TL_UNLOCK, - List<String> *use_index= 0, - List<String> *ignore_index= 0, + List<index_hint> *hints= 0, LEX_STRING *option= 0); virtual void set_lock_for_tables(thr_lock_type lock_type) {} @@ -503,8 +541,9 @@ public: bool change_result(select_subselect *result, select_subselect *old_result); void set_limit(st_select_lex *values); void set_thd(THD *thd_arg) { thd= thd_arg; } + inline bool is_union (); - friend void lex_start(THD *thd, const uchar *buf, uint length); + friend void lex_start(THD *thd, const char *buf, uint length); friend int subselect_union_engine::exec(); List<Item> *get_unit_column_types(); @@ -523,6 +562,8 @@ public: Item *where, *having; /* WHERE & HAVING clauses */ Item *prep_where; /* saved WHERE clause for prepared statement processing */ Item *prep_having;/* saved HAVING clause for prepared statement processing */ + /* Saved values of the WHERE and HAVING clauses*/ + Item::cond_result cond_value, having_value; /* point on lex in which it was created, used in view subquery detection */ st_lex *parent_lex; enum olap_type olap; @@ -530,8 +571,7 @@ public: SQL_LIST table_list; SQL_LIST group_list; /* GROUP BY clause. */ List<Item> item_list; /* list of fields & expressions */ - List<String> interval_list, use_index, *use_index_ptr, - ignore_index, *ignore_index_ptr; + List<String> interval_list; bool is_item_list_lookup; /* Usualy it is pointer to ftfunc_list_alloc, but in union used to create fake @@ -678,8 +718,7 @@ public: LEX_STRING *alias, ulong table_options, thr_lock_type flags= TL_UNLOCK, - List<String> *use_index= 0, - List<String> *ignore_index= 0, + List<index_hint> *hints= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); bool init_nested_join(THD *thd); @@ -688,8 +727,6 @@ public: void add_joined_table(TABLE_LIST *table); TABLE_LIST *convert_right_join(); List<Item>* get_item_list(); - List<String>* get_use_index(); - List<String>* get_ignore_index(); ulong get_table_join_options(); void set_lock_for_tables(thr_lock_type lock_type); inline void init_order() @@ -707,7 +744,7 @@ public: void cut_subtree() { slave= 0; } bool test_limit(); - friend void lex_start(THD *thd, const uchar *buf, uint length); + friend void lex_start(THD *thd, const char *buf, uint length); st_select_lex() : n_sum_items(0), n_child_sum_items(0) {} void make_empty_select() { @@ -729,9 +766,43 @@ public: select lexes. */ void cleanup_all_joins(bool full); + + void set_index_hint_type(enum index_hint_type type, index_clause_map clause); + + /* + Add a index hint to the tagged list of hints. The type and clause of the + hint will be the current ones (set by set_index_hint()) + */ + bool add_index_hint (THD *thd, char *str, uint length); + + /* make a list to hold index hints */ + void alloc_index_hints (THD *thd); + /* read and clear the index hints */ + List<index_hint>* pop_index_hints(void) + { + List<index_hint> *hints= index_hints; + index_hints= NULL; + return hints; + } + + void clear_index_hints(void) { index_hints= NULL; } + +private: + /* current index hint kind. 
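/*
  Sketch of the read-and-clear behaviour of pop_index_hints() declared above:
  the hints collected for the current table reference are handed over exactly
  once (to the TABLE_LIST being built) and the per-SELECT pointer is reset so
  the next table in the FROM list starts with no hints.  Simplified types.
*/
#include <cstdio>
#include <list>
#include <string>

struct select_ctx
{
  std::list<std::string> *index_hints;

  select_ctx() : index_hints(0) {}

  std::list<std::string> *pop_index_hints()
  {
    std::list<std::string> *hints = index_hints;
    index_hints = 0;                    /* next table starts clean */
    return hints;
  }
};

int main()
{
  select_ctx sel;
  sel.index_hints = new std::list<std::string>;
  sel.index_hints->push_back("i1");
  sel.index_hints->push_back("i2");

  std::list<std::string> *for_table = sel.pop_index_hints();  /* attach to table */
  std::printf("table got %u hints, select keeps %s\n",
              (unsigned) for_table->size(),
              sel.index_hints ? "some" : "none");
  delete for_table;
  return 0;
}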
used in filling up index_hints */ + enum index_hint_type current_index_hint_type; + index_clause_map current_index_hint_clause; + /* a list of USE/FORCE/IGNORE INDEX */ + List<index_hint> *index_hints; }; typedef class st_select_lex SELECT_LEX; + +inline bool st_select_lex_unit::is_union () +{ + return first_select()->next_select() && + first_select()->next_select()->linkage == UNION_TYPE; +} + #define ALTER_ADD_COLUMN (1L << 0) #define ALTER_DROP_COLUMN (1L << 1) #define ALTER_CHANGE_COLUMN (1L << 2) @@ -933,11 +1004,11 @@ typedef struct st_lex : public Query_tables_list SELECT_LEX *current_select; /* list of all SELECT_LEX */ SELECT_LEX *all_selects_list; - const uchar *buf; /* The beginning of string, used by SPs */ - const uchar *ptr,*tok_start,*tok_end,*end_of_query; + const char *buf; /* The beginning of string, used by SPs */ + const char *ptr,*tok_start,*tok_end,*end_of_query; - /* The values of tok_start/tok_end as they were one call of MYSQLlex before */ - const uchar *tok_start_prev, *tok_end_prev; + /* The value of tok_start as they were one call of MYSQLlex before */ + const char *tok_start_prev; char *length,*dec,*change; LEX_STRING name; @@ -1092,11 +1163,6 @@ typedef struct st_lex : public Query_tables_list bool prepared_stmt_code_is_varref; /* Names of user variables holding parameters (in EXECUTE) */ List<LEX_STRING> prepared_stmt_params; - /* - Points to part of global table list which contains time zone tables - implicitly used by the statement. - */ - TABLE_LIST *time_zone_tables_used; sp_head *sphead; sp_name *spname; bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ @@ -1144,7 +1210,7 @@ typedef struct st_lex : public Query_tables_list Pointers to part of LOAD DATA statement that should be rewritten during replication ("LOCAL 'filename' REPLACE INTO" part). 
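/*
  Sketch of why the Sql_alloc allocation operators above are marked as
  non-throwing (spelled "throw()" in the patch, "noexcept" below): these
  allocators hand back memory from a pool and may return NULL after setting an
  error flag, and only a non-throwing operator new makes a NULL check after
  "new Foo" meaningful.  Toy malloc-based stand-in, not the real
  sql_alloc()/alloc_root().
*/
#include <cstdio>
#include <cstdlib>
#include <new>

struct Pooled
{
  int value;

  Pooled() : value(42) {}

  static void *operator new(std::size_t size) noexcept   /* may return NULL */
  { return std::malloc(size); }
  static void operator delete(void *ptr) noexcept
  { std::free(ptr); }
};

int main()
{
  Pooled *p = new Pooled();     /* constructor is skipped if allocation fails */
  if (p == 0)
    return 1;                   /* the NULL check is not optimised away */
  std::printf("%d\n", p->value);
  delete p;
  return 0;
}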
*/ - const uchar *fname_start, *fname_end; + const char *fname_start, *fname_end; /* Reference to a struct that contains information in various commands @@ -1183,7 +1249,6 @@ typedef struct st_lex : public Query_tables_list TABLE_LIST *unlink_first_table(bool *link_to_local); void link_first_table_back(TABLE_LIST *first, bool link_to_local); void first_lists_tables_same(); - bool add_time_zone_tables_to_query_tables(THD *thd); bool can_be_merged(); bool can_use_merged(); @@ -1262,10 +1327,10 @@ struct st_lex_local: public st_lex extern void lex_init(void); extern void lex_free(void); -extern void lex_start(THD *thd, const uchar *buf, uint length); +extern void lex_start(THD *thd, const char *buf, uint length); extern void lex_end(LEX *lex); extern int MYSQLlex(void *arg, void *yythd); -extern const uchar *skip_rear_comments(const uchar *ubegin, const uchar *uend); +extern const char *skip_rear_comments(const char *ubegin, const char *uend); extern bool is_lex_native_function(const LEX_STRING *name); diff --git a/sql/sql_list.h b/sql/sql_list.h index af30cbe0d6a..ba61a931e04 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -23,7 +23,7 @@ class Sql_alloc { public: - static void *operator new(size_t size) + static void *operator new(size_t size) throw () { return (void*) sql_alloc((uint) size); } @@ -31,9 +31,9 @@ public: { return (void*) sql_alloc((uint) size); } - static void *operator new[](size_t size, MEM_ROOT *mem_root) + static void *operator new[](size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } - static void *operator new(size_t size, MEM_ROOT *mem_root) + static void *operator new(size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr, size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 09066e875bd..71cc4c0507c 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -175,7 +175,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table is marked to be 'used for insert' in which case we should never mark this table as 'const table' (ie, one that has only one row). 
*/ - if (unique_table(thd, table_list, table_list->next_global)) + if (unique_table(thd, table_list, table_list->next_global, 0)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); DBUG_RETURN(TRUE); @@ -226,7 +226,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, DBUG_RETURN(TRUE); } - table->mark_columns_needed_for_insert(); + prepare_triggers_for_insert_stmt(table); uint tot_length=0; bool use_blobs= 0, use_vars= 0; @@ -376,7 +376,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table->file->ha_start_bulk_insert((ha_rows) 0); table->copy_blobs=1; - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; thd->abort_on_warning= (!ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | @@ -470,7 +470,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); if (!transactional_table) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; #ifndef EMBEDDED_LIBRARY if (mysql_bin_log.is_open()) { @@ -512,6 +512,7 @@ err: mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + table->auto_increment_field_not_null= FALSE; thd->abort_on_warning= 0; DBUG_RETURN(error); } @@ -551,7 +552,7 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, Item_field *sql_field; TABLE *table= table_list->table; ulonglong id; - bool no_trans_update; + bool no_trans_update_stmt, err; DBUG_ENTER("read_fixed_length"); id= 0; @@ -579,7 +580,7 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, #ifdef HAVE_purify read_info.row_end[0]=0; #endif - no_trans_update= !table->file->has_transactions(); + no_trans_update_stmt= !table->file->has_transactions(); restore_record(table, s->default_values); /* @@ -609,8 +610,6 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, { uint length; byte save_chr; - if (field == table->next_number_field) - table->auto_increment_field_not_null= TRUE; if ((length=(uint) (read_info.row_end-pos)) > field->field_length) length=field->field_length; @@ -645,9 +644,11 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, DBUG_RETURN(-1); } - if (write_record(thd, table, &info)) + err= write_record(thd, table, &info); + table->auto_increment_field_not_null= FALSE; + if (err) DBUG_RETURN(1); - thd->no_trans_update= no_trans_update; + thd->no_trans_update.stmt= no_trans_update_stmt; /* We don't need to reset auto-increment field since we are restoring @@ -682,12 +683,12 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, TABLE *table= table_list->table; uint enclosed_length; ulonglong id; - bool no_trans_update; + bool no_trans_update_stmt, err; DBUG_ENTER("read_sep_field"); enclosed_length=enclosed.length(); id= 0; - no_trans_update= !table->file->has_transactions(); + no_trans_update_stmt= !table->file->has_transactions(); for (;;it.rewind()) { @@ -729,8 +730,6 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, DBUG_RETURN(1); } field->set_null(); - if (field == table->next_number_field) - table->auto_increment_field_not_null= TRUE; if (!field->maybe_null()) { if (field->type() == MYSQL_TYPE_TIMESTAMP) @@ -816,14 +815,15 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, DBUG_RETURN(-1); } - - if (write_record(thd, table, &info)) + err= write_record(thd, table, &info); + table->auto_increment_field_not_null= FALSE; + if (err) DBUG_RETURN(1); /* We don't need to reset auto-increment field since we are 
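/*
  Sketch of the pattern introduced above in read_fixed_length() /
  read_sep_field(): the per-row flag (auto_increment_field_not_null) has to be
  cleared after every write attempt, so the result of write_record() is saved
  first and only checked after the cleanup has run.  Stand-in functions only.
*/
#include <cstdio>

struct row_state { bool auto_inc_not_null; };

static int write_record_stub(int row)
{ return row == 3 ? 1 : 0; }                 /* pretend row 3 fails */

static int load_rows(row_state *st)
{
  for (int row = 1; row <= 5; row++)
  {
    st->auto_inc_not_null = true;            /* set while filling the fields */
    int err = write_record_stub(row);
    st->auto_inc_not_null = false;           /* cleanup runs on every path */
    if (err)
      return 1;                              /* error handled after cleanup */
  }
  return 0;
}

int main()
{
  row_state st = { false };
  std::printf("load result: %d  flag: %d\n",
              load_rows(&st), (int) st.auto_inc_not_null);
  return 0;
}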
restoring its default value at the beginning of each loop iteration. */ - thd->no_trans_update= no_trans_update; + thd->no_trans_update.stmt= no_trans_update_stmt; if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 33209b6e023..ec0dcb7e761 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -26,7 +26,6 @@ #include "sp.h" #include "sp_cache.h" #include "events.h" -#include "event_data_objects.h" #include "sql_trigger.h" /* Used in error handling only */ @@ -119,8 +118,8 @@ bool end_active_trans(THD *thd) if (ha_commit(thd)) error=1; } - thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG); + thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG); + thd->no_trans_update.all= FALSE; DBUG_RETURN(error); } @@ -365,6 +364,11 @@ pthread_handler_t handle_bootstrap(void *arg) buff[length-1] == ';')) length--; buff[length]=0; + + /* Skip lines starting with delimiter */ + if (strncmp(buff, STRING_WITH_LEN("delimiter")) == 0) + continue; + thd->query_length=length; thd->query= thd->memdup_w_gap(buff, length+1, thd->db_length+1+QUERY_CACHE_FLAGS_SIZE); @@ -546,8 +550,8 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion) */ thd->server_status&= ~SERVER_STATUS_IN_TRANS; res= ha_commit(thd); - thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG); + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_KEEP_LOG); + thd->no_trans_update.all= FALSE; break; case COMMIT_RELEASE: do_release= 1; /* fall through */ @@ -564,8 +568,8 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion) thd->server_status&= ~SERVER_STATUS_IN_TRANS; if (ha_rollback(thd)) res= -1; - thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG); + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_KEEP_LOG); + thd->no_trans_update.all= FALSE; if (!res && (completion == ROLLBACK_AND_CHAIN)) res= begin_trans(thd); break; @@ -689,7 +693,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, DBUG_ENTER("dispatch_command"); if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA) + { thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; + } thd->command=command; /* @@ -717,7 +724,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, &LOCK_status); thd->convert_string(&tmp, system_charset_info, packet, packet_length-1, thd->charset()); - if (!mysql_change_db(thd, tmp.str, FALSE)) + if (!mysql_change_db(thd, &tmp, FALSE)) { general_log_print(thd, command, "%s",thd->db); send_ok(thd); @@ -782,7 +789,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, char *save_db; uint passwd_len= (thd->client_capabilities & CLIENT_SECURE_CONNECTION ? 
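/*
  Sketch of the bootstrap "delimiter" skip added above: lines of the bootstrap
  script that start with the word "delimiter" are not statements for this
  reader and are skipped.  STR_WITH_LEN below is an assumed equivalent of the
  server's STRING_WITH_LEN macro (string literal plus its length).
*/
#include <cstdio>
#include <cstring>

#define STR_WITH_LEN(X) (X), ((std::size_t) (sizeof(X) - 1))

static bool skip_bootstrap_line(const char *buff)
{
  return std::strncmp(buff, STR_WITH_LEN("delimiter")) == 0;
}

int main()
{
  std::printf("%d %d\n",
              (int) skip_bootstrap_line("delimiter ;;"),
              (int) skip_bootstrap_line("CREATE TABLE t1 (a INT);"));
  return 0;
}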
*passwd++ : strlen(passwd)); - uint dummy_errors, save_db_length, db_length, res; + uint dummy_errors, save_db_length, db_length; + int res; Security_context save_security_ctx= *thd->security_ctx; USER_CONN *save_user_connect; @@ -829,6 +837,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* authentication failure, we shall restore old user */ if (res > 0) my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); + else + thd->clear_error(); // Error already sent to client x_free(thd->security_ctx->user); *thd->security_ctx= save_security_ctx; thd->user_connect= save_user_connect; @@ -958,7 +968,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, packet= arg_end + 1; if (!my_strcasecmp(system_charset_info, table_list.db, - information_schema_name.str)) + INFORMATION_SCHEMA_NAME.str)) { ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, table_list.alias); if (schema_table) @@ -980,7 +990,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; /* init structures for VIEW processing */ table_list.select_lex= &(thd->lex->select_lex); - mysql_init_query(thd, (uchar*)"", 0); + mysql_init_query(thd, "", 0); thd->lex-> select_lex.table_list.link_in_list((byte*) &table_list, (byte**) &table_list.next_local); @@ -1138,6 +1148,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, STATUS_VAR current_global_status_var; ulong uptime; uint length; + ulonglong queries_per_second1000; #ifndef EMBEDDED_LIBRARY char buff[250]; uint buff_len= sizeof(buff); @@ -1150,19 +1161,23 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS], &LOCK_status); calc_sum_of_all_status(¤t_global_status_var); - uptime= (ulong) (thd->start_time - server_start_time); + if (!(uptime= (ulong) (thd->start_time - server_start_time))) + queries_per_second1000= 0; + else + queries_per_second1000= thd->query_id * LL(1000) / uptime; + length= my_snprintf((char*) buff, buff_len - 1, "Uptime: %lu Threads: %d Questions: %lu " "Slow queries: %lu Opens: %lu Flush tables: %lu " - "Open tables: %u Queries per second avg: %.3f", + "Open tables: %u Queries per second avg: %u.%u", uptime, (int) thread_count, (ulong) thd->query_id, current_global_status_var.long_query_count, current_global_status_var.opened_tables, refresh_version, cached_open_tables(), - (uptime ? 
(ulonglong2double(thd->query_id) / - (double) uptime) : (double) 0)); + (uint) (queries_per_second1000 / 1000), + (uint) (queries_per_second1000 % 1000)); #ifdef SAFEMALLOC if (sf_malloc_cur_memory) // Using SAFEMALLOC { @@ -1348,8 +1363,9 @@ void log_slow_statement(THD *thd) int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, enum enum_schema_tables schema_table_idx) { + SELECT_LEX *schema_select_lex= NULL; DBUG_ENTER("prepare_schema_table"); - SELECT_LEX *sel= 0; + switch (schema_table_idx) { case SCH_SCHEMATA: #if defined(DONT_ALLOW_SHOW_COMMANDS) @@ -1357,11 +1373,9 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ DBUG_RETURN(1); #else - if ((specialflag & SPECIAL_SKIP_SHOW_DB) && - check_global_access(thd, SHOW_DB_ACL)) - DBUG_RETURN(1); break; #endif + case SCH_TABLE_NAMES: case SCH_TABLES: case SCH_VIEWS: @@ -1380,55 +1394,38 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, { DBUG_RETURN(1); } - db.str= lex->select_lex.db; + schema_select_lex= new SELECT_LEX(); + db.str= schema_select_lex->db= lex->select_lex.db; + schema_select_lex->table_list.first= NULL; db.length= strlen(db.str); + if (check_db_name(&db)) { my_error(ER_WRONG_DB_NAME, MYF(0), db.str); DBUG_RETURN(1); } - if (check_access(thd, SELECT_ACL, db.str, &thd->col_access, 0, 0, - is_schema_db(db.str))) - DBUG_RETURN(1); /* purecov: inspected */ - if (!thd->col_access && check_grant_db(thd, db.str)) - { - my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), - thd->security_ctx->priv_user, thd->security_ctx->priv_host, - db.str); - DBUG_RETURN(1); - } break; } #endif case SCH_COLUMNS: case SCH_STATISTICS: + { #ifdef DONT_ALLOW_SHOW_COMMANDS my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ DBUG_RETURN(1); #else - if (table_ident) - { - TABLE_LIST **query_tables_last= lex->query_tables_last; - sel= new SELECT_LEX(); - /* 'parent_lex' is used in init_query() so it must be before it. */ - sel->parent_lex= lex; - sel->init_query(); - if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, - (List<String> *) 0, (List<String> *) 0)) - DBUG_RETURN(1); - lex->query_tables_last= query_tables_last; - TABLE_LIST *table_list= (TABLE_LIST*) sel->table_list.first; - char *db= table_list->db; - if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, - &table_list->grant.privilege, 0, 0, - test(table_list->schema_table))) - DBUG_RETURN(1); /* purecov: inspected */ - if (grant_option && check_grant(thd, SELECT_ACL, table_list, 2, - UINT_MAX, 0)) - DBUG_RETURN(1); - break; - } + DBUG_ASSERT(table_ident); + TABLE_LIST **query_tables_last= lex->query_tables_last; + schema_select_lex= new SELECT_LEX(); + /* 'parent_lex' is used in init_query() so it must be before it. */ + schema_select_lex->parent_lex= lex; + schema_select_lex->init_query(); + if (!schema_select_lex->add_table_to_list(thd, table_ident, 0, 0, TL_READ)) + DBUG_RETURN(1); + lex->query_tables_last= query_tables_last; + break; + } #endif case SCH_OPEN_TABLES: case SCH_VARIABLES: @@ -1454,7 +1451,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, DBUG_RETURN(1); } TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first; - table_list->schema_select_lex= sel; + table_list->schema_select_lex= schema_select_lex; table_list->schema_table_reformed= 1; DBUG_RETURN(0); } @@ -1687,8 +1684,7 @@ mysql_execute_command(THD *thd) Don't reset warnings when executing a stored routine. 
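/*
  Sketch of the fixed-point "Queries per second avg" computation above: the
  value is carried as QPS * 1000 in an integer instead of a double and split
  into whole and fractional parts for printing (zero-padded here for
  readability).
*/
#include <cstdio>

int main()
{
  unsigned long long query_id = 123456;     /* sample counters */
  unsigned long uptime = 789;               /* seconds since server start */

  unsigned long long qps1000 =
    uptime ? query_id * 1000ULL / uptime : 0;

  std::printf("Queries per second avg: %u.%03u\n",
              (unsigned) (qps1000 / 1000),
              (unsigned) (qps1000 % 1000));
  return 0;
}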
*/ if ((all_tables || &lex->select_lex != lex->all_selects_list || - lex->sroutines.records) && !thd->spcont || - lex->time_zone_tables_used) + lex->sroutines.records) && !thd->spcont) mysql_reset_errors(thd, 0); #ifdef HAVE_REPLICATION @@ -2171,7 +2167,7 @@ mysql_execute_command(THD *thd) if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, create_table, select_tables))) + if ((duplicate= unique_table(thd, create_table, select_tables, 0))) { update_non_unique_table_error(create_table, "CREATE", duplicate); res= 1; @@ -2187,7 +2183,7 @@ mysql_execute_command(THD *thd) tab= tab->next_local) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, tab, select_tables))) + if ((duplicate= unique_table(thd, tab, select_tables, 0))) { update_non_unique_table_error(tab, "CREATE", duplicate); res= 1; @@ -2594,6 +2590,36 @@ end_with_restore_list: break; } case SQLCOM_REPLACE: +#ifndef DBUG_OFF + if (mysql_bin_log.is_open()) + { + /* + Generate an incident log event before writing the real event + to the binary log. We put this event is before the statement + since that makes it simpler to check that the statement was + not executed on the slave (since incidents usually stop the + slave). + + Observe that any row events that are generated will be + generated before. + + This is only for testing purposes and will not be present in a + release build. + */ + + Incident incident= INCIDENT_NONE; + DBUG_PRINT("debug", ("Just before generate_incident()")); + DBUG_EXECUTE_IF("incident_database_resync_on_replace", + incident= INCIDENT_LOST_EVENTS;); + if (incident) + { + Incident_log_event ev(thd, incident); + mysql_bin_log.write(&ev); + mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); + } + DBUG_PRINT("debug", ("Just after generate_incident()")); + } +#endif case SQLCOM_INSERT: { DBUG_ASSERT(first_table == all_tables && first_table != 0); @@ -2813,7 +2839,7 @@ end_with_restore_list: we silently add IF EXISTS if TEMPORARY was used. */ if (thd->slave_thread) - lex->drop_if_exists= 1; + lex->drop_if_exists= 1; /* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */ thd->options|= OPTION_KEEP_LOG; @@ -2870,9 +2896,14 @@ end_with_restore_list: } #endif case SQLCOM_CHANGE_DB: - if (!mysql_change_db(thd,select_lex->db,FALSE)) + { + LEX_STRING db_str= { (char *) select_lex->db, strlen(select_lex->db) }; + + if (!mysql_change_db(thd, &db_str, FALSE)) send_ok(thd); + break; + } case SQLCOM_LOAD: { @@ -3150,13 +3181,16 @@ end_with_restore_list: switch (lex->sql_command) { case SQLCOM_CREATE_EVENT: - res= Events::get_instance()-> - create_event(thd, lex->event_parse_data, - lex->create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS); + { + bool if_not_exists= (lex->create_info.options & + HA_LEX_CREATE_IF_NOT_EXISTS); + res= Events::create_event(thd, lex->event_parse_data, if_not_exists); break; + } case SQLCOM_ALTER_EVENT: - res= Events::get_instance()->update_event(thd, lex->event_parse_data, - lex->spname); + res= Events::update_event(thd, lex->event_parse_data, + lex->spname ? &lex->spname->m_db : NULL, + lex->spname ? &lex->spname->m_name : NULL); break; default: DBUG_ASSERT(0); @@ -3174,39 +3208,16 @@ end_with_restore_list: } /* lex->unit.cleanup() is called outside, no need to call it here */ break; - case SQLCOM_DROP_EVENT: case SQLCOM_SHOW_CREATE_EVENT: - { - DBUG_ASSERT(lex->spname); - if (! 
lex->spname->m_db.str) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - goto error; - } - if (check_access(thd, EVENT_ACL, lex->spname->m_db.str, 0, 0, 0, - is_schema_db(lex->spname->m_db.str))) - break; - - if (lex->spname->m_name.length > NAME_LEN) - { - my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); - /* this jumps to the end of the function and skips own messaging */ - goto error; - } - - if (lex->sql_command == SQLCOM_SHOW_CREATE_EVENT) - res= Events::get_instance()->show_create_event(thd, lex->spname->m_db, - lex->spname->m_name); - else - { - if (!(res= Events::get_instance()->drop_event(thd, - lex->spname->m_db, - lex->spname->m_name, - lex->drop_if_exists))) - send_ok(thd); - } + res= Events::show_create_event(thd, lex->spname->m_db, + lex->spname->m_name); + break; + case SQLCOM_DROP_EVENT: + if (!(res= Events::drop_event(thd, + lex->spname->m_db, lex->spname->m_name, + lex->drop_if_exists))) + send_ok(thd); break; - } case SQLCOM_CREATE_FUNCTION: // UDF function { if (check_access(thd,INSERT_ACL,"mysql",0,1,0,0)) @@ -3535,8 +3546,7 @@ end_with_restore_list: res= TRUE; // cannot happen else { - if ((thd->options & - (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)) && + if (((thd->options & OPTION_KEEP_LOG) || thd->no_trans_update.all) && !thd->slave_thread) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARNING_NOT_COMPLETE_ROLLBACK, @@ -3922,7 +3932,6 @@ create_sp_error: if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0, 0)) goto error; - /* Does NOT write to binlog */ if (!(res = mysql_drop_function(thd, &lex->spname->m_name))) { send_ok(thd); @@ -3966,11 +3975,6 @@ create_sp_error: } case SQLCOM_SHOW_CREATE_PROC: { - if (lex->spname->m_name.length > NAME_LEN) - { - my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); - goto error; - } if (sp_show_create_procedure(thd, lex->spname) != SP_OK) { /* We don't distinguish between errors for now */ my_error(ER_SP_DOES_NOT_EXIST, MYF(0), @@ -3981,11 +3985,6 @@ create_sp_error: } case SQLCOM_SHOW_CREATE_FUNC: { - if (lex->spname->m_name.length > NAME_LEN) - { - my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); - goto error; - } if (sp_show_create_function(thd, lex->spname) != SP_OK) { /* We don't distinguish between errors for now */ my_error(ER_SP_DOES_NOT_EXIST, MYF(0), @@ -4014,11 +4013,6 @@ create_sp_error: { sp_head *sp; - if (lex->spname->m_name.length > NAME_LEN) - { - my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); - goto error; - } if (lex->sql_command == SQLCOM_SHOW_PROC_CODE) sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname, &thd->sp_proc_cache, FALSE); @@ -4109,9 +4103,8 @@ create_sp_error: thd->transaction.xid_state.xa_state=XA_ACTIVE; thd->transaction.xid_state.xid.set(thd->lex->xid); xid_cache_insert(&thd->transaction.xid_state); - thd->options= ((thd->options & ~(OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG)) | - OPTION_BEGIN); + thd->options= ((thd->options & ~(OPTION_KEEP_LOG)) | OPTION_BEGIN); + thd->no_trans_update.all= FALSE; thd->server_status|= SERVER_STATUS_IN_TRANS; send_ok(thd); break; @@ -4204,8 +4197,8 @@ create_sp_error: xa_state_names[thd->transaction.xid_state.xa_state]); break; } - thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG); + thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG); + thd->no_trans_update.all= FALSE; thd->server_status&= ~SERVER_STATUS_IN_TRANS; xid_cache_delete(&thd->transaction.xid_state); thd->transaction.xid_state.xa_state=XA_NOTR; @@ -4235,8 +4228,8 @@ 
create_sp_error: my_error(ER_XAER_RMERR, MYF(0)); else send_ok(thd); - thd->options&= ~(OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE | - OPTION_KEEP_LOG); + thd->options&= ~(OPTION_BEGIN | OPTION_KEEP_LOG); + thd->no_trans_update.all= FALSE; thd->server_status&= ~SERVER_STATUS_IN_TRANS; xid_cache_delete(&thd->transaction.xid_state); thd->transaction.xid_state.xa_state=XA_NOTR; @@ -4273,6 +4266,10 @@ create_sp_error: int error; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_CREATE_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((error= create_server(thd, &lex->server_options))) { DBUG_PRINT("info", ("problem creating server <%s>", @@ -4288,6 +4285,10 @@ create_sp_error: int error; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_ALTER_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((error= alter_server(thd, &lex->server_options))) { DBUG_PRINT("info", ("problem altering server <%s>", @@ -4303,9 +4304,13 @@ create_sp_error: int err_code; LEX *lex= thd->lex; DBUG_PRINT("info", ("case SQLCOM_DROP_SERVER")); + + if (check_global_access(thd, SUPER_ACL)) + break; + if ((err_code= drop_server(thd, &lex->server_options))) { - if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_EXISTS) + if (! lex->drop_if_exists && err_code == ER_FOREIGN_SERVER_DOESNT_EXIST) { DBUG_PRINT("info", ("problem dropping server %s", lex->server_options.server_name)); @@ -4431,6 +4436,8 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) thd Thread handler privilege requested privilege all_tables global table list of query + no_errors FALSE/TRUE - report/don't report error to + the client (using my_error() call). RETURN 0 - OK @@ -4438,7 +4445,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) */ bool check_single_table_access(THD *thd, ulong privilege, - TABLE_LIST *all_tables) + TABLE_LIST *all_tables, bool no_errors) { Security_context * backup_ctx= thd->security_ctx; @@ -4454,12 +4461,15 @@ bool check_single_table_access(THD *thd, ulong privilege, db_name= all_tables->db; if (check_access(thd, privilege, db_name, - &all_tables->grant.privilege, 0, 0, + &all_tables->grant.privilege, 0, no_errors, test(all_tables->schema_table))) goto deny; /* Show only 1 table for check_grant */ - if (grant_option && check_grant(thd, privilege, all_tables, 0, 1, 0)) + if (grant_option && + !(all_tables->belong_to_view && + (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) && + check_grant(thd, privilege, all_tables, 0, 1, no_errors)) goto deny; thd->security_ctx= backup_ctx; @@ -4487,7 +4497,7 @@ deny: bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables) { - if (check_single_table_access (thd,privilege,all_tables)) + if (check_single_table_access (thd,privilege,all_tables, FALSE)) return 1; /* Check rights on tables of subselects and implictly opened tables */ @@ -4500,7 +4510,7 @@ bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables) */ if (view && subselects_tables->belong_to_view == view) { - if (check_single_table_access (thd, privilege, subselects_tables)) + if (check_single_table_access (thd, privilege, subselects_tables, FALSE)) return 1; subselects_tables= subselects_tables->next_global; } @@ -4676,6 +4686,66 @@ bool check_global_access(THD *thd, ulong want_access) } +static bool check_show_access(THD *thd, TABLE_LIST *table) +{ + switch (get_schema_table_idx(table->schema_table)) { + case SCH_SCHEMATA: + return (specialflag & SPECIAL_SKIP_SHOW_DB) && + check_global_access(thd, 
SHOW_DB_ACL); + + case SCH_TABLE_NAMES: + case SCH_TABLES: + case SCH_VIEWS: + case SCH_TRIGGERS: + case SCH_EVENTS: + { + const char *dst_db_name= table->schema_select_lex->db; + + DBUG_ASSERT(dst_db_name); + + if (check_access(thd, SELECT_ACL, dst_db_name, + &thd->col_access, FALSE, FALSE, + is_schema_db(dst_db_name))) + return TRUE; + + if (!thd->col_access && check_grant_db(thd, dst_db_name)) + { + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->security_ctx->priv_user, + thd->security_ctx->priv_host, + dst_db_name); + return TRUE; + } + + return FALSE; + } + + case SCH_COLUMNS: + case SCH_STATISTICS: + { + TABLE_LIST *dst_table; + dst_table= (TABLE_LIST *) table->schema_select_lex->table_list.first; + + DBUG_ASSERT(dst_table); + + if (check_access(thd, SELECT_ACL | EXTRA_ACL, + dst_table->db, + &dst_table->grant.privilege, + FALSE, FALSE, + test(dst_table->schema_table))) + return FALSE; + + return (grant_option && + check_grant(thd, SELECT_ACL, dst_table, 2, UINT_MAX, FALSE)); + } + default: + break; + } + + return FALSE; +} + + /* Check the privilege for all used tables. @@ -4705,7 +4775,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, { uint found=0; ulong found_access=0; -#ifndef EMBEDDED_LIBRARY +#ifndef NO_EMBEDDED_ACCESS_CHECKS TABLE_LIST *org_tables= tables; #endif TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table(); @@ -4728,7 +4798,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, if (!no_errors) my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), sctx->priv_user, sctx->priv_host, - information_schema_name.str); + INFORMATION_SCHEMA_NAME.str); return TRUE; } /* @@ -4736,10 +4806,17 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, Remove SHOW_VIEW_ACL, because it will be checked during making view */ tables->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL); - if (tables->derived || tables->schema_table || - (tables->table && (int)tables->table->s->tmp_table) || - my_tz_check_n_skip_implicit_tables(&tables, - thd->lex->time_zone_tables_used)) + + if (tables->schema_table_reformed) + { + if (check_show_access(thd, tables)) + goto deny; + + continue; + } + + if (tables->derived || + (tables->table && (int)tables->table->s->tmp_table)) continue; thd->security_ctx= sctx; if ((sctx->master_access & want_access) == @@ -4971,7 +5048,7 @@ bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize) ****************************************************************************/ void -mysql_init_query(THD *thd, uchar *buf, uint length) +mysql_init_query(THD *thd, const char *buf, uint length) { DBUG_ENTER("mysql_init_query"); lex_start(thd, buf, length); @@ -5018,8 +5095,10 @@ void mysql_reset_thd_for_next_command(THD *thd) in ha_rollback_trans() about some tables couldn't be rolled back. */ if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) - thd->options&= ~(OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG); - + { + thd->options&= ~OPTION_KEEP_LOG; + thd->no_trans_update.all= FALSE; + } DBUG_ASSERT(thd->security_ctx== &thd->main_security_ctx); thd->tmp_table_used= 0; if (!thd->in_sub_stmt) @@ -5183,6 +5262,7 @@ void mysql_init_multi_delete(LEX *lex) lex->query_tables_last= &lex->query_tables; } + /* When you modify mysql_parse(), you may need to mofify mysql_test_parse_for_slave() in this same file. 
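/*
  Sketch of the dispatch style of check_show_access() above: the kind of
  INFORMATION_SCHEMA table being shown decides whether a database-level or a
  table-level privilege check is needed, and the remaining kinds need no extra
  check.  Enum values and the stubbed checks are simplified, not the real ACL
  code.
*/
#include <cstdio>

enum schema_kind
{ SCH_SCHEMATA, SCH_TABLES, SCH_COLUMNS, SCH_STATISTICS, SCH_VARIABLES };

static bool check_db(const char *db)
{ std::printf("db-level check on %s\n", db); return false; }

static bool check_one_table(const char *table)
{ std::printf("table-level check on %s\n", table); return false; }

static bool show_access_denied(schema_kind kind, const char *db, const char *table)
{
  switch (kind)
  {
  case SCH_TABLES:                 /* SHOW TABLES / SHOW TABLE STATUS ... */
    return check_db(db);
  case SCH_COLUMNS:                /* SHOW COLUMNS / SHOW INDEX ... */
  case SCH_STATISTICS:
    return check_one_table(table);
  default:                         /* SHOW VARIABLES and the like: nothing extra */
    return false;
  }
}

int main()
{
  return show_access_denied(SCH_COLUMNS, "test", "t1") ? 1 : 0;
}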
@@ -5194,7 +5274,8 @@ void mysql_parse(THD *thd, char *inBuf, uint length) DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on();); - mysql_init_query(thd, (uchar*) inBuf, length); + mysql_init_query(thd, inBuf, length); + if (query_cache_send_result_to_client(thd, inBuf, length) <= 0) { LEX *lex= thd->lex; @@ -5273,7 +5354,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) bool error= 0; DBUG_ENTER("mysql_test_parse_for_slave"); - mysql_init_query(thd, (uchar*) inBuf, length); + mysql_init_query(thd, inBuf, length); if (!MYSQLparse((void*) thd) && ! thd->is_fatal_error && all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first)) error= 1; /* Ignore question */ @@ -5290,7 +5371,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) ** Return 0 if ok ******************************************************************************/ -bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, +bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type, char *length, char *decimals, uint type_modifier, Item *default_value, Item *on_update_value, @@ -5303,14 +5384,15 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, LEX *lex= thd->lex; DBUG_ENTER("add_field_to_list"); - if (strlen(field_name) > NAME_LEN) + if (check_string_char_length(field_name, "", NAME_CHAR_LEN, + system_charset_info, 1)) { - my_error(ER_TOO_LONG_IDENT, MYF(0), field_name); /* purecov: inspected */ + my_error(ER_TOO_LONG_IDENT, MYF(0), field_name->str); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } if (type_modifier & PRI_KEY_FLAG) { - lex->col_list.push_back(new key_part_spec(field_name,0)); + lex->col_list.push_back(new key_part_spec(field_name->str, 0)); lex->key_list.push_back(new Key(Key::PRIMARY, NullS, &default_key_create_info, 0, lex->col_list)); @@ -5318,7 +5400,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, } if (type_modifier & (UNIQUE_FLAG | UNIQUE_KEY_FLAG)) { - lex->col_list.push_back(new key_part_spec(field_name,0)); + lex->col_list.push_back(new key_part_spec(field_name->str, 0)); lex->key_list.push_back(new Key(Key::UNIQUE, NullS, &default_key_create_info, 0, lex->col_list)); @@ -5338,7 +5420,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, !(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC && type == MYSQL_TYPE_TIMESTAMP)) { - my_error(ER_INVALID_DEFAULT, MYF(0), field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str); DBUG_RETURN(1); } else if (default_value->type() == Item::NULL_ITEM) @@ -5347,20 +5429,20 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, if ((type_modifier & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) == NOT_NULL_FLAG) { - my_error(ER_INVALID_DEFAULT, MYF(0), field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str); DBUG_RETURN(1); } } else if (type_modifier & AUTO_INCREMENT_FLAG) { - my_error(ER_INVALID_DEFAULT, MYF(0), field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str); DBUG_RETURN(1); } } if (on_update_value && type != MYSQL_TYPE_TIMESTAMP) { - my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name); + my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name->str); DBUG_RETURN(1); } @@ -5376,7 +5458,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, } if (!(new_field= new create_field()) || - new_field->init(thd, field_name, type, length, decimals, type_modifier, + new_field->init(thd, 
field_name->str, type, length, decimals, type_modifier, default_value, on_update_value, comment, change, interval_list, cs, uint_geom_type)) DBUG_RETURN(1); @@ -5458,8 +5540,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, LEX_STRING *alias, ulong table_options, thr_lock_type lock_type, - List<String> *use_index_arg, - List<String> *ignore_index_arg, + List<index_hint> *index_hints_arg, LEX_STRING *option) { register TABLE_LIST *ptr; @@ -5518,7 +5599,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->ignore_leaves= test(table_options & TL_OPTION_IGNORE_LEAVES); ptr->derived= table->sel; if (!ptr->derived && !my_strcasecmp(system_charset_info, ptr->db, - information_schema_name.str)) + INFORMATION_SCHEMA_NAME.str)) { ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->table_name); if (!schema_table || @@ -5526,7 +5607,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0)) { my_error(ER_UNKNOWN_TABLE, MYF(0), - ptr->table_name, information_schema_name.str); + ptr->table_name, INFORMATION_SCHEMA_NAME.str); DBUG_RETURN(0); } ptr->schema_table_name= ptr->table_name; @@ -5534,12 +5615,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, } ptr->select_lex= lex->current_select; ptr->cacheable_table= 1; - if (use_index_arg) - ptr->use_index=(List<String> *) thd->memdup((gptr) use_index_arg, - sizeof(*use_index_arg)); - if (ignore_index_arg) - ptr->ignore_index=(List<String> *) thd->memdup((gptr) ignore_index_arg, - sizeof(*ignore_index_arg)); + ptr->index_hints= index_hints_arg; ptr->option= option ? option->str : 0; /* check that used name is unique */ if (lock_type != TL_IGNORE) @@ -6507,14 +6583,12 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables) /* Is there tables of subqueries? */ - if (&lex->select_lex != lex->all_selects_list || lex->time_zone_tables_used) + if (&lex->select_lex != lex->all_selects_list) { DBUG_PRINT("info",("Checking sub query list")); for (table= tables; table; table= table->next_global) { - if (!my_tz_check_n_skip_implicit_tables(&table, - lex->time_zone_tables_used) && - !table->table_in_first_from_clause) + if (!table->table_in_first_from_clause) { if (check_access(thd, SELECT_ACL, table->db, &table->grant.privilege, 0, 0, @@ -6936,26 +7010,62 @@ LEX_USER *get_current_user(THD *thd, LEX_USER *user) /* - Check that length of a string does not exceed some limit. + Check that byte length of a string does not exceed some limit. SYNOPSIS - check_string_length() - str string to be checked - err_msg error message to be displayed if the string is too long - max_length max length + check_string_byte_length() + str string to be checked + err_msg error message to be displayed if the string is too long + max_byte_length max length in bytes RETURN FALSE the passed string is not longer than max_length TRUE the passed string is longer than max_length + + NOTE + The function is not used in existing code but can be useful later? */ -bool check_string_length(LEX_STRING *str, const char *err_msg, - uint max_length) +bool check_string_byte_length(LEX_STRING *str, const char *err_msg, + uint max_byte_length) { - if (str->length <= max_length) + if (str->length <= max_byte_length) return FALSE; - my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_length); + my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_byte_length); + + return TRUE; +} + + +/* + Check that char length of a string does not exceed some limit. 
+ + SYNOPSIS + check_string_char_length() + str string to be checked + err_msg error message to be displayed if the string is too long + max_char_length max length in symbols + cs string charset + + RETURN + FALSE the passed string is not longer than max_char_length + TRUE the passed string is longer than max_char_length +*/ + + +bool check_string_char_length(LEX_STRING *str, const char *err_msg, + uint max_char_length, CHARSET_INFO *cs, + bool no_error) +{ + int well_formed_error; + uint res= cs->cset->well_formed_len(cs, str->str, str->str + str->length, + max_char_length, &well_formed_error); + + if (!well_formed_error && str->length == res) + return FALSE; + if (!no_error) + my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_char_length); return TRUE; } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index dbac53ed5f6..d445c8bfbe0 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1892,12 +1892,15 @@ static int add_partition_options(File fptr, partition_element *p_elem) err+= add_keyword_int(fptr,"MAX_ROWS",(longlong)p_elem->part_max_rows); if (p_elem->part_min_rows) err+= add_keyword_int(fptr,"MIN_ROWS",(longlong)p_elem->part_min_rows); - if (p_elem->data_file_name) - err+= add_keyword_string(fptr, "DATA DIRECTORY", TRUE, - p_elem->data_file_name); - if (p_elem->index_file_name) - err+= add_keyword_string(fptr, "INDEX DIRECTORY", TRUE, - p_elem->index_file_name); + if (!(current_thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)) + { + if (p_elem->data_file_name) + err+= add_keyword_string(fptr, "DATA DIRECTORY", TRUE, + p_elem->data_file_name); + if (p_elem->index_file_name) + err+= add_keyword_string(fptr, "INDEX DIRECTORY", TRUE, + p_elem->index_file_name); + } if (p_elem->part_comment) err+= add_keyword_string(fptr, "COMMENT", TRUE, p_elem->part_comment); return err + add_engine(fptr,p_elem->engine_type); @@ -3700,9 +3703,9 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, possible to retrace this given an item tree. */ -bool mysql_unpack_partition(THD *thd, const uchar *part_buf, - uint part_info_len, - uchar *part_state, uint part_state_len, +bool mysql_unpack_partition(THD *thd, + const char *part_buf, uint part_info_len, + const char *part_state, uint part_state_len, TABLE* table, bool is_create_table_ind, handlerton *default_db_type) { diff --git a/sql/sql_partition.h b/sql/sql_partition.h index 7ed43527688..e0c0f1c5bd3 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -77,9 +77,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info, const key_range *key_spec, part_id_range *part_spec); -bool mysql_unpack_partition(THD *thd, const uchar *part_buf, +bool mysql_unpack_partition(THD *thd, const char *part_buf, uint part_info_len, - uchar *part_state, uint part_state_len, + const char *part_state, uint part_state_len, TABLE *table, bool is_create_table_ind, handlerton *default_db_type); void make_used_partitions_str(partition_info *part_info, String *parts_str); diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index fff324e139c..bc3e3cf0b05 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -162,7 +162,8 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report) plugin directory are used (to make this even remotely secure). 
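/*
  Standalone sketch of the byte-length vs character-length distinction behind
  check_string_char_length() above: a limit expressed in characters has to be
  measured with charset-aware scanning, because a multi-byte identifier can
  use several bytes per character.  The tiny UTF-8 counter below stands in for
  the charset's well_formed_len() hook.
*/
#include <cstdio>
#include <cstring>

static size_t utf8_char_count(const char *s, size_t bytes)
{
  size_t chars = 0;
  for (size_t i = 0; i < bytes; i++)
    if (((unsigned char) s[i] & 0xC0) != 0x80)   /* skip continuation bytes */
      chars++;
  return chars;
}

int main()
{
  /* the Cyrillic word for "table": 7 characters, 14 bytes in UTF-8 */
  const char *ident = "\xD1\x82\xD0\xB0\xD0\xB1\xD0\xBB\xD0\xB8\xD1\x86\xD0\xB0";
  size_t bytes = std::strlen(ident);
  size_t chars = utf8_char_count(ident, bytes);
  const size_t max_chars = 64;                   /* a NAME_CHAR_LEN-style limit */

  std::printf("bytes=%u chars=%u within limit=%d\n",
              (unsigned) bytes, (unsigned) chars, (int) (chars <= max_chars));
  return 0;
}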
*/ if (my_strchr(files_charset_info, dl->str, dl->str + dl->length, FN_LIBCHAR) || - dl->length > NAME_LEN || + check_string_char_length((LEX_STRING *) dl, "", NAME_CHAR_LEN, + system_charset_info, 1) || plugin_dir_len + dl->length + 1 >= FN_REFLEN) { if (report & REPORT_TO_USER) @@ -944,8 +945,7 @@ my_bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name) table->use_all_columns(); table->field[0]->store(name->str, name->length, system_charset_info); if (! table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { int error; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index ce6072b2a63..31a6c7af04a 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -34,6 +34,12 @@ When one prepares a statement: [Params meta info (stubs only for now)] (if Param_count > 0) [Columns meta info] (if Column_count > 0) + During prepare the tables used in a statement are opened, but no + locks are acquired. Table opening will block any DDL during the + operation, and we do not need any locks as we neither read nor + modify any data during prepare. Tables are closed after prepare + finishes. + When one executes a statement: - Server gets the command 'COM_STMT_EXECUTE' to execute the @@ -53,6 +59,10 @@ When one executes a statement: - Execute the query without re-parsing and send back the results to client + During execution of prepared statement tables are opened and locked + the same way they would for normal (non-prepared) statement + execution. Tables are unlocked and closed after the execution. + When one supplies long data for a placeholder: - Server gets the long data in pieces with command type @@ -83,11 +93,11 @@ When one supplies long data for a placeholder: /* A result class used to send cursor rows using the binary protocol. */ -class Select_fetch_protocol_prep: public select_send +class Select_fetch_protocol_binary: public select_send { - Protocol_prep protocol; + Protocol_binary protocol; public: - Select_fetch_protocol_prep(THD *thd); + Select_fetch_protocol_binary(THD *thd); virtual bool send_fields(List<Item> &list, uint flags); virtual bool send_data(List<Item> &items); virtual bool send_eof(); @@ -115,7 +125,7 @@ public: }; THD *thd; - Select_fetch_protocol_prep result; + Select_fetch_protocol_binary result; Protocol *protocol; Item_param **param_array; uint param_count; @@ -237,9 +247,9 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) */ DBUG_RETURN(my_net_write(net, buff, sizeof(buff)) || (stmt->param_count && - stmt->thd->protocol_simple.send_fields((List<Item> *) - &stmt->lex->param_list, - Protocol::SEND_EOF))); + stmt->thd->protocol_text.send_fields((List<Item> *) + &stmt->lex->param_list, + Protocol::SEND_EOF))); } #else static bool send_prep_stmt(Prepared_statement *stmt, @@ -681,7 +691,7 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, and generate a valid query for logging. NOTES - This function, along with other _withlog functions is called when one of + This function, along with other _with_log functions is called when one of binary, slow or general logs is open. 
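/*
  Sketch of the shared-object name validation shown above for plugin loading:
  the name may not contain a path separator, must stay within the identifier
  length limit, and must fit together with the plugin directory into the path
  buffer.  The limits and the '/' separator below are illustrative
  assumptions, not the real FN_LIBCHAR / NAME_CHAR_LEN / FN_REFLEN values.
*/
#include <cstdio>
#include <cstring>

static bool dl_name_ok(const char *dl, size_t plugin_dir_len,
                       size_t name_max, size_t path_max)
{
  size_t len = std::strlen(dl);
  if (std::strchr(dl, '/') != 0)               /* no directory components */
    return false;
  if (len > name_max)                          /* identifier too long */
    return false;
  return plugin_dir_len + len + 1 < path_max;  /* full path must fit */
}

int main()
{
  std::printf("%d %d\n",
              (int) dl_name_ok("ha_example.so", 32, 64, 512),
              (int) dl_name_ok("../../tmp/evil.so", 32, 64, 512));
  return 0;
}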
Logging of prepared statements in all cases is performed by means of conventional queries: if parameter data was supplied from C API, each placeholder in the query is @@ -705,9 +715,9 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, 0 if success, 1 otherwise */ -static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, - uchar *read_pos, uchar *data_end, - String *query) +static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array, + uchar *read_pos, uchar *data_end, + String *query) { THD *thd= stmt->thd; Item_param **begin= stmt->param_array; @@ -715,7 +725,7 @@ static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, uint32 length= 0; String str; const String *res; - DBUG_ENTER("insert_params_withlog"); + DBUG_ENTER("insert_params_with_log"); if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); @@ -859,7 +869,8 @@ static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query) } -static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) +static bool emb_insert_params_with_log(Prepared_statement *stmt, + String *query) { THD *thd= stmt->thd; Item_param **it= stmt->param_array; @@ -870,7 +881,7 @@ static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) const String *res; uint32 length= 0; - DBUG_ENTER("emb_insert_params_withlog"); + DBUG_ENTER("emb_insert_params_with_log"); if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); @@ -1071,7 +1082,7 @@ static bool mysql_test_insert(Prepared_statement *stmt, if (mysql_prepare_insert(thd, table_list, table_list->table, fields, values, update_fields, update_values, - duplic, &unused_conds, FALSE)) + duplic, &unused_conds, FALSE, FALSE, FALSE)) goto error; value_count= values->elements; @@ -1129,32 +1140,20 @@ static int mysql_test_update(Prepared_statement *stmt, #ifndef NO_EMBEDDED_ACCESS_CHECKS uint want_privilege; #endif - bool need_reopen; DBUG_ENTER("mysql_test_update"); - if (update_precheck(thd, table_list)) + if (update_precheck(thd, table_list) || + open_tables(thd, &table_list, &table_count, 0)) goto error; - for ( ; ; ) + if (table_list->multitable_view) { - if (open_tables(thd, &table_list, &table_count, 0)) - goto error; - - if (table_list->multitable_view) - { - DBUG_ASSERT(table_list->view != 0); - DBUG_PRINT("info", ("Switch to multi-update")); - /* pass counter value */ - thd->lex->table_count= table_count; - /* convert to multiupdate */ - DBUG_RETURN(2); - } - - if (!lock_tables(thd, table_list, table_count, &need_reopen)) - break; - if (!need_reopen) - goto error; - close_tables_for_reopen(thd, &table_list); + DBUG_ASSERT(table_list->view != 0); + DBUG_PRINT("info", ("Switch to multi-update")); + /* pass counter value */ + thd->lex->table_count= table_count; + /* convert to multiupdate */ + DBUG_RETURN(2); } /* @@ -1221,7 +1220,7 @@ static bool mysql_test_delete(Prepared_statement *stmt, DBUG_ENTER("mysql_test_delete"); if (delete_precheck(thd, table_list) || - open_and_lock_tables(thd, table_list)) + open_normal_and_derived_tables(thd, table_list, 0)) goto error; if (!table_list->table) @@ -1280,7 +1279,7 @@ static int mysql_test_select(Prepared_statement *stmt, goto error; } - if (open_and_lock_tables(thd, tables)) + if (open_normal_and_derived_tables(thd, tables, 0)) goto error; thd->used_tables= 0; // Updated by setup_fields @@ -1341,7 +1340,7 @@ static bool mysql_test_do_fields(Prepared_statement *stmt, if 
(tables && check_table_access(thd, SELECT_ACL, tables, 0)) DBUG_RETURN(TRUE); - if (open_and_lock_tables(thd, tables)) + if (open_normal_and_derived_tables(thd, tables, 0)) DBUG_RETURN(TRUE); DBUG_RETURN(setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0)); } @@ -1371,7 +1370,7 @@ static bool mysql_test_set_fields(Prepared_statement *stmt, set_var_base *var; if (tables && check_table_access(thd, SELECT_ACL, tables, 0) || - open_and_lock_tables(thd, tables)) + open_normal_and_derived_tables(thd, tables, 0)) goto error; while ((var= it++)) @@ -1397,7 +1396,7 @@ error: NOTE This function won't directly open tables used in select. They should be opened either by calling function (and in this case you probably - should use select_like_stmt_test_with_open_n_lock()) or by + should use select_like_stmt_test_with_open()) or by "specific_prepare" call (like this happens in case of multi-update). RETURN VALUE @@ -1425,14 +1424,14 @@ static bool select_like_stmt_test(Prepared_statement *stmt, } /* - Check internal SELECT of the prepared command (with opening and - locking of used tables). + Check internal SELECT of the prepared command (with opening of used + tables). SYNOPSIS - select_like_stmt_test_with_open_n_lock() + select_like_stmt_test_with_open() stmt prepared statement - tables list of tables to be opened and locked - before calling specific_prepare function + tables list of tables to be opened before calling + specific_prepare function specific_prepare function of command specific prepare setup_tables_done_option options to be passed to LEX::unit.prepare() @@ -1442,19 +1441,20 @@ static bool select_like_stmt_test(Prepared_statement *stmt, */ static bool -select_like_stmt_test_with_open_n_lock(Prepared_statement *stmt, - TABLE_LIST *tables, - bool (*specific_prepare)(THD *thd), - ulong setup_tables_done_option) +select_like_stmt_test_with_open(Prepared_statement *stmt, + TABLE_LIST *tables, + bool (*specific_prepare)(THD *thd), + ulong setup_tables_done_option) { - DBUG_ENTER("select_like_stmt_test_with_open_n_lock"); + DBUG_ENTER("select_like_stmt_test_with_open"); /* - We should not call LEX::unit.cleanup() after this open_and_lock_tables() - call because we don't allow prepared EXPLAIN yet so derived tables will - clean up after themself. + We should not call LEX::unit.cleanup() after this + open_normal_and_derived_tables() call because we don't allow + prepared EXPLAIN yet so derived tables will clean up after + themself. */ - if (open_and_lock_tables(stmt->thd, tables)) + if (open_normal_and_derived_tables(stmt->thd, tables, 0)) DBUG_RETURN(TRUE); DBUG_RETURN(select_like_stmt_test(stmt, specific_prepare, @@ -1493,7 +1493,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt) if (select_lex->item_list.elements) { select_lex->context.resolve_in_select_list= TRUE; - res= select_like_stmt_test_with_open_n_lock(stmt, tables, 0, 0); + res= select_like_stmt_test_with_open(stmt, tables, 0, 0); } /* put tables back for PS rexecuting */ @@ -1553,9 +1553,9 @@ static bool mysql_test_multidelete(Prepared_statement *stmt, } if (multi_delete_precheck(stmt->thd, tables) || - select_like_stmt_test_with_open_n_lock(stmt, tables, - &mysql_multi_delete_prepare, - OPTION_SETUP_TABLES_DONE)) + select_like_stmt_test_with_open(stmt, tables, + &mysql_multi_delete_prepare, + OPTION_SETUP_TABLES_DONE)) goto error; if (!tables->table) { @@ -1571,15 +1571,16 @@ error: /* Wrapper for mysql_insert_select_prepare, to make change of local tables - after open_and_lock_tables() call. 
+ after open_normal_and_derived_tables() call. SYNOPSIS mysql_insert_select_prepare_tester() thd thread handle NOTE - We need to remove the first local table after open_and_lock_tables, - because mysql_handle_derived uses local tables lists. + We need to remove the first local table after + open_normal_and_derived_tables(), because mysql_handle_derived + uses local tables lists. */ static bool mysql_insert_select_prepare_tester(THD *thd) @@ -1631,9 +1632,9 @@ static bool mysql_test_insert_select(Prepared_statement *stmt, DBUG_ASSERT(first_local_table != 0); res= - select_like_stmt_test_with_open_n_lock(stmt, tables, - &mysql_insert_select_prepare_tester, - OPTION_SETUP_TABLES_DONE); + select_like_stmt_test_with_open(stmt, tables, + &mysql_insert_select_prepare_tester, + OPTION_SETUP_TABLES_DONE); /* revert changes made by mysql_insert_select_prepare_tester */ lex->select_lex.table_list.first= (byte*) first_local_table; return res; @@ -1669,7 +1670,7 @@ static bool check_prepared_statement(Prepared_statement *stmt, enum enum_sql_command sql_command= lex->sql_command; int res= 0; DBUG_ENTER("check_prepared_statement"); - DBUG_PRINT("enter",("command: %d, param_count: %u", + DBUG_PRINT("enter",("command: %d param_count: %u", sql_command, stmt->param_count)); lex->first_lists_tables_same(); @@ -1889,7 +1890,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) /* First of all clear possible warnings from the previous command */ mysql_reset_thd_for_next_command(thd); - if (! (stmt= new Prepared_statement(thd, &thd->protocol_prep))) + if (! (stmt= new Prepared_statement(thd, &thd->protocol_binary))) DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */ if (thd->stmt_map.insert(thd, stmt)) @@ -2061,8 +2062,8 @@ void mysql_sql_stmt_prepare(THD *thd) const char *query; uint query_len; DBUG_ENTER("mysql_sql_stmt_prepare"); - DBUG_ASSERT(thd->protocol == &thd->protocol_simple); LINT_INIT(query_len); + DBUG_ASSERT(thd->protocol == &thd->protocol_text); if ((stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) { @@ -2075,7 +2076,7 @@ void mysql_sql_stmt_prepare(THD *thd) } if (! (query= get_dynamic_sql_string(lex, &query_len)) || - ! (stmt= new Prepared_statement(thd, &thd->protocol_simple))) + ! 
(stmt= new Prepared_statement(thd, &thd->protocol_text))) { DBUG_VOID_RETURN; /* out of memory */ } @@ -2628,14 +2629,14 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) /*************************************************************************** - Select_fetch_protocol_prep + Select_fetch_protocol_binary ****************************************************************************/ -Select_fetch_protocol_prep::Select_fetch_protocol_prep(THD *thd_arg) +Select_fetch_protocol_binary::Select_fetch_protocol_binary(THD *thd_arg) :protocol(thd_arg) {} -bool Select_fetch_protocol_prep::send_fields(List<Item> &list, uint flags) +bool Select_fetch_protocol_binary::send_fields(List<Item> &list, uint flags) { bool rc; Protocol *save_protocol= thd->protocol; @@ -2653,7 +2654,7 @@ bool Select_fetch_protocol_prep::send_fields(List<Item> &list, uint flags) return rc; } -bool Select_fetch_protocol_prep::send_eof() +bool Select_fetch_protocol_binary::send_eof() { Protocol *save_protocol= thd->protocol; @@ -2665,7 +2666,7 @@ bool Select_fetch_protocol_prep::send_eof() bool -Select_fetch_protocol_prep::send_data(List<Item> &fields) +Select_fetch_protocol_binary::send_data(List<Item> &fields) { Protocol *save_protocol= thd->protocol; bool rc; @@ -2699,15 +2700,26 @@ Prepared_statement::Prepared_statement(THD *thd_arg, Protocol *protocol_arg) void Prepared_statement::setup_set_params() { - /* Setup binary logging */ + /* + Note: BUG#25843 applies here too (query cache lookup uses thd->db, not + db from "prepare" time). + */ + if (query_cache_maybe_disabled(thd)) // we won't expand the query + lex->safe_to_cache_query= FALSE; // so don't cache it at Execution + + /* + Decide if we have to expand the query (because we must write it to logs or + because we want to look it up in the query cache) or not. + */ if (mysql_bin_log.is_open() && is_update_query(lex->sql_command) || - opt_log || opt_slow_log) + opt_log || opt_slow_log || + query_cache_is_cacheable_query(lex)) { set_params_from_vars= insert_params_from_vars_with_log; #ifndef EMBEDDED_LIBRARY - set_params= insert_params_withlog; + set_params= insert_params_with_log; #else - set_params_data= emb_insert_params_withlog; + set_params_data= emb_insert_params_with_log; #endif } else @@ -2838,13 +2850,12 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) old_stmt_arena= thd->stmt_arena; thd->stmt_arena= this; - lex_start(thd, (uchar*) thd->query, thd->query_length); + lex_start(thd, thd->query, thd->query_length); lex->stmt_prepare_mode= TRUE; error= MYSQLparse((void *)thd) || thd->is_fatal_error || thd->net.report_error || init_param_array(this); - lex->safe_to_cache_query= FALSE; /* While doing context analysis of the query (in check_prepared_statement) we allocate a lot of additional memory: for open tables, JOINs, derived @@ -2887,6 +2898,18 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) thd->restore_backup_statement(this, &stmt_backup); thd->stmt_arena= old_stmt_arena; + if ((protocol->type() == Protocol::PROTOCOL_TEXT) && (param_count > 0)) + { + /* + This is a mysql_sql_stmt_prepare(); query expansion will insert user + variable references, and user variables are uncacheable, thus we have to + mark this statement as uncacheable. + This has to be done before setup_set_params(), as it may make expansion + unneeded. 
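The condition added to setup_set_params() above boils down to: expand the query text whenever it may be written to the binary, general or slow log, or looked up in the query cache. A standalone restatement of that predicate, where plain booleans stand in for the real server-side checks (mysql_bin_log.is_open(), is_update_query(), opt_log, opt_slow_log, query_cache_is_cacheable_query()):

// Sketch: mirrors the expansion decision in setup_set_params(); the inputs
// are placeholders for the corresponding server-side checks.
static bool needs_query_expansion(bool binlog_open, bool is_update_stmt,
                                  bool general_log_on, bool slow_log_on,
                                  bool cacheable_query)
{
  return (binlog_open && is_update_stmt) ||
         general_log_on || slow_log_on ||
         cacheable_query;
}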
+ */ + lex->safe_to_cache_query= FALSE; + } + if (error == 0) { setup_set_params(); @@ -3003,11 +3026,26 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) reinit_stmt_before_use(thd, lex); thd->protocol= protocol; /* activate stmt protocol */ - error= (open_cursor ? - mysql_open_cursor(thd, (uint) ALWAYS_MATERIALIZED_CURSOR, - &result, &cursor) : - mysql_execute_command(thd)); - thd->protocol= &thd->protocol_simple; /* use normal protocol */ + + if (open_cursor) + error= mysql_open_cursor(thd, (uint) ALWAYS_MATERIALIZED_CURSOR, + &result, &cursor); + else + { + /* + Try to find it in the query cache, if not, execute it. + Note that multi-statements cannot exist here (they are not supported in + prepared statements). + */ + if (query_cache_send_result_to_client(thd, thd->query, + thd->query_length) <= 0) + { + error= mysql_execute_command(thd); + query_cache_end_of_result(thd); + } + } + + thd->protocol= &thd->protocol_text; /* use normal protocol */ /* Assert that if an error, no cursor is open */ DBUG_ASSERT(! (error && cursor)); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index debc9a7b572..a7200a772bd 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -16,6 +16,7 @@ #include "mysql_priv.h" #ifdef HAVE_REPLICATION +#include "rpl_mi.h" #include "sql_repl.h" #include "log_event.h" #include "rpl_filter.h" @@ -1130,6 +1131,11 @@ bool change_master(THD* thd, MASTER_INFO* mi) if (lex_mi->ssl != LEX_MASTER_INFO::SSL_UNCHANGED) mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::SSL_ENABLE); + + if (lex_mi->ssl_verify_server_cert != LEX_MASTER_INFO::SSL_UNCHANGED) + mi->ssl_verify_server_cert= + (lex_mi->ssl_verify_server_cert == LEX_MASTER_INFO::SSL_ENABLE); + if (lex_mi->ssl_ca) strmake(mi->ssl_ca, lex_mi->ssl_ca, sizeof(mi->ssl_ca)-1); if (lex_mi->ssl_capath) @@ -1142,7 +1148,8 @@ bool change_master(THD* thd, MASTER_INFO* mi) strmake(mi->ssl_key, lex_mi->ssl_key, sizeof(mi->ssl_key)-1); #ifndef HAVE_OPENSSL if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath || - lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key ) + lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key || + lex_mi->ssl_verify_server_cert ) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS)); #endif diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d062cbb3ef6..da2c8e96b0f 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -163,13 +163,15 @@ static COND *make_cond_for_table(COND *cond,table_map table, static Item* part_of_refkey(TABLE *form,Field *field); uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, - ha_rows select_limit, bool no_changes); + ha_rows select_limit, bool no_changes, + key_map *map); static bool list_contains_unique_index(TABLE *table, bool (*find_func) (Field *, void *), void *data); static bool find_field_in_item_list (Field *field, void *data); static bool find_field_in_order_list (Field *field, void *data); static int create_sort_index(THD *thd, JOIN *join, ORDER *order, - ha_rows filesort_limit, ha_rows select_limit); + ha_rows filesort_limit, ha_rows select_limit, + bool is_order_by); static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields, Item *having); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, @@ -278,15 +280,30 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, ref_pointer_array Array of references to Items used in current select DESCRIPTION - The 
function fixes fields referenced from inner selects and - also fixes references (Item_ref objects) to these fields. Each field - is fixed as a usual hidden field of the current select - it is added - to the all_fields list and the pointer to it is saved in the - ref_pointer_array if latter is provided. - After the field has been fixed we proceed with fixing references - (Item_ref objects) to this field from inner subqueries. If the - ref_pointer_array is provided then Item_ref objects is set to - reference element in that array with the pointer to the field. + The function serves 3 purposes - adds fields referenced from inner + selects to the current select list, resolves which class to use + to access referenced item (Item_ref of Item_direct_ref) and fixes + references (Item_ref objects) to these fields. + + If a field isn't already in the select list and the ref_pointer_array + is provided then it is added to the all_fields list and the pointer to + it is saved in the ref_pointer_array. + + The class to access the outer field is determined by the following rules: + 1. If the outer field isn't used under an aggregate function + then the Item_ref class should be used. + 2. If the outer field is used under an aggregate function and this + function is aggregated in the select where the outer field was + resolved or in some more inner select then the Item_direct_ref + class should be used. + The resolution is done here and not at the fix_fields() stage as + it can be done only after sum functions are fixed and pulled up to + selects where they are have to be aggregated. + When the class is chosen it substitutes the original field in the + Item_outer_ref object. + + After this we proceed with fixing references (Item_outer_ref objects) to + this field from inner subqueries. RETURN TRUE an error occured @@ -299,33 +316,64 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, { Item_outer_ref *ref; bool res= FALSE; + bool direct_ref= FALSE; + List_iterator<Item_outer_ref> ref_it(select->inner_refs_list); while ((ref= ref_it++)) { - Item_field *item= ref->outer_field; + Item *item= ref->outer_ref; + Item **item_ref= ref->ref; + Item_ref *new_ref; /* TODO: this field item already might be present in the select list. In this case instead of adding new field item we could use an existing one. The change will lead to less operations for copying fields, smaller temporary tables and less data passed through filesort. */ - if (ref_pointer_array) + if (ref_pointer_array && !ref->found_in_select_list) { int el= all_fields.elements; - ref_pointer_array[el]= (Item*)item; + ref_pointer_array[el]= item; /* Add the field item to the select list of the current select. */ - all_fields.push_front((Item*)item); + all_fields.push_front(item); /* If it's needed reset each Item_ref item that refers this field with a new reference taken from ref_pointer_array. */ - ref->ref= ref_pointer_array + el; + item_ref= ref_pointer_array + el; } - if (!ref->fixed && ref->fix_fields(thd, 0)) + + if (ref->in_sum_func) { - res= TRUE; - break; + Item_sum *sum_func; + if (ref->in_sum_func->nest_level > select->nest_level) + direct_ref= TRUE; + else + { + for (sum_func= ref->in_sum_func; sum_func && + sum_func->aggr_level >= select->nest_level; + sum_func= sum_func->in_sum_func) + { + if (sum_func->aggr_level == select->nest_level) + { + direct_ref= TRUE; + break; + } + } + } } + new_ref= direct_ref ? 
+ new Item_direct_ref(ref->context, item_ref, ref->field_name, + ref->table_name, ref->alias_name_used) : + new Item_ref(ref->context, item_ref, ref->field_name, + ref->table_name, ref->alias_name_used); + if (!new_ref) + return TRUE; + ref->outer_ref= new_ref; + ref->ref= &ref->outer_ref; + + if (!ref->fixed && ref->fix_fields(thd, 0)) + return TRUE; thd->used_tables|= item->used_tables(); } return res; @@ -396,6 +444,7 @@ JOIN::prepare(Item ***rref_pointer_array, join_list= &select_lex->top_join_list; union_part= (unit_arg->first_select()->next_select() != 0); + thd->lex->current_select->is_item_list_lookup= 1; /* If we have already executed SELECT, then it have not sense to prevent its table from update (see unique_table()) @@ -405,12 +454,19 @@ JOIN::prepare(Item ***rref_pointer_array, /* Check that all tables, fields, conds and order are ok */ - if ((!(select_options & OPTION_SETUP_TABLES_DONE) && - setup_tables_and_check_access(thd, &select_lex->context, join_list, - tables_list, - &select_lex->leaf_tables, FALSE, - SELECT_ACL, SELECT_ACL)) || - setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || + if (!(select_options & OPTION_SETUP_TABLES_DONE) && + setup_tables_and_check_access(thd, &select_lex->context, join_list, + tables_list, &select_lex->leaf_tables, + FALSE, SELECT_ACL, SELECT_ACL)) + DBUG_RETURN(-1); + + TABLE_LIST *table_ptr; + for (table_ptr= select_lex->leaf_tables; + table_ptr; + table_ptr= table_ptr->next_leaf) + tables++; + + if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || select_lex->setup_ref_array(thd, og_num) || setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ, &all_fields, 1) || @@ -455,13 +511,20 @@ JOIN::prepare(Item ***rref_pointer_array, select_lex->fix_prepare_information(thd, &conds, &having); + if (order) + { + ORDER *ord; + for (ord= order; ord; ord= ord->next) + { + Item *item= *ord->item; + if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) + item->split_sum_func(thd, ref_pointer_array, all_fields); + } + } + if (having && having->with_sum_func) having->split_sum_func2(thd, ref_pointer_array, all_fields, &having, TRUE); - if (select_lex->inner_refs_list.elements && - fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array)) - DBUG_RETURN(-1); - if (select_lex->inner_sum_func_list) { Item_sum *end=select_lex->inner_sum_func_list; @@ -474,6 +537,10 @@ JOIN::prepare(Item ***rref_pointer_array, } while (item_sum != end); } + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array)) + DBUG_RETURN(-1); + if (setup_ftfuncs(select_lex)) /* should be after having->fix_fields */ DBUG_RETURN(-1); @@ -504,11 +571,6 @@ JOIN::prepare(Item ***rref_pointer_array, DBUG_RETURN(-1); } } - TABLE_LIST *table_ptr; - for (table_ptr= select_lex->leaf_tables; - table_ptr; - table_ptr= table_ptr->next_leaf) - tables++; } { /* Caclulate the number of groups */ @@ -640,7 +702,7 @@ void JOIN::remove_subq_pushed_predicates(Item **where) static void save_index_subquery_explain_info(JOIN_TAB *join_tab, Item* where) { join_tab->packed_info= TAB_INFO_HAVE_VALUE; - if (join_tab->table->used_keys.is_set(join_tab->ref.key)) + if (join_tab->table->covering_keys.is_set(join_tab->ref.key)) join_tab->packed_info |= TAB_INFO_USING_INDEX; if (where) join_tab->packed_info |= TAB_INFO_USING_WHERE; @@ -738,7 +800,6 @@ JOIN::optimize() } { - Item::cond_result having_value; having= optimize_cond(this, having, join_list, &having_value); if (thd->net.report_error) { @@ 
-746,6 +807,10 @@ JOIN::optimize() DBUG_PRINT("error",("Error from optimize_cond")); DBUG_RETURN(1); } + if (select_lex->where) + select_lex->cond_value= cond_value; + if (select_lex->having) + select_lex->having_value= having_value; if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) @@ -906,6 +971,7 @@ JOIN::optimize() conds->update_used_tables(); DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal");); } + /* Permorm the the optimization on fields evaluation mentioned above for all on expressions. @@ -1008,14 +1074,15 @@ JOIN::optimize() JOIN_TAB *tab= &join_tab[const_tables]; bool all_order_fields_used; if (order) - skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1); + skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1, + &tab->table->keys_in_use_for_order_by); if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array, order, fields_list, &all_order_fields_used))) { bool skip_group= (skip_sort_order && - test_if_skip_sort_order(tab, group_list, select_limit, - 1) != 0); + test_if_skip_sort_order(tab, group_list, select_limit, 1, + &tab->table->keys_in_use_for_group_by) != 0); if ((skip_group && all_order_fields_used) || select_limit == HA_POS_ERROR || (order && !skip_sort_order)) @@ -1213,7 +1280,9 @@ JOIN::optimize() ((group_list && (!simple_group || !test_if_skip_sort_order(&join_tab[const_tables], group_list, - unit->select_limit_cnt, 0))) || + unit->select_limit_cnt, 0, + &join_tab[const_tables].table-> + keys_in_use_for_group_by))) || select_distinct) && tmp_table_param.quick_group && !procedure) { @@ -1315,7 +1384,7 @@ JOIN::optimize() DBUG_PRINT("info",("Sorting for group")); thd->proc_info="Sorting for group"; if (create_sort_index(thd, this, group_list, - HA_POS_ERROR, HA_POS_ERROR) || + HA_POS_ERROR, HA_POS_ERROR, FALSE) || alloc_group_fields(this, group_list) || make_sum_func_list(all_fields, fields_list, 1) || setup_sum_funcs(thd, sum_funcs)) @@ -1332,7 +1401,7 @@ JOIN::optimize() DBUG_PRINT("info",("Sorting for order")); thd->proc_info="Sorting for order"; if (create_sort_index(thd, this, order, - HA_POS_ERROR, HA_POS_ERROR)) + HA_POS_ERROR, HA_POS_ERROR, TRUE)) DBUG_RETURN(1); order=0; } @@ -1359,7 +1428,9 @@ JOIN::optimize() { /* Should always succeed */ if (test_if_skip_sort_order(&join_tab[const_tables], - order, unit->select_limit_cnt, 0)) + order, unit->select_limit_cnt, 0, + &join_tab[const_tables].table-> + keys_in_use_for_order_by)) order=0; } } @@ -1552,7 +1623,9 @@ JOIN::exec() (const_tables == tables || ((simple_order || skip_sort_order) && test_if_skip_sort_order(&join_tab[const_tables], order, - select_limit, 0)))) + select_limit, 0, + &join_tab[const_tables].table-> + keys_in_use_for_order_by)))) order=0; having= tmp_having; select_describe(this, need_tmp, @@ -1728,7 +1801,7 @@ JOIN::exec() DBUG_VOID_RETURN; } if (create_sort_index(thd, curr_join, curr_join->group_list, - HA_POS_ERROR, HA_POS_ERROR) || + HA_POS_ERROR, HA_POS_ERROR, FALSE) || make_group_fields(this, curr_join)) { DBUG_VOID_RETURN; @@ -1944,7 +2017,8 @@ JOIN::exec() curr_join->group_list : curr_join->order, curr_join->select_limit, (select_options & OPTION_FOUND_ROWS ? - HA_POS_ERROR : unit->select_limit_cnt))) + HA_POS_ERROR : unit->select_limit_cnt), + curr_join->group_list ? 
TRUE : FALSE)) DBUG_VOID_RETURN; sortorder= curr_join->sortorder; if (curr_join->const_tables != curr_join->tables && @@ -3948,7 +4022,7 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ tmp= records; set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys.is_set(key)) + if (table->covering_keys.is_set(key)) { /* we can use only index tree */ uint keys_per_block= table->file->stats.block_size/2/ @@ -4115,7 +4189,7 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys.is_set(key)) + if (table->covering_keys.is_set(key)) { /* we can use only index tree */ uint keys_per_block= table->file->stats.block_size/2/ @@ -4174,7 +4248,7 @@ best_access_path(JOIN *join, !(s->quick && best_key && s->quick->index == best_key->key && // (2) best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2) !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3) - ! s->table->used_keys.is_clear_all() && best_key) && // (3) + ! s->table->covering_keys.is_clear_all() && best_key) && // (3) !(s->table->force_index && best_key && !s->quick)) // (4) { // Check full join ha_rows rnd_records= s->found_records; @@ -5351,7 +5425,9 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, } else if (keyuse->val->type() == Item::FIELD_ITEM || (keyuse->val->type() == Item::REF_ITEM && - ((Item_ref*)keyuse->val)->ref_type() == Item_ref::OUTER_REF) ) + ((Item_ref*)keyuse->val)->ref_type() == Item_ref::OUTER_REF && + (*(Item_ref**)((Item_ref*)keyuse->val)->ref)->ref_type() == + Item_ref::DIRECT_REF) ) return new store_key_field(thd, key_part->field, key_buff + maybe_null, @@ -6080,10 +6156,7 @@ make_join_readinfo(JOIN *join, ulonglong options) */ if (!ordered_set && (table == join->sort_by_table && - (!join->order || join->skip_sort_order || - test_if_skip_sort_order(tab, join->order, join->select_limit, - 1)) - ) || + (!join->order || join->skip_sort_order)) || (join->sort_by_table == (TABLE *) 1 && i != join->const_tables)) ordered_set= 1; @@ -6099,7 +6172,7 @@ make_join_readinfo(JOIN *join, ulonglong options) table->status=STATUS_NO_RECORD; tab->read_first_record= join_read_const; tab->read_record.read_record= join_no_more_records; - if (table->used_keys.is_set(tab->ref.key) && + if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; @@ -6117,7 +6190,7 @@ make_join_readinfo(JOIN *join, ulonglong options) tab->quick=0; tab->read_first_record= join_read_key; tab->read_record.read_record= join_no_more_records; - if (table->used_keys.is_set(tab->ref.key) && + if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; @@ -6134,7 +6207,7 @@ make_join_readinfo(JOIN *join, ulonglong options) } delete tab->quick; tab->quick=0; - if (table->used_keys.is_set(tab->ref.key) && + if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) { table->key_read=1; @@ -6220,15 +6293,15 @@ make_join_readinfo(JOIN *join, ulonglong options) { if (tab->select && tab->select->quick && tab->select->quick->index != MAX_KEY && //not index_merge - table->used_keys.is_set(tab->select->quick->index)) + table->covering_keys.is_set(tab->select->quick->index)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); } - else if (!table->used_keys.is_clear_all() && + else if (!table->covering_keys.is_clear_all() && !(tab->select && tab->select->quick)) { // Only read index tree - 
tab->index=find_shortest_key(table, & table->used_keys); + tab->index=find_shortest_key(table, & table->covering_keys); tab->read_first_record= join_read_first; tab->type=JT_NEXT; // Read with index_first / index_next } @@ -6612,7 +6685,8 @@ static void update_depend_map(JOIN *join, ORDER *order) order->item[0]->update_used_tables(); order->depend_map=depend_map=order->item[0]->used_tables(); // Not item_sum(), RAND() and no reference to table outside of sub select - if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))) + if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) + && !order->item[0]->with_sum_func) { for (JOIN_TAB **tab=join->map2table; depend_map ; @@ -7094,6 +7168,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item, SYNOPSIS check_row_equality() + thd thread handle left_row left term of the row equality to be processed right_row right term of the row equality to be processed cond_equal multiple equalities that must hold together with the predicate @@ -7114,7 +7189,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item, FALSE otherwise */ -static bool check_row_equality(Item *left_row, Item_row *right_row, +static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row, COND_EQUAL *cond_equal, List<Item>* eq_list) { uint n= left_row->cols(); @@ -7125,13 +7200,21 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, Item *right_item= right_row->element_index(i); if (left_item->type() == Item::ROW_ITEM && right_item->type() == Item::ROW_ITEM) - is_converted= check_row_equality((Item_row *) left_item, - (Item_row *) right_item, - cond_equal, eq_list); - else + { + is_converted= check_row_equality(thd, + (Item_row *) left_item, + (Item_row *) right_item, + cond_equal, eq_list); + if (!is_converted) + thd->lex->current_select->cond_count++; + } + else + { is_converted= check_simple_equality(left_item, right_item, 0, cond_equal); - - if (!is_converted) + thd->lex->current_select->cond_count++; + } + + if (!is_converted) { Item_func_eq *eq_item; if (!(eq_item= new Item_func_eq(left_item, right_item))) @@ -7150,6 +7233,7 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, SYNOPSIS check_equality() + thd thread handle item predicate to process cond_equal multiple equalities that must hold together with the predicate eq_list results of conversions of row equalities that are not simple @@ -7174,7 +7258,7 @@ static bool check_row_equality(Item *left_row, Item_row *right_row, or, if the procedure fails by a fatal error. 
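The recursion in check_row_equality() above flattens a row equality such as (a,b,(c,d)) = (x,y,(z,w)) into the component predicates a=x, b=y, c=z, d=w, counting each one it produces. A self-contained sketch of that shape, using a simplified expression type rather than the server's Item hierarchy:

#include <string>
#include <vector>

// Simplified stand-in for Item: either a scalar name or a row of sub-items.
struct Expr {
  std::string name;            // used when children is empty
  std::vector<Expr> children;  // used for ROW(...) values
  bool is_row() const { return !children.empty(); }
};

// Flatten ROW(a,b,...) = ROW(x,y,...) into scalar pairs, recursing into
// nested rows, and collect the resulting "a = x" style predicates.
static void flatten_row_equality(const Expr &left, const Expr &right,
                                 std::vector<std::string> *out)
{
  for (size_t i= 0; i < left.children.size(); i++)
  {
    const Expr &l= left.children[i];
    const Expr &r= right.children[i];
    if (l.is_row() && r.is_row())
      flatten_row_equality(l, r, out);
    else
      out->push_back(l.name + " = " + r.name);
  }
}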
*/ -static bool check_equality(Item *item, COND_EQUAL *cond_equal, +static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal, List<Item> *eq_list) { if (item->type() == Item::FUNC_ITEM && @@ -7185,9 +7269,13 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, if (left_item->type() == Item::ROW_ITEM && right_item->type() == Item::ROW_ITEM) - return check_row_equality((Item_row *) left_item, + { + thd->lex->current_select->cond_count--; + return check_row_equality(thd, + (Item_row *) left_item, (Item_row *) right_item, cond_equal, eq_list); + } else return check_simple_equality(left_item, right_item, item, cond_equal); } @@ -7200,6 +7288,7 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, SYNOPSIS build_equal_items_for_cond() + thd thread handle cond condition(expression) where to make replacement inherited path to all inherited multiple equality items @@ -7262,7 +7351,7 @@ static bool check_equality(Item *item, COND_EQUAL *cond_equal, pointer to the transformed condition */ -static COND *build_equal_items_for_cond(COND *cond, +static COND *build_equal_items_for_cond(THD *thd, COND *cond, COND_EQUAL *inherited) { Item_equal *item_equal; @@ -7295,7 +7384,7 @@ static COND *build_equal_items_for_cond(COND *cond, structure here because it's restored before each re-execution of any prepared statement/stored procedure. */ - if (check_equality(item, &cond_equal, &eq_list)) + if (check_equality(thd, item, &cond_equal, &eq_list)) li.remove(); } @@ -7330,7 +7419,7 @@ static COND *build_equal_items_for_cond(COND *cond, while ((item= li++)) { Item *new_item; - if ((new_item = build_equal_items_for_cond(item, inherited))!= item) + if ((new_item= build_equal_items_for_cond(thd, item, inherited)) != item) { /* This replacement happens only for standalone equalities */ /* @@ -7360,7 +7449,7 @@ static COND *build_equal_items_for_cond(COND *cond, for WHERE a=b AND c=d AND (b=c OR d=5) b=c is replaced by =(a,b,c,d). 
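The "b=c is replaced by =(a,b,c,d)" step described above is, conceptually, a union of equality classes: once a=b and c=d are known, seeing b=c merges the two sets into one multiple equality. A small union-find sketch of that idea over column names as strings; this illustrates the concept only, not the COND_EQUAL machinery:

#include <map>
#include <string>

// Minimal union-find over column names.
static std::map<std::string, std::string> parent;

static std::string find_root(const std::string &x)
{
  if (!parent.count(x))
    parent[x]= x;
  if (parent[x] == x)
    return x;
  return parent[x]= find_root(parent[x]);   // path compression
}

// Record an equality a=b by merging the two classes.
static void add_equality(const std::string &a, const std::string &b)
{
  parent[find_root(a)]= find_root(b);
}

// After add_equality("a","b"); add_equality("c","d"); add_equality("b","c");
// all of a, b, c, d share one root, i.e. the multiple equality =(a,b,c,d).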
*/ - if (check_equality(cond, &cond_equal, &eq_list)) + if (check_equality(thd, cond, &cond_equal, &eq_list)) { int n= cond_equal.current_level.elements + eq_list.elements; if (n == 0) @@ -7423,7 +7512,7 @@ static COND *build_equal_items_for_cond(COND *cond, SYNOPSIS build_equal_items() - thd Thread handler + thd thread handle cond condition to build the multiple equalities for inherited path to all inherited multiple equality items join_list list of join tables to which the condition refers to @@ -7484,7 +7573,7 @@ static COND *build_equal_items(THD *thd, COND *cond, if (cond) { - cond= build_equal_items_for_cond(cond, inherited); + cond= build_equal_items_for_cond(thd, cond, inherited); cond->update_used_tables(); if (cond->type() == Item::COND_ITEM && ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) @@ -7772,6 +7861,10 @@ static COND* substitute_for_best_equal_field(COND *cond, break; } } + if (cond->type() == Item::COND_ITEM && + !((Item_cond*)cond)->argument_list()->elements) + cond= new Item_int((int32)cond->val_bool()); + } else if (cond->type() == Item::FUNC_ITEM && ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) @@ -8166,9 +8259,14 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) */ expr= simplify_joins(join, &nested_join->join_list, expr, FALSE); - table->on_expr= expr; - if (!table->prep_on_expr) + + if (!table->prep_on_expr || expr != table->on_expr) + { + DBUG_ASSERT(expr); + + table->on_expr= expr; table->prep_on_expr= expr->copy_andor_structure(join->thd); + } } nested_join->used_tables= (table_map) 0; nested_join->not_null_tables=(table_map) 0; @@ -8178,7 +8276,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) } else { - if (!(table->prep_on_expr)) + if (!table->prep_on_expr) table->prep_on_expr= table->on_expr; used_tables= table->table->map; if (conds) @@ -8670,7 +8768,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) if ((new_cond= new Item_func_eq(args[0], new Item_int("last_insert_id()", thd->read_first_successful_insert_id_in_prev_stmt(), - 21)))) + MY_INT64_NUM_DECIMAL_DIGITS)))) { cond=new_cond; /* @@ -8935,7 +9033,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, break; case INT_RESULT: /* Select an integer type with the minimal fit precision */ - if (item->max_length > 11) + if (item->max_length > MY_INT32_NUM_DECIMAL_DIGITS) new_field=new Field_longlong(item->max_length, maybe_null, item->name, item->unsigned_flag); else @@ -8947,20 +9045,19 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, enum enum_field_types type; /* - DATE/TIME fields have STRING_RESULT result type. To preserve - type they needed to be handled separately. + DATE/TIME and GEOMETRY fields have STRING_RESULT result type. + To preserve type they needed to be handled separately. */ if ((type= item->field_type()) == MYSQL_TYPE_DATETIME || type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE || - type == MYSQL_TYPE_TIMESTAMP) + type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_GEOMETRY) new_field= item->tmp_table_field_from_field_type(table, 1); /* Make sure that the blob fits into a Field_varstring which has 2-byte lenght. 
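The magic numbers replaced in the hunks above (11 and 21) are the maximum number of decimal digits, including the sign, needed for 32-bit and 64-bit integers, which is what MY_INT32_NUM_DECIMAL_DIGITS and MY_INT64_NUM_DECIMAL_DIGITS express. A standalone restatement of the width choice made for INT_RESULT items in create_tmp_field_from_item(); the enum names below exist only for this sketch:

#include <stddef.h>
#include <stdint.h>

// Decimal digits (including sign) that fit 32- and 64-bit integers; these
// mirror MY_INT32_NUM_DECIMAL_DIGITS / MY_INT64_NUM_DECIMAL_DIGITS.
enum { INT32_DIGITS = 11, INT64_DIGITS = 21 };

// Pick the narrowest integer storage that can hold 'max_length' digits,
// as the temporary-table field creation does for integer items.
static size_t int_storage_bytes(unsigned max_length)
{
  return max_length > INT32_DIGITS ? sizeof(int64_t) : sizeof(int32_t);
}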
*/ else if (item->max_length/item->collation.collation->mbmaxlen > 255 && - item->max_length/item->collation.collation->mbmaxlen < UINT_MAX16 - && convert_blob_length) + convert_blob_length < UINT_MAX16 && convert_blob_length) new_field= new Field_varstring(convert_blob_length, maybe_null, item->name, table->s, item->collation.collation); @@ -9358,7 +9455,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->copy_blobs= 1; table->in_use= thd; table->quick_keys.init(); - table->used_keys.init(); + table->covering_keys.init(); table->keys_in_use_for_query.init(); table->s= share; @@ -9387,13 +9484,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, { if (item->with_sum_func && type != Item::SUM_FUNC_ITEM) { - /* - Mark that the we have ignored an item that refers to a summary - function. We need to know this if someone is going to use - DISTINCT on the result. - */ - param->using_indirect_summary_function=1; - continue; + if (item->used_tables() & OUTER_REF_TABLE_BIT) + item->update_used_tables(); + if (type == Item::SUBSELECT_ITEM || + (item->used_tables() & ~OUTER_REF_TABLE_BIT)) + { + /* + Mark that the we have ignored an item that refers to a summary + function. We need to know this if someone is going to use + DISTINCT on the result. + */ + param->using_indirect_summary_function=1; + continue; + } } if (item->const_item() && (int) hidden_field_count <= 0) continue; // We don't have to store this @@ -9578,6 +9681,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, share->default_values= table->record[1]+alloc_length; } copy_func[0]=0; // End marker + param->func_count= copy_func - param->items_to_copy; setup_tmp_table_column_bitmaps(table, bitmaps); @@ -10953,7 +11057,8 @@ int safe_index_read(JOIN_TAB *tab) TABLE *table= tab->table; if ((error=table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length, HA_READ_KEY_EXACT))) + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT))) return report_error(table, error); return 0; } @@ -10983,7 +11088,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) } else { - if (!table->key_read && table->used_keys.is_set(tab->ref.key) && + if (!table->key_read && table->covering_keys.is_set(tab->ref.key) && !table->no_keyread && (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY) { @@ -11091,7 +11196,8 @@ join_read_const(JOIN_TAB *tab) { error=table->file->index_read_idx(table->record[0],tab->ref.key, (byte*) tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); } if (error) { @@ -11134,7 +11240,8 @@ join_read_key(JOIN_TAB *tab) } error=table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT); + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT); if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); } @@ -11162,7 +11269,8 @@ join_read_always_key(JOIN_TAB *tab) return -1; if ((error=table->file->index_read(table->record[0], tab->ref.key_buff, - tab->ref.key_length,HA_READ_KEY_EXACT))) + make_prev_keypart_map(tab->ref.key_parts), + HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -11188,8 +11296,7 @@ join_read_last_key(JOIN_TAB *tab) if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref)) return -1; if ((error=table->file->index_read_last(table->record[0], - 
tab->ref.key_buff, - tab->ref.key_length))) + tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts)))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -11290,7 +11397,7 @@ join_read_first(JOIN_TAB *tab) { int error; TABLE *table=tab->table; - if (!table->key_read && table->used_keys.is_set(tab->index) && + if (!table->key_read && table->covering_keys.is_set(tab->index) && !table->no_keyread) { table->key_read=1; @@ -11329,7 +11436,7 @@ join_read_last(JOIN_TAB *tab) { TABLE *table=tab->table; int error; - if (!table->key_read && table->used_keys.is_set(tab->index) && + if (!table->key_read && table->covering_keys.is_set(tab->index) && !table->no_keyread) { table->key_read=1; @@ -11730,7 +11837,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), group->buff[-1]= (char) group->field->is_null(); } if (!table->file->index_read(table->record[1], - join->tmp_table_param.group_buff,0, + join->tmp_table_param.group_buff, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* Update old record */ restore_record(table,record[1]); @@ -12353,7 +12460,7 @@ find_field_in_item_list (Field *field, void *data) static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, - bool no_changes) + bool no_changes, key_map *map) { int ref_key; uint ref_key_parts; @@ -12363,14 +12470,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, DBUG_ENTER("test_if_skip_sort_order"); LINT_INIT(ref_key_parts); - /* Check which keys can be used to resolve ORDER BY. */ - usable_keys= table->keys_in_use_for_query; - /* Keys disabled by ALTER TABLE ... DISABLE KEYS should have already been taken into account. */ - DBUG_ASSERT(usable_keys.is_subset(table->s->keys_in_use)); + usable_keys= *map; for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next) { @@ -12428,8 +12532,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, If using index only read, only consider other possible index only keys */ - if (table->used_keys.is_set(ref_key)) - usable_keys.intersect(table->used_keys); + if (table->covering_keys.is_set(ref_key)) + usable_keys.intersect(table->covering_keys); if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts, &usable_keys)) < MAX_KEY) { @@ -12544,7 +12648,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, if (select_limit >= table->file->stats.records) { keys= *table->file->keys_to_use_for_scanning(); - keys.merge(table->used_keys); + keys.merge(table->covering_keys); /* We are adding here also the index specified in FORCE INDEX clause, @@ -12572,7 +12676,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, tab->read_first_record= (flag > 0 ? 
join_read_first: join_read_last); tab->type=JT_NEXT; // Read with index_first(), index_next() - if (table->used_keys.is_set(nr)) + if (table->covering_keys.is_set(nr)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); @@ -12598,6 +12702,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, filesort_limit Max number of rows that needs to be sorted select_limit Max number of rows in final output Used to decide if we should use index or not + is_order_by true if we are sorting on ORDER BY, false if GROUP BY + Used to decide if we should use index or not IMPLEMENTATION @@ -12616,7 +12722,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, static int create_sort_index(THD *thd, JOIN *join, ORDER *order, - ha_rows filesort_limit, ha_rows select_limit) + ha_rows filesort_limit, ha_rows select_limit, + bool is_order_by) { uint length= 0; ha_rows examined_rows; @@ -12637,7 +12744,9 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, */ if ((order != join->group_list || !(join->select_options & SELECT_BIG_RESULT)) && - test_if_skip_sort_order(tab,order,select_limit,0)) + test_if_skip_sort_order(tab,order,select_limit,0, + is_order_by ? &table->keys_in_use_for_order_by : + &table->keys_in_use_for_group_by)) DBUG_RETURN(0); for (ORDER *ord= join->order; ord; ord= ord->next) length++; @@ -13407,7 +13516,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, Item **select_item; /* The corresponding item from the SELECT clause. */ Field *from_field; /* The corresponding field from the FROM clause. */ uint counter; - bool unaliased; + enum_resolution_type resolution; /* Local SP variables may be int but are expressions, not positions. @@ -13430,7 +13539,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, } /* Lookup the current GROUP/ORDER field in the SELECT clause. */ select_item= find_item_in_list(order_item, fields, &counter, - REPORT_EXCEPT_NOT_FOUND, &unaliased); + REPORT_EXCEPT_NOT_FOUND, &resolution); if (!select_item) return TRUE; /* The item is not unique, or some other error occured. */ @@ -13444,7 +13553,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, original field name, we should additionaly check if we have conflict for this name (in case if we would perform lookup in all tables). */ - if (unaliased && !order_item->fixed && + if (resolution == RESOLVED_BEHIND_ALIAS && !order_item->fixed && order_item->fix_fields(thd, order->item)) return TRUE; @@ -13514,16 +13623,11 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, We check order_item->fixed because Item_func_group_concat can put arguments for which fix_fields already was called. */ - thd->lex->current_select->is_item_list_lookup= 1; if (!order_item->fixed && (order_item->fix_fields(thd, order->item) || (order_item= *order->item)->check_cols(1) || thd->is_fatal_error)) - { - thd->lex->current_select->is_item_list_lookup= 0; return TRUE; /* Wrong field. */ - } - thd->lex->current_select->is_item_list_lookup= 0; uint el= all_fields.elements; all_fields.push_front(order_item); /* Add new field to field list. */ @@ -13675,7 +13779,7 @@ setup_new_fields(THD *thd, List<Item> &fields, { Item **item; uint counter; - bool not_used; + enum_resolution_type not_used; DBUG_ENTER("setup_new_fields"); thd->mark_used_columns= MARK_COLUMNS_READ; // Not really needed, but... 
@@ -13743,9 +13847,7 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, ORDER *ord_iter; for (ord_iter= group; ord_iter; ord_iter= ord_iter->next) if ((*ord_iter->item)->eq(item, 1)) - break; - if (ord_iter) - continue; + goto next_item; ORDER *ord=(ORDER*) thd->calloc(sizeof(ORDER)); if (!ord) @@ -13760,6 +13862,7 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, *prev=ord; prev= &ord->next; } +next_item: ref_pointer_array++; } *prev=0; @@ -13794,6 +13897,7 @@ count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, if (!sum_item->quick_group) param->quick_group=0; // UDF SUM function param->sum_func_count++; + param->func_count++; for (uint i=0 ; i < sum_item->arg_count ; i++) { @@ -15250,7 +15354,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, examined_rows=(ha_rows)join->best_positions[i].records_read; item_list.push_back(new Item_int((longlong) (ulonglong) examined_rows, - 21)); + MY_INT64_NUM_DECIMAL_DIGITS)); /* Add "filtered" field to item_list. */ if (join->thd->lex->describe & DESCRIBE_EXTENDED) @@ -15270,7 +15374,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, /* Build "Extra" field and add it to item_list. */ my_bool key_read=table->key_read; if ((tab->type == JT_NEXT || tab->type == JT_CONST) && - table->used_keys.is_set(tab->index)) + table->covering_keys.is_set(tab->index)) key_read=1; if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT && !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row) @@ -15635,10 +15739,13 @@ void st_select_lex::print(THD *thd, String *str) Item *cur_where= where; if (join) cur_where= join->conds; - if (cur_where) + if (cur_where || cond_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" where ")); - cur_where->print(str); + if (cur_where) + cur_where->print(str); + else + str->append(cond_value != Item::COND_FALSE ? "1" : "0"); } // group by & olap @@ -15664,10 +15771,13 @@ void st_select_lex::print(THD *thd, String *str) if (join) cur_having= join->having; - if (cur_having) + if (cur_having || having_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" having ")); - cur_having->print(str); + if (cur_having) + cur_having->print(str); + else + str->append(having_value != Item::COND_FALSE ? 
"1" : "0"); } if (order_list.elements) diff --git a/sql/sql_select.h b/sql/sql_select.h index 1d1fa666c60..ca37c7bd274 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -192,7 +192,7 @@ typedef struct st_join_table { JOIN *join; /* Bitmap of nested joins this table is part of */ nested_join_map embedding_map; - + void cleanup(); inline bool is_using_loose_index_scan() { @@ -331,7 +331,7 @@ public: bool need_tmp, hidden_group_fields; DYNAMIC_ARRAY keyuse; - Item::cond_result cond_value; + Item::cond_result cond_value, having_value; List<Item> all_fields; // to store all fields that used in query //Above list changed to use temporary table List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3; @@ -527,15 +527,11 @@ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); class store_key :public Sql_alloc { - protected: - Field *to_field; // Store data here - char *null_ptr; - char err; public: bool null_key; /* TRUE <=> the value of the key has a null part */ enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) - :null_ptr(null), err(0), null_key(0) + :null_key(0), null_ptr(null), err(0) { if (field_arg->type() == MYSQL_TYPE_BLOB) { @@ -550,8 +546,35 @@ public: ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ - virtual enum store_key_result copy()=0; virtual const char *name() const=0; + + /** + @brief sets ignore truncation warnings mode and calls the real copy method + + @details this function makes sure truncation warnings when preparing the + key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). + */ + enum store_key_result copy() + { + enum store_key_result result; + enum_check_fields saved_count_cuted_fields= + to_field->table->in_use->count_cuted_fields; + + to_field->table->in_use->count_cuted_fields= CHECK_FIELD_IGNORE; + + result= copy_inner(); + + to_field->table->in_use->count_cuted_fields= saved_count_cuted_fields; + + return result; + } + + protected: + Field *to_field; // Store data here + char *null_ptr; + char err; + + virtual enum store_key_result copy_inner()=0; }; @@ -571,7 +594,10 @@ class store_key_field: public store_key copy_field.set(to_field,from_field,0); } } - enum store_key_result copy() + const char *name() const { return field_name; } + + protected: + enum store_key_result copy_inner() { TABLE *table= copy_field.to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -581,7 +607,6 @@ class store_key_field: public store_key null_key= to_field->is_null(); return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } - const char *name() const { return field_name; } }; @@ -596,7 +621,10 @@ public: null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? &err : NullS, length), item(item_arg) {} - enum store_key_result copy() + const char *name() const { return "func"; } + + protected: + enum store_key_result copy_inner() { TABLE *table= to_field->table; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, @@ -606,7 +634,6 @@ public: null_key= to_field->is_null() || item->null_value; return (err != 0 || res > 2 ? 
STORE_KEY_FATAL : (store_key_result) res); } - const char *name() const { return "func"; } }; @@ -622,7 +649,10 @@ public: &err : NullS, length, item_arg), inited(0) { } - enum store_key_result copy() + const char *name() const { return "const"; } + +protected: + enum store_key_result copy_inner() { int res; if (!inited) @@ -637,7 +667,6 @@ public: null_key= to_field->is_null() || item->null_value; return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err); } - const char *name() const { return "const"; } }; bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref); diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 9a5176d9fe3..a62ce98850b 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -16,6 +16,21 @@ /* The servers are saved in the system table "servers" + + Currently, when the user performs an ALTER SERVER or a DROP SERVER + operation, it will cause all open tables which refer to the named + server connection to be flushed. This may cause some undesirable + behaviour with regard to currently running transactions. It is + expected that the DBA knows what s/he is doing when s/he performs + the ALTER SERVER or DROP SERVER operation. + + TODO: + It is desirable for us to implement a callback mechanism instead where + callbacks can be registered for specific server protocols. The callback + will be fired when such a server name has been created/altered/dropped + or when statistics are to be gathered such as how many actual connections. + Storage engines etc will be able to make use of the callback so that + currently running transactions etc will not be disrupted. */ #include "mysql_priv.h" @@ -25,14 +40,43 @@ #include "sp_head.h" #include "sp.h" -HASH servers_cache; -pthread_mutex_t servers_cache_mutex; // To init the hash -uint servers_cache_initialised=FALSE; -/* Version of server table. incremented by servers_load */ -static uint servers_version=0; +/* + We only use 1 mutex to guard the data structures - THR_LOCK_servers. + Read locked when only reading data and write-locked for all other access. 
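The comment above replaces the old mutex-plus-initialised-flag scheme with a single reader/writer lock. A minimal standalone sketch of the same discipline, where a plain pthread rwlock and a std::map stand in for THR_LOCK_servers and the servers HASH:

#include <pthread.h>
#include <map>
#include <string>

static pthread_rwlock_t cache_lock= PTHREAD_RWLOCK_INITIALIZER;
static std::map<std::string, std::string> cache;

// Readers take the lock shared ...
static bool cache_lookup(const std::string &key, std::string *out)
{
  pthread_rwlock_rdlock(&cache_lock);
  std::map<std::string, std::string>::const_iterator it= cache.find(key);
  bool found= (it != cache.end());
  if (found)
    *out= it->second;
  pthread_rwlock_unlock(&cache_lock);
  return found;
}

// ... and any modification takes it exclusively.
static void cache_store(const std::string &key, const std::string &value)
{
  pthread_rwlock_wrlock(&cache_lock);
  cache[key]= value;
  pthread_rwlock_unlock(&cache_lock);
}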
+*/ + +static HASH servers_cache; static MEM_ROOT mem; static rw_lock_t THR_LOCK_servers; +static bool get_server_from_table_to_cache(TABLE *table); + +/* insert functions */ +static int insert_server(THD *thd, FOREIGN_SERVER *server_options); +static int insert_server_record(TABLE *table, FOREIGN_SERVER *server); +static int insert_server_record_into_cache(FOREIGN_SERVER *server); +static void prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *server); +/* drop functions */ +static int delete_server_record(TABLE *table, + char *server_name, + int server_name_length); +static int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options); + +/* update functions */ +static void prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +static int update_server(THD *thd, FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +static int update_server_record(TABLE *table, FOREIGN_SERVER *server); +static int update_server_record_in_cache(FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered); +/* utility functions */ +static void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); + + + static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, my_bool not_used __attribute__((unused))) { @@ -45,6 +89,7 @@ static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, DBUG_RETURN((byte*) server->server_name); } + /* Initialize structures responsible for servers used in federated server scheme information for them from the server @@ -64,35 +109,27 @@ static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, 1 Could not initialize servers */ -my_bool servers_init(bool dont_read_servers_table) +bool servers_init(bool dont_read_servers_table) { THD *thd; - my_bool return_val= 0; + bool return_val= FALSE; DBUG_ENTER("servers_init"); /* init the mutex */ - if (pthread_mutex_init(&servers_cache_mutex, MY_MUTEX_INIT_FAST)) - DBUG_RETURN(1); - if (my_rwlock_init(&THR_LOCK_servers, NULL)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* initialise our servers cache */ if (hash_init(&servers_cache, system_charset_info, 32, 0, 0, (hash_get_key) servers_cache_get_key, 0, 0)) { - return_val= 1; /* we failed, out of memory? */ + return_val= TRUE; /* we failed, out of memory? */ goto end; } /* Initialize the mem root for data */ init_alloc_root(&mem, ACL_ALLOC_BLOCK_SIZE, 0); - /* - at this point, the cache is initialised, let it be known - */ - servers_cache_initialised= TRUE; - if (dont_read_servers_table) goto end; @@ -100,7 +137,7 @@ my_bool servers_init(bool dont_read_servers_table) To be able to run this from boot, we allocate a temporary THD */ if (!(thd=new THD)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); thd->thread_stack= (char*) &thd; thd->store_globals(); /* @@ -130,19 +167,13 @@ end: TRUE Error */ -static my_bool servers_load(THD *thd, TABLE_LIST *tables) +static bool servers_load(THD *thd, TABLE_LIST *tables) { TABLE *table; READ_RECORD read_record_info; - my_bool return_val= TRUE; + bool return_val= TRUE; DBUG_ENTER("servers_load"); - if (!servers_cache_initialised) - DBUG_RETURN(0); - - /* need to figure out how to utilise this variable */ - servers_version++; /* servers updated */ - /* first, send all cached rows to sleep with the fishes, oblivion! 
I expect this crappy comment replaced */ free_root(&mem, MYF(MY_MARK_BLOCKS_FREE)); @@ -156,7 +187,7 @@ static my_bool servers_load(THD *thd, TABLE_LIST *tables) goto end; } - return_val=0; + return_val= FALSE; end: end_read_record(&read_record_info); @@ -183,10 +214,10 @@ end: TRUE Failure */ -my_bool servers_reload(THD *thd) +bool servers_reload(THD *thd) { TABLE_LIST tables[1]; - my_bool return_val= 1; + bool return_val= TRUE; DBUG_ENTER("servers_reload"); if (thd->locked_tables) @@ -196,10 +227,9 @@ my_bool servers_reload(THD *thd) close_thread_tables(thd); } - /* - To avoid deadlocks we should obtain table locks before - obtaining servers_cache->lock mutex. - */ + DBUG_PRINT("info", ("locking servers_cache")); + rw_wrlock(&THR_LOCK_servers); + bzero((char*) tables, sizeof(tables)); tables[0].alias= tables[0].table_name= (char*) "servers"; tables[0].db= (char*) "mysql"; @@ -207,17 +237,11 @@ my_bool servers_reload(THD *thd) if (simple_open_n_lock_tables(thd, tables)) { - sql_print_error("Fatal error: Can't open and lock privilege tables: %s", + sql_print_error("Can't open and lock privilege tables: %s", thd->net.last_error); goto end; } - DBUG_PRINT("info", ("locking servers_cache")); - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - //old_servers_cache= servers_cache; - //old_mem=mem; - if ((return_val= servers_load(thd, tables))) { // Error. Revert to old list /* blast, for now, we have no servers, discuss later way to preserve */ @@ -226,14 +250,14 @@ my_bool servers_reload(THD *thd) servers_free(); } - DBUG_PRINT("info", ("unlocking servers_cache")); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); - end: close_thread_tables(thd); + DBUG_PRINT("info", ("unlocking servers_cache")); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(return_val); } + /* Initialize structures responsible for servers used in federated server scheme information for them from the server @@ -260,7 +284,8 @@ end: 1 could not insert server struct into global servers cache */ -my_bool get_server_from_table_to_cache(TABLE *table) +static bool +get_server_from_table_to_cache(TABLE *table) { /* alloc a server struct */ char *ptr; @@ -308,69 +333,6 @@ my_bool get_server_from_table_to_cache(TABLE *table) DBUG_RETURN(FALSE); } -/* - SYNOPSIS - server_exists_in_table() - THD *thd - thread pointer - LEX_SERVER_OPTIONS *server_options - pointer to Lex->server_options - - NOTES - This function takes a LEX_SERVER_OPTIONS struct, which is very much the - same type of structure as a FOREIGN_SERVER, it contains the values parsed - in any one of the [CREATE|DELETE|DROP] SERVER statements. Using the - member "server_name", index_read_idx either founds the record and returns - 1, or doesn't find the record, and returns 0 - - RETURN VALUES - 0 record not found - 1 record found -*/ - -my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) -{ - int result= 1; - int error= 0; - TABLE_LIST tables; - TABLE *table; - DBUG_ENTER("server_exists"); - - bzero((char*) &tables, sizeof(tables)); - tables.db= (char*) "mysql"; - tables.alias= tables.table_name= (char*) "servers"; - - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ - if (! 
(table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - - table->use_all_columns(); - - rw_wrlock(&THR_LOCK_servers); - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - /* set the field that's the PK to the value we're looking for */ - table->field[0]->store(server_options->server_name, - server_options->server_name_length, - system_charset_info); - - if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT))) - { - if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { - table->file->print_error(error, MYF(0)); - result= -1; - } - result= 0; - DBUG_PRINT("info",("record for server '%s' not found!", - server_options->server_name)); - } - - VOID(pthread_mutex_unlock(&servers_cache_mutex)); - rw_unlock(&THR_LOCK_servers); - DBUG_RETURN(result); -} /* SYNOPSIS @@ -382,15 +344,18 @@ my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) This function takes a server object that is has all members properly prepared, ready to be inserted both into the mysql.servers table and the servers cache. + + THR_LOCK_servers must be write locked. RETURN VALUES 0 - no error other - error code */ -int insert_server(THD *thd, FOREIGN_SERVER *server) +static int +insert_server(THD *thd, FOREIGN_SERVER *server) { - int error= 0; + int error= -1; TABLE_LIST tables; TABLE *table; @@ -402,13 +367,7 @@ int insert_server(THD *thd, FOREIGN_SERVER *server) /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ if (! (table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - - /* lock mutex to make sure no changes happen */ - VOID(pthread_mutex_lock(&servers_cache_mutex)); - - /* lock table */ - rw_wrlock(&THR_LOCK_servers); + goto end; /* insert the server into the table */ if ((error= insert_server_record(table, server))) @@ -419,12 +378,10 @@ int insert_server(THD *thd, FOREIGN_SERVER *server) goto end; end: - /* unlock the table */ - rw_unlock(&THR_LOCK_servers); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); DBUG_RETURN(error); } + /* SYNOPSIS int insert_server_record_into_cache() @@ -434,13 +391,16 @@ end: This function takes a FOREIGN_SERVER pointer to an allocated (root mem) and inserts it into the global servers cache + THR_LOCK_servers must be write locked. 
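The hunks above and below replace the old servers_cache_mutex plus THR_LOCK_servers pair with a single read/write lock: the statement-level entry points take THR_LOCK_servers once, and helpers such as insert_server() and insert_server_record_into_cache() simply assume the write lock is already held. A minimal standalone C++ sketch of that contract, with std::shared_mutex standing in for THR_LOCK_servers and an unordered_map standing in for servers_cache (names and error codes here are illustrative, not code from this tree):

#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

struct ServerEntry { std::string host; unsigned port; };

static std::shared_mutex cache_lock;                        // plays the role of THR_LOCK_servers
static std::unordered_map<std::string, ServerEntry> cache;  // plays the role of servers_cache

// Helper in the spirit of insert_server_record_into_cache(): it assumes the
// caller already holds the write lock and never re-acquires it.
static int insert_into_cache(const std::string &name, const ServerEntry &entry)
{
  return cache.emplace(name, entry).second ? 0 : 1;
}

int create_server_like(const std::string &name, const ServerEntry &entry)
{
  std::unique_lock<std::shared_mutex> guard(cache_lock);    // single statement-level write lock
  if (cache.count(name))
    return 1;                                               // "server already exists" analogue
  return insert_into_cache(name, entry);                    // relies on the lock held above
}

Collapsing the two primitives into one lock is also what lets the patch drop the old ordering note about taking table locks before the cache mutex.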
+ RETURN VALUE 0 - no error >0 - error code */ -int insert_server_record_into_cache(FOREIGN_SERVER *server) +static int +insert_server_record_into_cache(FOREIGN_SERVER *server) { int error=0; DBUG_ENTER("insert_server_record_into_cache"); @@ -461,6 +421,7 @@ int insert_server_record_into_cache(FOREIGN_SERVER *server) DBUG_RETURN(error); } + /* SYNOPSIS store_server_fields() @@ -478,7 +439,8 @@ int insert_server_record_into_cache(FOREIGN_SERVER *server) */ -void store_server_fields(TABLE *table, FOREIGN_SERVER *server) +static void +store_server_fields(TABLE *table, FOREIGN_SERVER *server) { table->use_all_columns(); @@ -539,6 +501,7 @@ void store_server_fields(TABLE *table, FOREIGN_SERVER *server) */ +static int insert_server_record(TABLE *table, FOREIGN_SERVER *server) { int error; @@ -554,8 +517,7 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) /* read index until record is that specified in server_name */ if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT))) { /* if not found, err */ @@ -606,9 +568,11 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error= 0; + int error; TABLE_LIST tables; TABLE *table; + LEX_STRING name= { server_options->server_name, + server_options->server_name_length }; DBUG_ENTER("drop_server"); DBUG_PRINT("info", ("server name server->server_name %s", @@ -618,28 +582,35 @@ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) tables.db= (char*) "mysql"; tables.alias= tables.table_name= (char*) "servers"; - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ - if (! (table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(TRUE); - rw_wrlock(&THR_LOCK_servers); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + /* hit the memory hit first */ + if ((error= delete_server_record_in_cache(server_options))) + goto end; - if ((error= delete_server_record(table, - server_options->server_name, - server_options->server_name_length))) + if (! (table= open_ltable(thd, &tables, TL_WRITE))) + { + error= my_errno; goto end; + } + error= delete_server_record(table, name.str, name.length); - if ((error= delete_server_record_in_cache(server_options))) - goto end; + /* close the servers table before we call closed_cached_connection_tables */ + close_thread_tables(thd); + + if (close_cached_connection_tables(thd, TRUE, &name)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, "Server connection in use"); + } end: - VOID(pthread_mutex_unlock(&servers_cache_mutex)); rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + + /* SYNOPSIS @@ -658,10 +629,10 @@ end: */ -int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) +static int +delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) { - - int error= 0; + int error= ER_FOREIGN_SERVER_DOESNT_EXIST; FOREIGN_SERVER *server; DBUG_ENTER("delete_server_record_in_cache"); @@ -677,7 +648,7 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) DBUG_PRINT("info", ("server_name %s length %d not found!", server_options->server_name, server_options->server_name_length)); - // what should be done if not found in the cache? 
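delete_server_record_in_cache() is reshaped in the surrounding hunk so that it starts from ER_FOREIGN_SERVER_DOESNT_EXIST and clears the error only after the entry has actually been removed, which is what allows drop_server() to consult the cache before touching the mysql.servers table. A self-contained sketch of that shape, with plain integers standing in for the real error codes:

#include <string>
#include <unordered_map>

enum { OK = 0, DOESNT_EXIST = 1 };                    // stand-ins for the server error codes

static std::unordered_map<std::string, int> servers;  // stand-in for servers_cache

// Default to the "doesn't exist" error; clear it only after a successful erase.
int delete_from_cache(const std::string &name)
{
  int error = DOESNT_EXIST;
  auto it = servers.find(name);
  if (it == servers.end())
    return error;              // drop_server() then skips the table delete entirely
  servers.erase(it);
  error = OK;
  return error;
}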
+ goto end; } /* We succeded in deletion of the server to the table, now delete @@ -687,14 +658,15 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) server->server_name, server->server_name_length)); - if (server) - VOID(hash_delete(&servers_cache, (byte*) server)); - - servers_version++; /* servers updated */ + VOID(hash_delete(&servers_cache, (byte*) server)); + + error= 0; +end: DBUG_RETURN(error); } + /* SYNOPSIS @@ -714,6 +686,8 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) table for the particular server via the call to update_server_record, and in the servers_cache via update_server_record_in_cache. + THR_LOCK_servers must be write locked. + RETURN VALUE 0 - no error >0 - error code @@ -722,7 +696,7 @@ int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options) int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) { - int error= 0; + int error; TABLE *table; TABLE_LIST tables; DBUG_ENTER("update_server"); @@ -732,19 +706,26 @@ int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered) tables.alias= tables.table_name= (char*)"servers"; if (!(table= open_ltable(thd, &tables, TL_WRITE))) - DBUG_RETURN(1); + { + error= my_errno; + goto end; + } - rw_wrlock(&THR_LOCK_servers); if ((error= update_server_record(table, altered))) goto end; - update_server_record_in_cache(existing, altered); + error= update_server_record_in_cache(existing, altered); + + /* + Perform a reload so we don't have a 'hole' in our mem_root + */ + servers_load(thd, &tables); end: - rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -761,6 +742,8 @@ end: HASH, then the updated record inserted, in essence replacing the old record. + THR_LOCK_servers must be write locked. 
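update_server_record_in_cache(), documented just above, swaps the existing entry for the altered one while the caller holds the write lock, and update_server() then reloads the servers table so the strings left behind in the MEM_ROOT do not accumulate. A minimal sketch of the swap step alone (locking and the reload are omitted, and the container and return codes are illustrative):

#include <string>
#include <unordered_map>

struct Srv { std::string host; unsigned port; };

static std::unordered_map<std::string, Srv> cache;   // stand-in for servers_cache

// Remove the existing entry and insert the altered one; a failed insert
// (out of memory in the real hash) maps to a resource-style error code.
int update_in_cache(const std::string &name, const Srv &altered)
{
  cache.erase(name);
  bool inserted = cache.emplace(name, altered).second;
  return inserted ? 0 : 1;                           // 1 plays the role of ER_OUT_OF_RESOURCES
}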
+ RETURN VALUE 0 - no error 1 - error @@ -791,13 +774,13 @@ int update_server_record_in_cache(FOREIGN_SERVER *existing, { DBUG_PRINT("info", ("had a problem inserting server %s at %lx", altered->server_name, (long unsigned int) altered)); - error= 1; + error= ER_OUT_OF_RESOURCES; } - servers_version++; /* servers updated */ DBUG_RETURN(error); } + /* SYNOPSIS @@ -830,9 +813,9 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) to->password= strdup_root(&mem, from->password); if (to->port == -1) to->port= from->port; - if (!to->socket) + if (!to->socket && from->socket) to->socket= strdup_root(&mem, from->socket); - if (!to->scheme) + if (!to->scheme && from->scheme) to->scheme= strdup_root(&mem, from->scheme); if (!to->owner) to->owner= strdup_root(&mem, from->owner); @@ -840,6 +823,7 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) DBUG_VOID_RETURN; } + /* SYNOPSIS @@ -862,7 +846,9 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to) */ -int update_server_record(TABLE *table, FOREIGN_SERVER *server) + +static int +update_server_record(TABLE *table, FOREIGN_SERVER *server) { int error=0; DBUG_ENTER("update_server_record"); @@ -873,15 +859,11 @@ int update_server_record(TABLE *table, FOREIGN_SERVER *server) system_charset_info); if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, ~(longlong)0, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { table->file->print_error(error, MYF(0)); - error= 1; - } DBUG_PRINT("info",("server not found!")); error= ER_FOREIGN_SERVER_DOESNT_EXIST; } @@ -901,6 +883,7 @@ end: DBUG_RETURN(error); } + /* SYNOPSIS @@ -916,11 +899,11 @@ end: */ -int delete_server_record(TABLE *table, - char *server_name, - int server_name_length) +static int +delete_server_record(TABLE *table, + char *server_name, int server_name_length) { - int error= 0; + int error; DBUG_ENTER("delete_server_record"); table->use_all_columns(); @@ -928,15 +911,11 @@ int delete_server_record(TABLE *table, table->field[0]->store(server_name, server_name_length, system_charset_info); if ((error= table->file->index_read_idx(table->record[0], 0, - (byte *)table->field[0]->ptr, - table->key_info[0].key_length, + (byte *)table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) - { table->file->print_error(error, MYF(0)); - error= 1; - } DBUG_PRINT("info",("server not found!")); error= ER_FOREIGN_SERVER_DOESNT_EXIST; } @@ -965,28 +944,35 @@ int delete_server_record(TABLE *table, int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error; + int error= ER_FOREIGN_SERVER_EXISTS; FOREIGN_SERVER *server; DBUG_ENTER("create_server"); DBUG_PRINT("info", ("server_options->server_name %s", server_options->server_name)); + rw_wrlock(&THR_LOCK_servers); + + /* hit the memory first */ + if (hash_search(&servers_cache, (byte*) server_options->server_name, + server_options->server_name_length)) + goto end; + server= (FOREIGN_SERVER *)alloc_root(&mem, sizeof(FOREIGN_SERVER)); - if ((error= prepare_server_struct_for_insert(server_options, server))) - goto end; + prepare_server_struct_for_insert(server_options, server); - if ((error= insert_server(thd, server))) - goto end; + error= insert_server(thd, server); DBUG_PRINT("info", ("error returned %d", error)); end: + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ 
-1003,37 +989,44 @@ end: int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - int error= 0; + int error= ER_FOREIGN_SERVER_DOESNT_EXIST; FOREIGN_SERVER *altered, *existing; + LEX_STRING name= { server_options->server_name, + server_options->server_name_length }; DBUG_ENTER("alter_server"); DBUG_PRINT("info", ("server_options->server_name %s", server_options->server_name)); + rw_wrlock(&THR_LOCK_servers); + + if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache, + (byte*) name.str, + name.length))) + goto end; + altered= (FOREIGN_SERVER *)alloc_root(&mem, sizeof(FOREIGN_SERVER)); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + prepare_server_struct_for_update(server_options, existing, altered); - if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache, - (byte*) server_options->server_name, - server_options->server_name_length))) - { - error= ER_FOREIGN_SERVER_DOESNT_EXIST; - goto end; - } + error= update_server(thd, existing, altered); - if ((error= prepare_server_struct_for_update(server_options, existing, altered))) - goto end; + /* close the servers table before we call closed_cached_connection_tables */ + close_thread_tables(thd); - if ((error= update_server(thd, existing, altered))) - goto end; + if (close_cached_connection_tables(thd, FALSE, &name)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, "Server connection in use"); + } end: DBUG_PRINT("info", ("error returned %d", error)); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(error); } + /* SYNOPSIS @@ -1044,19 +1037,17 @@ end: NOTES RETURN VALUE - 0 - no error + none */ -int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *server) +static void +prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *server) { - int error; char *unset_ptr= (char*)""; DBUG_ENTER("prepare_server_struct"); - error= 0; - /* these two MUST be set */ server->server_name= strdup_root(&mem, server_options->server_name); server->server_name_length= server_options->server_name_length; @@ -1086,7 +1077,7 @@ int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, server->owner= server_options->owner ? strdup_root(&mem, server_options->owner) : unset_ptr; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } /* @@ -1102,13 +1093,12 @@ int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, */ -int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered) +static void +prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, + FOREIGN_SERVER *existing, + FOREIGN_SERVER *altered) { - int error; DBUG_ENTER("prepare_server_struct_for_update"); - error= 0; altered->server_name= strdup_root(&mem, server_options->server_name); altered->server_name_length= server_options->server_name_length; @@ -1159,7 +1149,7 @@ int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, (strcmp(server_options->owner, existing->owner))) ? 
strdup_root(&mem, server_options->owner) : 0; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } /* @@ -1178,16 +1168,65 @@ int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, void servers_free(bool end) { DBUG_ENTER("servers_free"); - if (!servers_cache_initialised) + if (!hash_inited(&servers_cache)) + DBUG_VOID_RETURN; + if (!end) + { + free_root(&mem, MYF(MY_MARK_BLOCKS_FREE)); + my_hash_reset(&servers_cache); DBUG_VOID_RETURN; - VOID(pthread_mutex_destroy(&servers_cache_mutex)); - servers_cache_initialised=0; + } + rwlock_destroy(&THR_LOCK_servers); free_root(&mem,MYF(0)); hash_free(&servers_cache); DBUG_VOID_RETURN; } +/* + SYNOPSIS + + clone_server(MEM_ROOT *mem_root, FOREIGN_SERVER *orig, FOREIGN_SERVER *buff) + + Create a clone of FOREIGN_SERVER. If the supplied mem_root is of + thd->mem_root then the copy is automatically disposed at end of statement. + + NOTES + + ARGS + MEM_ROOT pointer (strings are copied into this mem root) + FOREIGN_SERVER pointer (made a copy of) + FOREIGN_SERVER buffer (if not-NULL, this pointer is returned) + + RETURN VALUE + FOREIGN_SEVER pointer (copy of one supplied FOREIGN_SERVER) +*/ + +static FOREIGN_SERVER *clone_server(MEM_ROOT *mem, const FOREIGN_SERVER *server, + FOREIGN_SERVER *buffer) +{ + DBUG_ENTER("sql_server.cc:clone_server"); + + if (!buffer) + buffer= (FOREIGN_SERVER *) alloc_root(mem, sizeof(FOREIGN_SERVER)); + + buffer->server_name= strmake_root(mem, server->server_name, + server->server_name_length); + buffer->port= server->port; + buffer->server_name_length= server->server_name_length; + + /* TODO: We need to examine which of these can really be NULL */ + buffer->db= server->db ? strdup_root(mem, server->db) : NULL; + buffer->scheme= server->scheme ? strdup_root(mem, server->scheme) : NULL; + buffer->username= server->username? strdup_root(mem, server->username): NULL; + buffer->password= server->password? strdup_root(mem, server->password): NULL; + buffer->socket= server->socket ? strdup_root(mem, server->socket) : NULL; + buffer->owner= server->owner ? strdup_root(mem, server->owner) : NULL; + buffer->host= server->host ? strdup_root(mem, server->host) : NULL; + + DBUG_RETURN(buffer); +} + /* @@ -1202,11 +1241,11 @@ void servers_free(bool end) */ -FOREIGN_SERVER *get_server_by_name(const char *server_name) +FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name, + FOREIGN_SERVER *buff) { - ulong error_num=0; uint server_name_length; - FOREIGN_SERVER *server= 0; + FOREIGN_SERVER *server; DBUG_ENTER("get_server_by_name"); DBUG_PRINT("info", ("server_name %s", server_name)); @@ -1215,12 +1254,11 @@ FOREIGN_SERVER *get_server_by_name(const char *server_name) if (! 
server_name || !strlen(server_name)) { DBUG_PRINT("info", ("server_name not defined!")); - error_num= 1; DBUG_RETURN((FOREIGN_SERVER *)NULL); } DBUG_PRINT("info", ("locking servers_cache")); - VOID(pthread_mutex_lock(&servers_cache_mutex)); + rw_rdlock(&THR_LOCK_servers); if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache, (byte*) server_name, server_name_length))) @@ -1229,8 +1267,12 @@ FOREIGN_SERVER *get_server_by_name(const char *server_name) server_name, server_name_length)); server= (FOREIGN_SERVER *) NULL; } + /* otherwise, make copy of server */ + else + server= clone_server(mem, server, buff); + DBUG_PRINT("info", ("unlocking servers_cache")); - VOID(pthread_mutex_unlock(&servers_cache_mutex)); + rw_unlock(&THR_LOCK_servers); DBUG_RETURN(server); } diff --git a/sql/sql_servers.h b/sql/sql_servers.h index 23b8cefd5bb..63c691893d1 100644 --- a/sql/sql_servers.h +++ b/sql/sql_servers.h @@ -25,40 +25,19 @@ typedef struct st_federated_server } FOREIGN_SERVER; /* cache handlers */ -my_bool servers_init(bool dont_read_server_table); -my_bool servers_reload(THD *thd); -my_bool get_server_from_table_to_cache(TABLE *table); +bool servers_init(bool dont_read_server_table); +bool servers_reload(THD *thd); void servers_free(bool end=0); /* insert functions */ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int insert_server(THD *thd, FOREIGN_SERVER *server_options); -int insert_server_record(TABLE *table, FOREIGN_SERVER *server); -int insert_server_record_into_cache(FOREIGN_SERVER *server); -void store_server_fields_for_insert(TABLE *table, FOREIGN_SERVER *server); -void store_server_fields_for_insert(TABLE *table, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -int prepare_server_struct_for_insert(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *server); /* drop functions */ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int delete_server_record(TABLE *table, - char *server_name, - int server_name_length); -int delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options); /* update functions */ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options); -int prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options, - FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -int update_server(THD *thd, FOREIGN_SERVER *existing, FOREIGN_SERVER *altered); -int update_server_record(TABLE *table, FOREIGN_SERVER *server); -int update_server_record_in_cache(FOREIGN_SERVER *existing, - FOREIGN_SERVER *altered); -/* utility functions */ -void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); -FOREIGN_SERVER *get_server_by_name(const char *server_name); -my_bool server_exists_in_table(THD *thd, char *server_name); + +/* lookup functions */ +FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name, + FOREIGN_SERVER *server_buffer); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index a91eb28b191..b28077300f3 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -38,6 +38,7 @@ enum enum_i_s_events_fields ISE_EVENT_SCHEMA, ISE_EVENT_NAME, ISE_DEFINER, + ISE_TIME_ZONE, ISE_EVENT_BODY, ISE_EVENT_DEFINITION, ISE_EVENT_TYPE, @@ -52,7 +53,8 @@ enum enum_i_s_events_fields ISE_CREATED, ISE_LAST_ALTERED, ISE_LAST_EXECUTED, - ISE_EVENT_COMMENT + ISE_EVENT_COMMENT, + ISE_ORIGINATOR }; @@ -360,7 +362,7 @@ bool mysqld_show_privileges(THD *thd) field_list.push_back(new Item_empty_string("Privilege",10)); field_list.push_back(new Item_empty_string("Context",15)); - field_list.push_back(new 
Item_empty_string("Comment",NAME_LEN)); + field_list.push_back(new Item_empty_string("Comment",NAME_CHAR_LEN)); if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) @@ -424,7 +426,8 @@ bool mysqld_show_column_types(THD *thd) DBUG_ENTER("mysqld_show_column_types"); field_list.push_back(new Item_empty_string("Type",30)); - field_list.push_back(new Item_int("Size",(longlong) 1,21)); + field_list.push_back(new Item_int("Size",(longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); field_list.push_back(new Item_empty_string("Min_Value",20)); field_list.push_back(new Item_empty_string("Max_Value",20)); field_list.push_back(new Item_return_int("Prec", 4, MYSQL_TYPE_SHORT)); @@ -435,8 +438,8 @@ bool mysqld_show_column_types(THD *thd) field_list.push_back(new Item_empty_string("Zerofill",4)); field_list.push_back(new Item_empty_string("Searchable",4)); field_list.push_back(new Item_empty_string("Case_Sensitive",4)); - field_list.push_back(new Item_empty_string("Default",NAME_LEN)); - field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); + field_list.push_back(new Item_empty_string("Default",NAME_CHAR_LEN)); + field_list.push_back(new Item_empty_string("Comment",NAME_CHAR_LEN)); if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) @@ -518,7 +521,7 @@ find_files(THD *thd, List<char> *files, const char *db, for (i=0 ; i < (uint) dirp->number_off_files ; i++) { - char uname[NAME_LEN*3+1]; /* Unencoded name */ + char uname[NAME_LEN + 1]; /* Unencoded name */ file=dirp->dir_entry+i; if (dir) { /* Return databases */ @@ -648,13 +651,13 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) List<Item> field_list; if (table_list->view) { - field_list.push_back(new Item_empty_string("View",NAME_LEN)); + field_list.push_back(new Item_empty_string("View",NAME_CHAR_LEN)); field_list.push_back(new Item_empty_string("Create View", max(buffer.length(),1024))); } else { - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); + field_list.push_back(new Item_empty_string("Table",NAME_CHAR_LEN)); // 1024 is for not to confuse old clients field_list.push_back(new Item_empty_string("Create Table", max(buffer.length(),1024))); @@ -712,9 +715,9 @@ bool mysqld_show_create_db(THD *thd, char *dbname, } #endif if (!my_strcasecmp(system_charset_info, dbname, - information_schema_name.str)) + INFORMATION_SCHEMA_NAME.str)) { - dbname= information_schema_name.str; + dbname= INFORMATION_SCHEMA_NAME.str; create.default_table_charset= system_charset_info; } else @@ -728,7 +731,7 @@ bool mysqld_show_create_db(THD *thd, char *dbname, load_db_opt_by_name(thd, dbname, &create); } List<Item> field_list; - field_list.push_back(new Item_empty_string("Database",NAME_LEN)); + field_list.push_back(new Item_empty_string("Database",NAME_CHAR_LEN)); field_list.push_back(new Item_empty_string("Create Database",1024)); if (protocol->send_fields(&field_list, @@ -1221,7 +1224,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, if (key_part->field && (key_part->length != table->field[key_part->fieldnr-1]->key_length() && - !(key_info->flags & HA_FULLTEXT))) + !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL)))) { char *end; buff[0] = '('; @@ -1620,10 +1623,10 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_list_processes"); - field_list.push_back(new Item_int("Id",0,11)); + field_list.push_back(new Item_int("Id", 0, MY_INT32_NUM_DECIMAL_DIGITS)); field_list.push_back(new 
Item_empty_string("User",16)); field_list.push_back(new Item_empty_string("Host",LIST_PROCESS_HOST_LEN)); - field_list.push_back(field=new Item_empty_string("db",NAME_LEN)); + field_list.push_back(field=new Item_empty_string("db",NAME_CHAR_LEN)); field->maybe_null=1; field_list.push_back(new Item_empty_string("Command",16)); field_list.push_back(new Item_return_int("Time",7, MYSQL_TYPE_LONG)); @@ -1797,7 +1800,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond) else table->field[4]->store(command_name[tmp->command].str, command_name[tmp->command].length, cs); - /* TIME */ + /* MYSQL_TIME */ table->field[5]->store((uint32)(tmp->start_time ? now - tmp->start_time : 0), TRUE); /* STATE */ @@ -2194,7 +2197,7 @@ LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, /* INFORMATION_SCHEMA name */ -LEX_STRING information_schema_name= { C_STRING_WITH_LEN("information_schema")}; +LEX_STRING INFORMATION_SCHEMA_NAME= { C_STRING_WITH_LEN("information_schema")}; /* This is only used internally, but we need it here as a forward reference */ extern ST_SCHEMA_TABLE schema_tables[]; @@ -2266,8 +2269,7 @@ int make_table_list(THD *thd, SELECT_LEX *sel, ident_table.length= strlen(table); table_ident= new Table_ident(thd, ident_db, ident_table, 1); sel->init_query(); - if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, - (List<String> *) 0, (List<String> *) 0)) + if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ)) return 1; return 0; } @@ -2411,11 +2413,11 @@ int make_db_list(THD *thd, List<char> *files, */ if (!idx_field_vals->db_value || !wild_case_compare(system_charset_info, - information_schema_name.str, + INFORMATION_SCHEMA_NAME.str, idx_field_vals->db_value)) { *with_i_schema= 1; - if (files->push_back(thd->strdup(information_schema_name.str))) + if (files->push_back(thd->strdup(INFORMATION_SCHEMA_NAME.str))) return 1; } return (find_files(thd, files, NullS, mysql_data_home, @@ -2429,11 +2431,11 @@ int make_db_list(THD *thd, List<char> *files, */ if (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) { - if (!my_strcasecmp(system_charset_info, information_schema_name.str, + if (!my_strcasecmp(system_charset_info, INFORMATION_SCHEMA_NAME.str, idx_field_vals->db_value)) { *with_i_schema= 1; - return files->push_back(thd->strdup(information_schema_name.str)); + return files->push_back(thd->strdup(INFORMATION_SCHEMA_NAME.str)); } return files->push_back(thd->strdup(idx_field_vals->db_value)); } @@ -2442,7 +2444,7 @@ int make_db_list(THD *thd, List<char> *files, Create list of existing databases. It is used in case of select from information schema table */ - if (files->push_back(thd->strdup(information_schema_name.str))) + if (files->push_back(thd->strdup(INFORMATION_SCHEMA_NAME.str))) return 1; *with_i_schema= 1; return (find_files(thd, files, NullS, @@ -2562,7 +2564,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) */ thd->reset_n_backup_open_tables_state(&open_tables_state_backup); - if (lsel) + if (lsel && lsel->table_list.first) { TABLE_LIST *show_table_list= (TABLE_LIST*) lsel->table_list.first; bool res; @@ -2731,18 +2733,32 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) res= open_normal_and_derived_tables(thd, show_table_list, MYSQL_LOCK_IGNORE_FLUSH); lex->sql_command= save_sql_command; - /* - We should use show_table_list->alias instead of - show_table_list->table_name because table_name - could be changed during opening of I_S tables. 
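The get_all_tables() hunk continuing below adds handling for a table that is dropped between the moment its name is listed and the moment it is opened for INFORMATION_SCHEMA: when the open fails with ER_NO_SUCH_TABLE, the row is simply skipped and the error cleared instead of failing the whole query. A tiny sketch of that decision, with the error number written out only for illustration:

// True when the open failure should be swallowed and the I_S row skipped.
bool skip_missing_table(bool open_failed, int last_errno)
{
  const int NO_SUCH_TABLE = 1146;   // ER_NO_SUCH_TABLE in the server's error numbering
  return open_failed && last_errno == NO_SUCH_TABLE;
}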
It's safe - to use alias because alias contains original table name - in this case. + /* + They can drop table after table names list creation and + before table opening. We open non existing table and + get ER_NO_SUCH_TABLE error. In this case we do not store + the record into I_S table and clear error. */ - res= schema_table->process_table(thd, show_table_list, table, - res, orig_base_name, - show_table_list->alias); - close_tables_for_reopen(thd, &show_table_list); - DBUG_ASSERT(!lex->query_tables_own_last); + if (thd->net.last_errno == ER_NO_SUCH_TABLE) + { + res= 0; + thd->clear_error(); + } + else + { + /* + We should use show_table_list->alias instead of + show_table_list->table_name because table_name + could be changed during opening of I_S tables. It's safe + to use alias because alias contains original table name + in this case. + */ + res= schema_table->process_table(thd, show_table_list, table, + res, orig_base_name, + show_table_list->alias); + close_tables_for_reopen(thd, &show_table_list); + DBUG_ASSERT(!lex->query_tables_own_last); + } if (res) goto err; } @@ -2835,7 +2851,7 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, const char *file_name) { const char *tmp_buff; - TIME time; + MYSQL_TIME time; CHARSET_INFO *cs= system_charset_info; DBUG_ENTER("get_schema_tables_record"); @@ -3394,7 +3410,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, { String tmp_string; String sp_db, sp_name, definer; - TIME time; + MYSQL_TIME time; LEX *lex= thd->lex; CHARSET_INFO *cs= system_charset_info; get_field(thd->mem_root, proc_table->field[0], &sp_db); @@ -3511,7 +3527,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) err: proc_table->file->ha_index_end(); - close_proc_table(thd, &open_tables_state_backup); + close_system_tables(thd, &open_tables_state_backup); DBUG_RETURN(res); } @@ -3759,8 +3775,7 @@ static bool store_trigger(THD *thd, TABLE *table, const char *db, LEX_STRING *definer_buffer) { CHARSET_INFO *cs= system_charset_info; - byte *sql_mode_str; - ulong sql_mode_len; + LEX_STRING sql_mode_rep; restore_record(table, s->default_values); table->field[1]->store(db, strlen(db), cs); @@ -3776,11 +3791,9 @@ static bool store_trigger(THD *thd, TABLE *table, const char *db, table->field[14]->store(STRING_WITH_LEN("OLD"), cs); table->field[15]->store(STRING_WITH_LEN("NEW"), cs); - sql_mode_str= - sys_var_thd_sql_mode::symbolic_mode_representation(thd, - sql_mode, - &sql_mode_len); - table->field[17]->store((const char*)sql_mode_str, sql_mode_len, cs); + sys_var_thd_sql_mode::symbolic_mode_representation(thd, sql_mode, + &sql_mode_rep); + table->field[17]->store(sql_mode_rep.str, sql_mode_rep.length, cs); table->field[18]->store((const char *)definer_buffer->str, definer_buffer->length, cs); return schema_table_store_record(thd, table); } @@ -3966,7 +3979,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table, TABLE* table= schema_table; CHARSET_INFO *cs= system_charset_info; PARTITION_INFO stat_info; - TIME time; + MYSQL_TIME time; file->get_dynamic_partition_info(&stat_info, part_id); table->field[12]->store((longlong) stat_info.records, TRUE); table->field[13]->store((longlong) stat_info.mean_rec_length, TRUE); @@ -4304,15 +4317,15 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) { const char *wild= thd->lex->wild ? 
thd->lex->wild->ptr() : NullS; CHARSET_INFO *scs= system_charset_info; - TIME time; + MYSQL_TIME time; Event_timed et; - DBUG_ENTER("fill_events_copy_to_schema_tab"); + DBUG_ENTER("copy_event_to_schema_table"); restore_record(sch_table, s->default_values); - if (et.load_from_row(event_table)) + if (et.load_from_row(thd, event_table)) { - my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0)); + my_error(ER_CANNOT_LOAD_FROM_TABLE, MYF(0), event_table->alias); DBUG_RETURN(1); } @@ -4337,6 +4350,9 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) store(et.name.str, et.name.length, scs); sch_table->field[ISE_DEFINER]-> store(et.definer.str, et.definer.length, scs); + const String *tz_name= et.time_zone->get_name(); + sch_table->field[ISE_TIME_ZONE]-> + store(tz_name->ptr(), tz_name->length(), scs); sch_table->field[ISE_EVENT_BODY]-> store(STRING_WITH_LEN("SQL"), scs); sch_table->field[ISE_EVENT_DEFINITION]-> @@ -4344,15 +4360,15 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) /* SQL_MODE */ { - byte *sql_mode_str; - ulong sql_mode_len= 0; - sql_mode_str= - sys_var_thd_sql_mode::symbolic_mode_representation(thd, et.sql_mode, - &sql_mode_len); + LEX_STRING sql_mode; + sys_var_thd_sql_mode::symbolic_mode_representation(thd, et.sql_mode, + &sql_mode); sch_table->field[ISE_SQL_MODE]-> - store((const char*)sql_mode_str, sql_mode_len, scs); + store(sql_mode.str, sql_mode.length, scs); } + int not_used=0; + if (et.expression) { String show_str; @@ -4372,15 +4388,17 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[ISE_INTERVAL_FIELD]->store(ival->str, ival->length, scs); /* starts & ends . STARTS is always set - see sql_yacc.yy */ + et.time_zone->gmt_sec_to_TIME(&time, et.starts); sch_table->field[ISE_STARTS]->set_notnull(); sch_table->field[ISE_STARTS]-> - store_time(&et.starts, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); if (!et.ends_null) { + et.time_zone->gmt_sec_to_TIME(&time, et.ends); sch_table->field[ISE_ENDS]->set_notnull(); sch_table->field[ISE_ENDS]-> - store_time(&et.ends, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } } else @@ -4388,16 +4406,30 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) /* type */ sch_table->field[ISE_EVENT_TYPE]->store(STRING_WITH_LEN("ONE TIME"), scs); + et.time_zone->gmt_sec_to_TIME(&time, et.execute_at); sch_table->field[ISE_EXECUTE_AT]->set_notnull(); sch_table->field[ISE_EXECUTE_AT]-> - store_time(&et.execute_at, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } /* status */ - if (et.status == Event_timed::ENABLED) - sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs); - else - sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs); + + switch (et.status) + { + case Event_timed::ENABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("ENABLED"), scs); + break; + case Event_timed::SLAVESIDE_DISABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("SLAVESIDE_DISABLED"), + scs); + break; + case Event_timed::DISABLED: + sch_table->field[ISE_STATUS]->store(STRING_WITH_LEN("DISABLED"), scs); + break; + default: + DBUG_ASSERT(0); + } + sch_table->field[ISE_ORIGINATOR]->store(et.originator, TRUE); /* on_completion */ if (et.on_completion == Event_timed::ON_COMPLETION_DROP) @@ -4407,7 +4439,6 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[ISE_ON_COMPLETION]-> 
store(STRING_WITH_LEN("PRESERVE"), scs); - int not_used=0; number_to_datetime(et.created, &time, 0, ¬_used); DBUG_ASSERT(not_used==0); sch_table->field[ISE_CREATED]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); @@ -4417,11 +4448,12 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[ISE_LAST_ALTERED]-> store_time(&time, MYSQL_TIMESTAMP_DATETIME); - if (et.last_executed.year) + if (et.last_executed) { + et.time_zone->gmt_sec_to_TIME(&time, et.last_executed); sch_table->field[ISE_LAST_EXECUTED]->set_notnull(); sch_table->field[ISE_LAST_EXECUTED]-> - store_time(&et.last_executed, MYSQL_TIMESTAMP_DATETIME); + store_time(&time, MYSQL_TIMESTAMP_DATETIME); } sch_table->field[ISE_EVENT_COMMENT]-> @@ -4686,6 +4718,12 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list) DBUG_RETURN(0); } break; + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: + if ((item= new Item_float(fields_info->field_name, 0.0, NOT_FIXED_DEC, + fields_info->field_length)) == NULL) + DBUG_RETURN(NULL); + break; case MYSQL_TYPE_DECIMAL: if (!(item= new Item_decimal((longlong) fields_info->value, false))) { @@ -4702,6 +4740,9 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list) strlen(fields_info->field_name), cs); break; default: + /* Don't let unimplemented types pass through. Could be a grave error. */ + DBUG_ASSERT(fields_info->field_type == MYSQL_TYPE_STRING); + /* this should be changed when Item_empty_string is fixed(in 4.1) */ if (!(item= new Item_empty_string("", 0, cs))) { @@ -4936,9 +4977,7 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("mysql_schema_table"); if (!(table= table_list->schema_table->create_table(thd, table_list))) - { DBUG_RETURN(1); - } table->s->tmp_table= SYSTEM_TMP_TABLE; table->grant.privilege= SELECT_ACL; /* @@ -5027,14 +5066,13 @@ int make_schema_select(THD *thd, SELECT_LEX *sel, We have to make non const db_name & table_name because of lower_case_table_names */ - make_lex_string(thd, &db, information_schema_name.str, - information_schema_name.length, 0); + make_lex_string(thd, &db, INFORMATION_SCHEMA_NAME.str, + INFORMATION_SCHEMA_NAME.length, 0); make_lex_string(thd, &table, schema_table->table_name, strlen(schema_table->table_name), 0); if (schema_table->old_format(thd, schema_table) || /* Handle old syntax */ !sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0), - 0, 0, TL_READ, (List<String> *) 0, - (List<String> *) 0)) + 0, 0, TL_READ)) { DBUG_RETURN(1); } @@ -5321,7 +5359,7 @@ int fill_schema_session_variables(THD *thd, TABLE_LIST *tables, COND *cond) ST_FIELD_INFO schema_fields_info[]= { {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"SCHEMA_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, + {"SCHEMA_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, {"DEFAULT_CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, {"DEFAULT_COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, {"SQL_PATH", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, @@ -5332,24 +5370,29 @@ ST_FIELD_INFO schema_fields_info[]= ST_FIELD_INFO tables_fields_info[]= { {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, - {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"ENGINE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Engine"}, - {"VERSION", 21 , MYSQL_TYPE_LONG, 0, 1, "Version"}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", 
NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"TABLE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ENGINE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Engine"}, + {"VERSION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Version"}, {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format"}, - {"TABLE_ROWS", 21 , MYSQL_TYPE_LONG, 0, 1, "Rows"}, - {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Avg_row_length"}, - {"DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_length"}, - {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Max_data_length"}, - {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Index_length"}, - {"DATA_FREE", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_free"}, - {"AUTO_INCREMENT", 21 , MYSQL_TYPE_LONG, 0, 1, "Auto_increment"}, + {"TABLE_ROWS", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Rows"}, + {"AVG_ROW_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Avg_row_length"}, + {"DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Data_length"}, + {"MAX_DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Max_data_length"}, + {"INDEX_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Index_length"}, + {"DATA_FREE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Data_free"}, + {"AUTO_INCREMENT", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Auto_increment"}, {"CREATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Create_time"}, {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Update_time"}, {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Check_time"}, {"TABLE_COLLATION", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, - {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, "Checksum"}, + {"CHECKSUM", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Checksum"}, {"CREATE_OPTIONS", 255, MYSQL_TYPE_STRING, 0, 1, "Create_options"}, {"TABLE_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} @@ -5359,17 +5402,18 @@ ST_FIELD_INFO tables_fields_info[]= ST_FIELD_INFO columns_fields_info[]= { {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Field"}, - {"ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Field"}, + {"ORDINAL_POSITION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 0, 0}, {"COLUMN_DEFAULT", MAX_FIELD_VARCHARLENGTH, MYSQL_TYPE_STRING, 0, 1, "Default"}, {"IS_NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, - {"DATA_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"CHARACTER_MAXIMUM_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, - {"CHARACTER_OCTET_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, - {"NUMERIC_PRECISION", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, - {"NUMERIC_SCALE", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, + {"DATA_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHARACTER_MAXIMUM_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + 0}, + {"CHARACTER_OCTET_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_PRECISION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_SCALE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 1, 0}, {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, {"COLUMN_TYPE", 65535, 
MYSQL_TYPE_STRING, 0, 0, "Type"}, @@ -5395,7 +5439,7 @@ ST_FIELD_INFO collation_fields_info[]= { {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Collation"}, {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Charset"}, - {"ID", 11, MYSQL_TYPE_LONG, 0, 0, "Id"}, + {"ID", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Id"}, {"IS_DEFAULT", 3, MYSQL_TYPE_STRING, 0, 0, "Default"}, {"IS_COMPILED", 3, MYSQL_TYPE_STRING, 0, 0, "Compiled"}, {"SORTLEN", 3 ,MYSQL_TYPE_LONG, 0, 0, "Sortlen"}, @@ -5417,10 +5461,11 @@ ST_FIELD_INFO engines_fields_info[]= ST_FIELD_INFO events_fields_info[]= { - {"EVENT_CATALOG", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"EVENT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, - {"EVENT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"EVENT_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EVENT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, + {"EVENT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"}, + {"TIME_ZONE", 64, MYSQL_TYPE_STRING, 0, 0, "Time zone"}, {"EVENT_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0}, {"EVENT_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, {"EVENT_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"}, @@ -5430,13 +5475,14 @@ ST_FIELD_INFO events_fields_info[]= {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, {"STARTS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Starts"}, {"ENDS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Ends"}, - {"STATUS", 8, MYSQL_TYPE_STRING, 0, 0, "Status"}, + {"STATUS", 18, MYSQL_TYPE_STRING, 0, 0, "Status"}, {"ON_COMPLETION", 12, MYSQL_TYPE_STRING, 0, 0, 0}, {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0}, {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, 0}, {"LAST_EXECUTED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0}, - {"EVENT_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} + {"EVENT_COMMENT", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ORIGINATOR", 10, MYSQL_TYPE_LONG, 0, 0, "Originator"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5451,25 +5497,25 @@ ST_FIELD_INFO coll_charset_app_fields_info[]= ST_FIELD_INFO proc_fields_info[]= { - {"SPECIFIC_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SPECIFIC_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"ROUTINE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"ROUTINE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, - {"ROUTINE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"ROUTINE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, + {"ROUTINE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, {"ROUTINE_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"}, - {"DTD_IDENTIFIER", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"DTD_IDENTIFIER", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"ROUTINE_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0}, {"ROUTINE_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0}, - {"EXTERNAL_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"EXTERNAL_LANGUAGE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EXTERNAL_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EXTERNAL_LANGUAGE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"PARAMETER_STYLE", 8, MYSQL_TYPE_STRING, 0, 0, 0}, {"IS_DETERMINISTIC", 3, MYSQL_TYPE_STRING, 0, 0, 0}, - {"SQL_DATA_ACCESS", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"SQL_PATH", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"SQL_DATA_ACCESS", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_PATH", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, "Security_type"}, {"CREATED", 0, 
MYSQL_TYPE_TIMESTAMP, 0, 0, "Created"}, {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Modified"}, {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, - {"ROUTINE_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {"ROUTINE_COMMENT", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment"}, {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5478,15 +5524,15 @@ ST_FIELD_INFO proc_fields_info[]= ST_FIELD_INFO stat_fields_info[]= { {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, {"NON_UNIQUE", 1, MYSQL_TYPE_LONG, 0, 0, "Non_unique"}, - {"INDEX_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"INDEX_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Key_name"}, + {"INDEX_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"INDEX_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Key_name"}, {"SEQ_IN_INDEX", 2, MYSQL_TYPE_LONG, 0, 0, "Seq_in_index"}, - {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Column_name"}, + {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Column_name"}, {"COLLATION", 1, MYSQL_TYPE_STRING, 0, 1, "Collation"}, - {"CARDINALITY", 21, MYSQL_TYPE_LONG, 0, 1, "Cardinality"}, + {"CARDINALITY", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 1, "Cardinality"}, {"SUB_PART", 3, MYSQL_TYPE_LONG, 0, 1, "Sub_part"}, {"PACKED", 10, MYSQL_TYPE_STRING, 0, 1, "Packed"}, {"NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, @@ -5499,8 +5545,8 @@ ST_FIELD_INFO stat_fields_info[]= ST_FIELD_INFO view_fields_info[]= { {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"VIEW_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, {"CHECK_OPTION", 8, MYSQL_TYPE_STRING, 0, 0, 0}, {"IS_UPDATABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, @@ -5514,7 +5560,7 @@ ST_FIELD_INFO user_privileges_fields_info[]= { {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5524,8 +5570,8 @@ ST_FIELD_INFO schema_privileges_fields_info[]= { {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5535,9 +5581,9 @@ ST_FIELD_INFO table_privileges_fields_info[]= { {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 
0, 0}, + {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5547,10 +5593,10 @@ ST_FIELD_INFO column_privileges_fields_info[]= { {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5559,11 +5605,11 @@ ST_FIELD_INFO column_privileges_fields_info[]= ST_FIELD_INFO table_constraints_fields_info[]= { {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"CONSTRAINT_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5571,17 +5617,17 @@ ST_FIELD_INFO table_constraints_fields_info[]= ST_FIELD_INFO key_column_usage_fields_info[]= { {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"ORDINAL_POSITION", 10 ,MYSQL_TYPE_LONG, 0, 0, 0}, {"POSITION_IN_UNIQUE_CONSTRAINT", 10 ,MYSQL_TYPE_LONG, 0, 1, 0}, - {"REFERENCED_TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"REFERENCED_TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"REFERENCED_COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"REFERENCED_TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"REFERENCED_TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"REFERENCED_COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5589,17 +5635,17 @@ ST_FIELD_INFO key_column_usage_fields_info[]= ST_FIELD_INFO table_names_fields_info[]= { {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Tables_in_"}, - {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_type"}, + 
{"TABLE_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Tables_in_"}, + {"TABLE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_type"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; ST_FIELD_INFO open_tables_fields_info[]= { - {"Database", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, - {"Table",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"Database", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, + {"Table",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, {"In_use", 1, MYSQL_TYPE_LONG, 0, 0, "In_use"}, {"Name_locked", 4, MYSQL_TYPE_LONG, 0, 0, "Name_locked"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} @@ -5609,19 +5655,19 @@ ST_FIELD_INFO open_tables_fields_info[]= ST_FIELD_INFO triggers_fields_info[]= { {"TRIGGER_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TRIGGER_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TRIGGER_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Trigger"}, + {"TRIGGER_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TRIGGER_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Trigger"}, {"EVENT_MANIPULATION", 6, MYSQL_TYPE_STRING, 0, 0, "Event"}, {"EVENT_OBJECT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"EVENT_OBJECT_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"EVENT_OBJECT_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"EVENT_OBJECT_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"EVENT_OBJECT_TABLE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, {"ACTION_ORDER", 4, MYSQL_TYPE_LONG, 0, 0, 0}, {"ACTION_CONDITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0}, {"ACTION_STATEMENT", 65535, MYSQL_TYPE_STRING, 0, 0, "Statement"}, {"ACTION_ORIENTATION", 9, MYSQL_TYPE_STRING, 0, 0, 0}, {"ACTION_TIMING", 6, MYSQL_TYPE_STRING, 0, 0, "Timing"}, - {"ACTION_REFERENCE_OLD_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"ACTION_REFERENCE_NEW_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ACTION_REFERENCE_OLD_TABLE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ACTION_REFERENCE_NEW_TABLE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"ACTION_REFERENCE_OLD_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0}, {"ACTION_REFERENCE_NEW_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0}, {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Created"}, @@ -5634,10 +5680,10 @@ ST_FIELD_INFO triggers_fields_info[]= ST_FIELD_INFO partitions_fields_info[]= { {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"PARTITION_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"SUBPARTITION_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PARTITION_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"SUBPARTITION_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"PARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, {"SUBPARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, {"PARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0}, @@ -5657,7 +5703,7 @@ ST_FIELD_INFO partitions_fields_info[]= {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, {"PARTITION_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, 0}, {"NODEGROUP", 12 , MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLESPACE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; @@ -5691,7 +5737,7 @@ ST_FIELD_INFO processlist_fields_info[]= {"ID", 4, 
MYSQL_TYPE_LONG, 0, 0, "Id"}, {"USER", 16, MYSQL_TYPE_STRING, 0, 0, "User"}, {"HOST", LIST_PROCESS_HOST_LEN, MYSQL_TYPE_STRING, 0, 0, "Host"}, - {"DB", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Db"}, + {"DB", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Db"}, {"COMMAND", 16, MYSQL_TYPE_STRING, 0, 0, "Command"}, {"TIME", 7, MYSQL_TYPE_LONG, 0, 0, "Time"}, {"STATE", 64, MYSQL_TYPE_STRING, 0, 1, "State"}, @@ -5702,14 +5748,14 @@ ST_FIELD_INFO processlist_fields_info[]= ST_FIELD_INFO plugin_fields_info[]= { - {"PLUGIN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"PLUGIN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, {"PLUGIN_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0}, {"PLUGIN_STATUS", 10, MYSQL_TYPE_STRING, 0, 0, "Status"}, {"PLUGIN_TYPE", 80, MYSQL_TYPE_STRING, 0, 0, "Type"}, {"PLUGIN_TYPE_VERSION", 20, MYSQL_TYPE_STRING, 0, 0, 0}, - {"PLUGIN_LIBRARY", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Library"}, + {"PLUGIN_LIBRARY", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, "Library"}, {"PLUGIN_LIBRARY_VERSION", 20, MYSQL_TYPE_STRING, 0, 1, 0}, - {"PLUGIN_AUTHOR", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"PLUGIN_AUTHOR", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0}, {"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License"}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} @@ -5718,16 +5764,16 @@ ST_FIELD_INFO plugin_fields_info[]= ST_FIELD_INFO files_fields_info[]= { {"FILE_ID", 4, MYSQL_TYPE_LONG, 0, 0, 0}, - {"FILE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"FILE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"FILE_TYPE", 20, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_CATALOG", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"LOGFILE_GROUP_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLESPACE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"LOGFILE_GROUP_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"LOGFILE_GROUP_NUMBER", 4, MYSQL_TYPE_LONG, 0, 1, 0}, - {"ENGINE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"FULLTEXT_KEYS", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ENGINE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"FULLTEXT_KEYS", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, {"DELETED_ROWS", 4, MYSQL_TYPE_LONG, 0, 1, 0}, {"UPDATE_COUNT", 4, MYSQL_TYPE_LONG, 0, 1, 0}, {"FREE_EXTENTS", 4, MYSQL_TYPE_LONG, 0, 1, 0}, @@ -5771,16 +5817,16 @@ void init_fill_schema_files_row(TABLE* table) ST_FIELD_INFO referential_constraints_fields_info[]= { {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {"UNIQUE_CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, - {"UNIQUE_CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"UNIQUE_CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"MATCH_OPTION", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"UPDATE_RULE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"DELETE_RULE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, - {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 
0}, - {"REFERENCED_TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"UNIQUE_CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"UNIQUE_CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"MATCH_OPTION", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"UPDATE_RULE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"DELETE_RULE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"REFERENCED_TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 0697fdd79b4..a2b25f0ffd0 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1922,22 +1922,25 @@ static int sort_keys(KEY *a, KEY *b) set_or_name "SET" or "ENUM" string for warning message name name of the checked column typelib list of values for the column + dup_val_count returns count of duplicate elements DESCRIPTION This function prints an warning for each value in list which has some duplicates on its right RETURN VALUES - void + 0 ok + 1 Error */ -void check_duplicates_in_interval(const char *set_or_name, +bool check_duplicates_in_interval(const char *set_or_name, const char *name, TYPELIB *typelib, - CHARSET_INFO *cs) + CHARSET_INFO *cs, unsigned int *dup_val_count) { TYPELIB tmp= *typelib; const char **cur_value= typelib->type_names; unsigned int *cur_length= typelib->type_lengths; + *dup_val_count= 0; for ( ; tmp.count > 1; cur_value++, cur_length++) { @@ -1946,12 +1949,21 @@ void check_duplicates_in_interval(const char *set_or_name, tmp.count--; if (find_type2(&tmp, (const char*)*cur_value, *cur_length, cs)) { + if ((current_thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))) + { + my_error(ER_DUPLICATED_VALUE_IN_TYPE, MYF(0), + name,*cur_value,set_or_name); + return 1; + } push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_NOTE, ER_DUPLICATED_VALUE_IN_TYPE, ER(ER_DUPLICATED_VALUE_IN_TYPE), name,*cur_value,set_or_name); + (*dup_val_count)++; } } + return 0; } @@ -2013,6 +2025,7 @@ int prepare_create_field(create_field *sql_field, int *timestamps, int *timestamps_with_niladic, longlong table_flags) { + unsigned int dup_val_count; DBUG_ENTER("prepare_field"); /* @@ -2086,9 +2099,10 @@ int prepare_create_field(create_field *sql_field, if (sql_field->charset->state & MY_CS_BINSORT) sql_field->pack_flag|=FIELDFLAG_BINARY; sql_field->unireg_check=Field::INTERVAL_FIELD; - check_duplicates_in_interval("ENUM",sql_field->field_name, - sql_field->interval, - sql_field->charset); + if (check_duplicates_in_interval("ENUM",sql_field->field_name, + sql_field->interval, + sql_field->charset, &dup_val_count)) + DBUG_RETURN(1); break; case MYSQL_TYPE_SET: sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | @@ -2096,9 +2110,16 @@ int prepare_create_field(create_field *sql_field, if (sql_field->charset->state & MY_CS_BINSORT) sql_field->pack_flag|=FIELDFLAG_BINARY; sql_field->unireg_check=Field::BIT_FIELD; - check_duplicates_in_interval("SET",sql_field->field_name, - sql_field->interval, - sql_field->charset); + if (check_duplicates_in_interval("SET",sql_field->field_name, + sql_field->interval, + sql_field->charset, &dup_val_count)) + DBUG_RETURN(1); + /* Check that count of unique members is not more then 64 */ + if (sql_field->interval->count - dup_val_count > sizeof(longlong)*8) + { + my_error(ER_TOO_BIG_SET, MYF(0), sql_field->field_name); + DBUG_RETURN(1); + } break; case MYSQL_TYPE_DATE: // Rest of string types 
case MYSQL_TYPE_NEWDATE: @@ -2541,6 +2562,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { DBUG_PRINT("info", ("key name: '%s' type: %d", key->name ? key->name : "(none)" , key->type)); + LEX_STRING key_name_str; if (key->type == Key::FOREIGN_KEY) { fk_key_count++; @@ -2562,7 +2584,10 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, my_error(ER_TOO_MANY_KEY_PARTS,MYF(0),tmp); DBUG_RETURN(-1); } - if (key->name && strlen(key->name) > NAME_LEN) + key_name_str.str= (char*) key->name; + key_name_str.length= key->name ? strlen(key->name) : 0; + if (check_string_char_length(&key_name_str, "", NAME_CHAR_LEN, + system_charset_info, 1)) { my_error(ER_TOO_LONG_IDENT, MYF(0), key->name); DBUG_RETURN(-1); @@ -2791,6 +2816,12 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { column->length*= sql_field->charset->mbmaxlen; + if (key->type == Key::SPATIAL && column->length) + { + my_error(ER_WRONG_SUB_KEY, MYF(0)); + DBUG_RETURN(-1); + } + if (f_is_blob(sql_field->pack_flag) || (f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL)) { @@ -2884,6 +2915,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } else if (!f_is_geom(sql_field->pack_flag) && (column->length > length || + !Field::type_can_have_key_part (sql_field->sql_type) || ((f_is_packed(sql_field->pack_flag) || ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) && (key_info->flags & HA_NOSAME))) && @@ -3481,6 +3513,7 @@ bool mysql_create_table_internal(THD *thd, { bool create_if_not_exists = create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS; + if (ha_table_exists_in_engine(thd, db, table_name)) { DBUG_PRINT("info", ("Table with same name already existed in handler")); @@ -3931,7 +3964,9 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, /* Check if this is a table type that stores index and data separately, - like ISAM or MyISAM + like ISAM or MyISAM. We assume fixed order of engine file name + extentions array. First element of engine file name extentions array + is meta/index file extention. Second element - data file extention. */ ext= table->file->bas_ext(); if (!ext[0] || !ext[1]) @@ -4042,7 +4077,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, if (end_active_trans(thd)) DBUG_RETURN(1); - field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); + field_list.push_back(item = new Item_empty_string("Table", NAME_CHAR_LEN*2)); item->maybe_null = 1; field_list.push_back(item = new Item_empty_string("Op", 10)); item->maybe_null = 1; @@ -4611,6 +4646,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, #ifdef WITH_PARTITION_STORAGE_ENGINE char tmp_path[FN_REFLEN]; #endif + char ts_name[FN_LEN]; TABLE_LIST src_tables_list, dst_tables_list; DBUG_ENTER("mysql_create_like_table"); @@ -4624,7 +4660,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, /* Validate the source table */ - if (table_ident->table.length > NAME_LEN || + if (check_string_char_length(&table_ident->table, "", NAME_CHAR_LEN, + system_charset_info, 1) || (table_ident->table.length && check_table_name(src_table,table_ident->table.length))) { @@ -4692,6 +4729,18 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, DBUG_RETURN(TRUE); /* + For bug#25875, Newly created table through CREATE TABLE .. 
LIKE + has no ndb_dd attributes; + Add something to get possible tablespace info from src table, + it can get valid tablespace name only for disk-base ndb table + */ + if ((src_tables_list.table->file->get_tablespace_name(thd, ts_name, FN_LEN))) + { + create_info->tablespace= ts_name; + create_info->storage_media= HA_SM_DISK; + } + + /* Validate the destination table skip the destination table name checking as this is already @@ -5859,6 +5908,8 @@ view_err: */ if (!Field::type_can_have_key_part(cfield->field->type()) || !Field::type_can_have_key_part(cfield->sql_type) || + /* spatial keys can't have sub-key length */ + (key_info->flags & HA_SPATIAL) || (cfield->field->field_length == key_part_length && !f_is_blob(key_part->key_type)) || (cfield->length && (cfield->length < key_part_length / @@ -6742,7 +6793,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff); /* We can abort alter table for any table type */ - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; thd->abort_on_warning= !ignore && test(thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)); @@ -6829,7 +6880,9 @@ copy_data_between_tables(TABLE *from,TABLE *to, copy_ptr->do_copy(copy_ptr); } prev_insert_id= to->file->next_insert_id; - if ((error=to->file->ha_write_row((byte*) to->record[0]))) + error=to->file->write_row((byte*) to->record[0]); + to->auto_increment_field_not_null= FALSE; + if (error) { if (!ignore || to->file->is_fatal_error(error, HA_CHECK_DUP)) @@ -6939,7 +6992,8 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); item->maybe_null= 1; - field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); + field_list.push_back(item= new Item_int("Checksum", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); item->maybe_null= 1; if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 9e30cf5878c..d7573b42c5f 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -537,6 +537,6 @@ Estimated memory (with thread stack): %ld\n", (long) (thread_count * thread_stack + info.hblkhd + info.arena)); #endif - Events::get_instance()->dump_internal_status(); + Events::dump_internal_status(); puts(""); } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 311bd089c64..7b4deba527a 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -976,7 +976,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, LEX_STRING *trg_definer= it_definer++; thd->variables.sql_mode= (ulong)*trg_sql_mode; - lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length); + lex_start(thd, trg_create_str->str, trg_create_str->length); thd->spcont= 0; if (MYSQLparse((void *)thd) || thd->is_fatal_error) @@ -1543,9 +1543,16 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, old_field= trigger_table->field; } #ifndef NO_EMBEDDED_ACCESS_CHECKS - Security_context *save_ctx; + Security_context *sctx= &sp_trigger->m_security_ctx; + Security_context *save_ctx= NULL; - if (sp_change_security_context(thd, sp_trigger, &save_ctx)) + + if (sp_trigger->m_chistics->suid != SP_IS_NOT_SUID && + sctx->change_security_context(thd, + &sp_trigger->m_definer_user, + &sp_trigger->m_definer_host, + &sp_trigger->m_db, + &save_ctx)) return TRUE; /* @@ -1570,7 +1577,7 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, 
thd->security_ctx->priv_user, thd->security_ctx->host_or_ip, trigger_table->s->table_name.str); - sp_restore_security_context(thd, save_ctx); + sctx->restore_security_context(thd, save_ctx); return TRUE; } #endif // NO_EMBEDDED_ACCESS_CHECKS @@ -1582,7 +1589,7 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, thd->restore_sub_statement_state(&statement_state); #ifndef NO_EMBEDDED_ACCESS_CHECKS - sp_restore_security_context(thd, save_ctx); + sctx->restore_security_context(thd, save_ctx); #endif // NO_EMBEDDED_ACCESS_CHECKS } diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index 707fcc4e380..7d99dd811cd 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -110,6 +110,11 @@ public: const char *old_table, const char *new_db, const char *new_table); + bool has_triggers(trg_event_type event_type, + trg_action_time_type action_time) + { + return (bodies[event_type][action_time] != NULL); + } bool has_delete_triggers() { return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] || diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index cacfdd5adf8..89084c21c0c 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -170,8 +170,10 @@ void udf_init() This is done to ensure that only approved dll from the system directories are used (to make this even remotely secure). */ - if (my_strchr(files_charset_info, dl_name, dl_name + strlen(dl_name), FN_LIBCHAR) || - strlen(name.str) > NAME_LEN) + if (my_strchr(files_charset_info, dl_name, + dl_name + strlen(dl_name), FN_LIBCHAR) || + check_string_char_length(&name, "", NAME_CHAR_LEN, + system_charset_info, 1)) { sql_print_error("Invalid row in mysql.func table for function '%.64s'", name.str); @@ -398,12 +400,20 @@ int mysql_create_function(THD *thd,udf_func *udf) my_message(ER_UDF_NO_PATHS, ER(ER_UDF_NO_PATHS), MYF(0)); DBUG_RETURN(1); } - if (udf->name.length > NAME_LEN) + if (check_string_char_length(&udf->name, "", NAME_CHAR_LEN, + system_charset_info, 1)) { my_error(ER_TOO_LONG_IDENT, MYF(0), udf->name); DBUG_RETURN(1); } + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for CREATE FUNCTION command. + */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + rw_wrlock(&THR_LOCK_udf); if ((hash_search(&udf_hash,(byte*) udf->name.str, udf->name.length))) { @@ -467,6 +477,15 @@ int mysql_create_function(THD *thd,udf_func *udf) goto err; } rw_unlock(&THR_LOCK_udf); + + /* Binlog the create function. */ + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + DBUG_RETURN(0); err: @@ -485,11 +504,20 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) char *exact_name_str; uint exact_name_len; DBUG_ENTER("mysql_drop_function"); + if (!initialized) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } + + /* + Turn off row binlogging of this statement and use statement-based + so that all supporting tables are updated for DROP FUNCTION command. 
+ */ + if (thd->current_stmt_binlog_row_based) + thd->clear_current_stmt_binlog_row_based(); + rw_wrlock(&THR_LOCK_udf); if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name->str, (uint) udf_name->length))) @@ -515,8 +543,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) table->use_all_columns(); table->field[0]->store(exact_name_str, exact_name_len, &my_charset_bin); if (!table->file->index_read_idx(table->record[0], 0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, + (byte*) table->field[0]->ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { int error; @@ -525,7 +552,16 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) } close_thread_tables(thd); - rw_unlock(&THR_LOCK_udf); + rw_unlock(&THR_LOCK_udf); + + /* Binlog the drop function. */ + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + thd->binlog_query(THD::MYSQL_QUERY_TYPE, + thd->query, thd->query_length, FALSE, FALSE); + } + DBUG_RETURN(0); err: rw_unlock(&THR_LOCK_udf); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index f020168d742..bc81679a7fd 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -623,6 +623,12 @@ bool st_select_lex_unit::cleanup() join->tables= 0; } error|= fake_select_lex->cleanup(); + if (fake_select_lex->order_list.elements) + { + ORDER *ord; + for (ord= (ORDER*)fake_select_lex->order_list.first; ord; ord= ord->next) + (*ord->item)->cleanup(); + } } DBUG_RETURN(error); @@ -739,6 +745,7 @@ bool st_select_lex::cleanup() error= (bool) ((uint) error | (uint) lex_unit->cleanup()); } non_agg_fields.empty(); + inner_refs_list.empty(); DBUG_RETURN(error); } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 27a58a295ff..d0da52827e0 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -126,7 +126,7 @@ int mysql_update(THD *thd, #endif uint table_count= 0; ha_rows updated, found; - key_map old_used_keys; + key_map old_covering_keys; TABLE *table; SQL_SELECT *select; READ_RECORD info; @@ -165,8 +165,8 @@ int mysql_update(THD *thd, thd->proc_info="init"; table= table_list->table; - /* Calculate "table->used_keys" based on the WHERE */ - table->used_keys= table->s->keys_in_use; + /* Calculate "table->covering_keys" based on the WHERE */ + table->covering_keys= table->s->keys_in_use; table->quick_keys.clear_all(); #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -176,7 +176,7 @@ int mysql_update(THD *thd, if (mysql_prepare_update(thd, table_list, &conds, order_num, order)) DBUG_RETURN(1); - old_used_keys= table->used_keys; // Keys used in WHERE + old_covering_keys= table->covering_keys; // Keys used in WHERE /* Check the fields we are going to modify */ #ifndef NO_EMBEDDED_ACCESS_CHECKS table_list->grant.want_privilege= table->grant.want_privilege= want_privilege; @@ -201,8 +201,10 @@ int mysql_update(THD *thd, table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; else { - bitmap_set_bit(table->write_set, - table->timestamp_field->field_index); + if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE || + table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) + bitmap_set_bit(table->write_set, + table->timestamp_field->field_index); } } @@ -229,7 +231,7 @@ int mysql_update(THD *thd, limit= 0; // Impossible WHERE } // Don't count on usage of 'only index' when calculating which key to use - table->used_keys.clear_all(); + table->covering_keys.clear_all(); #ifdef WITH_PARTITION_STORAGE_ENGINE if (prune_partitions(thd, table, conds)) @@ -304,7 +306,7 @@ int mysql_update(THD *thd, We can't update table directly; We must first search after all matching rows 
before updating the table! */ - if (used_index < MAX_KEY && old_used_keys.is_set(used_index)) + if (used_index < MAX_KEY && old_covering_keys.is_set(used_index)) { table->key_read=1; table->mark_columns_used_by_index(used_index); @@ -447,12 +449,25 @@ int mysql_update(THD *thd, thd->proc_info="Updating"; transactional_table= table->file->has_transactions(); - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; thd->abort_on_warning= test(!ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); - will_batch= !table->file->start_bulk_update(); + if (table->triggers && + table->triggers->has_triggers(TRG_EVENT_UPDATE, + TRG_ACTION_AFTER)) + { + /* + The table has AFTER UPDATE triggers that might access to subject + table and therefore might need update to be done immediately. + So we turn-off the batching. + */ + (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH); + will_batch= FALSE; + } + else + will_batch= !table->file->start_bulk_update(); /* We can use compare_record() to optimize away updates if @@ -474,7 +489,7 @@ int mysql_update(THD *thd, if (fill_record_n_invoke_before_triggers(thd, fields, values, 0, table->triggers, TRG_EVENT_UPDATE)) - break; /* purecov: inspected */ + break; /* purecov: inspected */ found++; @@ -535,7 +550,7 @@ int mysql_update(THD *thd, if (!error) { updated++; - thd->no_trans_update= !transactional_table; + thd->no_trans_update.stmt= !transactional_table; if (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, @@ -661,11 +676,11 @@ int mysql_update(THD *thd, transactional_table, FALSE) && transactional_table) { - error=1; // Rollback update + error=1; // Rollback update } } if (!transactional_table) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } free_underlaid_joins(thd, select_lex); if (transactional_table) @@ -761,7 +776,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, /* Check that we are not using table that we are updating in a sub select */ { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, table_list, table_list->next_global))) + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0))) { update_non_unique_table_error(table_list, "UPDATE", duplicate); my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); @@ -987,7 +1002,7 @@ reopen_tables: tl->lock_type != TL_READ_NO_INSERT) { TABLE_LIST *duplicate; - if ((duplicate= unique_table(thd, tl, table_list))) + if ((duplicate= unique_table(thd, tl, table_list, 0))) { update_non_unique_table_error(table_list, "UPDATE", duplicate); DBUG_RETURN(TRUE); @@ -1030,7 +1045,7 @@ bool mysql_multi_update(THD *thd, handle_duplicates, ignore))) DBUG_RETURN(TRUE); - thd->no_trans_update= 0; + thd->no_trans_update.stmt= FALSE; thd->abort_on_warning= test(thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)); @@ -1092,7 +1107,7 @@ int multi_update::prepare(List<Item> ¬_used_values, } /* - We have to check values after setup_tables to get used_keys right in + We have to check values after setup_tables to get covering_keys right in reference tables */ @@ -1119,8 +1134,19 @@ int multi_update::prepare(List<Item> ¬_used_values, update.link_in_list((byte*) tl, (byte**) &tl->next_local); tl->shared= table_count++; table->no_keyread=1; - table->used_keys.clear_all(); + table->covering_keys.clear_all(); table->pos_in_table_list= tl; + if (table->triggers && + table->triggers->has_triggers(TRG_EVENT_UPDATE, + TRG_ACTION_AFTER)) + { + /* + The table 
has AFTER UPDATE triggers that might access to subject + table and therefore might need update to be done immediately. + So we turn-off the batching. + */ + (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH); + } } } @@ -1203,7 +1229,7 @@ static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab, TABLE_LIST *table_ref, TABLE_LIST *all_tables) { TABLE *table= join_tab->table; - if (unique_table(thd, table_ref, all_tables)) + if (unique_table(thd, table_ref, all_tables, 0)) return 0; switch (join_tab->type) { case JT_SYSTEM: @@ -1344,7 +1370,7 @@ multi_update::~multi_update() delete [] copy_field; thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting if (!trans_safe) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } @@ -1405,40 +1431,40 @@ bool multi_update::send_data(List<Item> ¬_used_values) else if (error == VIEW_CHECK_ERROR) DBUG_RETURN(1); } - if (!updated++) - { - /* - Inform the main table that we are going to update the table even - while we may be scanning it. This will flush the read cache - if it's used. - */ - main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE); - } - if ((error=table->file->ha_update_row(table->record[1], - table->record[0]))) - { - updated--; + if (!updated++) + { + /* + Inform the main table that we are going to update the table even + while we may be scanning it. This will flush the read cache + if it's used. + */ + main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE); + } + if ((error=table->file->ha_update_row(table->record[1], + table->record[0]))) + { + updated--; if (!ignore || table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) - { + { /* If (ignore && error == is ignorable) we don't have to do anything; otherwise... */ if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY)) thd->fatal_error(); /* Other handler errors are fatal */ - table->file->print_error(error,MYF(0)); - DBUG_RETURN(1); - } - } + table->file->print_error(error,MYF(0)); + DBUG_RETURN(1); + } + } else { if (!table->file->has_transactions()) - thd->no_trans_update= 1; + thd->no_trans_update.stmt= TRUE; if (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE)) - DBUG_RETURN(1); + DBUG_RETURN(1); } } } @@ -1674,7 +1700,7 @@ bool multi_update::send_eof() } } if (!transactional_tables) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update.all= TRUE; } if (transactional_tables) diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 622f7b99d33..453cbbc091e 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -224,6 +224,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, { LEX *lex= thd->lex; bool link_to_local; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + bool definer_check_is_needed= mode != VIEW_ALTER || lex->definer; +#endif /* first table in list is target VIEW name => cut off it */ TABLE_LIST *view= lex->unlink_first_table(&link_to_local); TABLE_LIST *tables= lex->query_tables; @@ -256,8 +259,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, /* DEFINER-clause is missing; we have to create default definer in persistent arena to be PS/SP friendly. + If this is an ALTER VIEW then the current user should be set as + the definer. 
*/ - Query_arena original_arena; Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena); @@ -277,11 +281,11 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, - same as current user - current user has SUPER_ACL */ - if (strcmp(lex->definer->user.str, - thd->security_ctx->priv_user) != 0 || - my_strcasecmp(system_charset_info, - lex->definer->host.str, - thd->security_ctx->priv_host) != 0) + if (definer_check_is_needed && + (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) != 0 || + my_strcasecmp(system_charset_info, + lex->definer->host.str, + thd->security_ctx->priv_host) != 0)) { if (!(thd->security_ctx->master_access & SUPER_ACL)) { @@ -492,35 +496,46 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, /* Compare/check grants on view with grants of underlying tables */ + + fill_effective_table_privileges(thd, &view->grant, view->db, + view->table_name); + + { + Item *report_item= NULL; + uint final_priv= VIEW_ANY_ACL; + for (sl= select_lex; sl; sl= sl->next_select()) { DBUG_ASSERT(view->db); /* Must be set in the parser */ List_iterator_fast<Item> it(sl->item_list); Item *item; - fill_effective_table_privileges(thd, &view->grant, view->db, - view->table_name); while ((item= it++)) { - Item_field *fld; + Item_field *fld= item->filed_for_view_update(); uint priv= (get_column_grant(thd, &view->grant, view->db, view->table_name, item->name) & VIEW_ANY_ACL); - if ((fld= item->filed_for_view_update())) + + if (fld && !fld->field->table->s->tmp_table) { - /* - Do we have more privileges on view field then underlying table field? - */ - if (!fld->field->table->s->tmp_table && (~fld->have_privileges & priv)) + final_priv&= fld->have_privileges; + + if (~fld->have_privileges & priv) + report_item= item; + } + } + } + + if (!final_priv) { - /* VIEW column has more privileges */ + DBUG_ASSERT(report_item); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), "create view", thd->security_ctx->priv_user, - thd->security_ctx->priv_host, item->name, + thd->security_ctx->priv_host, report_item->name, view->table_name); res= TRUE; goto err; - } - } } } #endif @@ -675,7 +690,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, char md5[MD5_BUFF_LENGTH]; bool can_be_merged; char dir_buff[FN_REFLEN], path_buff[FN_REFLEN]; - const uchar *endp; + const char *endp; LEX_STRING dir, file, path; DBUG_ENTER("mysql_register_view"); @@ -759,9 +774,9 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, view->query.str= (char*)str.ptr(); view->query.length= str.length()-1; // we do not need last \0 view->source.str= thd->query + thd->lex->create_view_select_start; - endp= (uchar*) view->source.str; - endp= skip_rear_comments(endp, (uchar*) (thd->query + thd->query_length)); - view->source.length= endp - (uchar*) view->source.str; + endp= view->source.str; + endp= skip_rear_comments(endp, thd->query + thd->query_length); + view->source.length= endp - view->source.str; view->file_version= 1; view->calc_md5(md5); view->md5.str= md5; @@ -970,7 +985,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, now Lex placed in statement memory */ table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local; - lex_start(thd, (uchar*)table->query.str, table->query.length); + lex_start(thd, table->query.str, table->query.length); view_select= &lex->select_lex; view_select->select_number= ++thd->select_number; { @@ -1004,6 +1019,11 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, CHARSET_INFO *save_cs= 
thd->variables.character_set_client; thd->variables.character_set_client= system_charset_info; res= MYSQLparse((void *)thd); + + if ((old_lex->sql_command == SQLCOM_SHOW_FIELDS) || + (old_lex->sql_command == SQLCOM_SHOW_CREATE)) + lex->sql_command= old_lex->sql_command; + thd->variables.character_set_client= save_cs; thd->variables.sql_mode= save_mode; } @@ -1029,7 +1049,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, } } else if (!table->prelocking_placeholder && - old_lex->sql_command == SQLCOM_SHOW_CREATE && + (old_lex->sql_command == SQLCOM_SHOW_CREATE) && !table->belong_to_view) { if (check_table_access(thd, SHOW_VIEW_ACL, table, 0)) @@ -1266,13 +1286,18 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, unit->slave= save_slave; // fix include_down initialisation } + /* + We can safely ignore the VIEW's ORDER BY if we merge into union + branch, as order is not important there. + */ + if (!table->select_lex->master_unit()->is_union()) + table->select_lex->order_list.push_back(&lex->select_lex.order_list); /* This SELECT_LEX will be linked in global SELECT_LEX list to make it processed by mysql_handle_derived(), but it will not be included to SELECT_LEX tree, because it will not be executed - */ - table->select_lex->order_list.push_back(&lex->select_lex.order_list); + */ goto ok; } @@ -1302,8 +1327,6 @@ ok: (st_select_lex_node**)&old_lex->all_selects_list; ok2: - if (!old_lex->time_zone_tables_used && thd->lex->time_zone_tables_used) - old_lex->time_zone_tables_used= thd->lex->time_zone_tables_used; DBUG_ASSERT(lex == thd->lex); thd->lex= old_lex; // Needed for prepare_security result= !table->prelocking_placeholder && table->prepare_security(thd); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 5d24fb4fa65..0c7d2fc2187 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -481,6 +481,7 @@ Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal, struct st_lex *lex; sp_head *sphead; struct p_elem_val *p_elem_value; + enum index_hint_type index_hint; } %{ @@ -492,7 +493,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); Currently there is 287 shift/reduce conflict. We should not introduce new conflicts any more. */ -%expect 287 +%expect 286 /* Comments for TOKENS. 
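(The index_hint member added to the parser %union above and the index_hint_type / index_hint_clause / index_hint_definition rules later in this diff implement the extended index-hint syntax. A brief SQL sketch follows; table, index and column names are illustrative only.)

    -- Hints can now be scoped to a particular operation:
    SELECT * FROM t1 FORCE INDEX FOR JOIN (i1, PRIMARY) WHERE a = 1;
    SELECT * FROM t1 IGNORE INDEX FOR GROUP BY (i1) GROUP BY a;
    -- and several hint clauses may be combined on one table reference:
    SELECT * FROM t1 USE INDEX (i1) IGNORE INDEX FOR ORDER BY (i2) ORDER BY a;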
@@ -779,6 +780,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token MASTER_SSL_CIPHER_SYM %token MASTER_SSL_KEY_SYM %token MASTER_SSL_SYM +%token MASTER_SSL_VERIFY_SERVER_CERT_SYM %token MASTER_SYM %token MASTER_USER_SYM %token MATCH /* SQL-2003-R */ @@ -1162,7 +1164,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); btree_or_rtree %type <string_list> - key_usage_list using_list + using_list %type <key_part> key_part @@ -1233,7 +1235,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); opt_column_list grant_privileges grant_ident grant_list grant_option object_privilege object_privilege_list user_list rename_list clear_privileges flush_options flush_option - equal optional_braces opt_key_definition key_usage_list2 + equal optional_braces opt_mi_check_type opt_to mi_check_types normal_join db_to_db table_to_table_list table_to_table opt_table_list opt_as handler_rkey_function handler_read_or_scan @@ -1246,7 +1248,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); statement sp_suid sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec - definer view_replace_or_algorithm view_replace view_algorithm_opt + definer view_replace_or_algorithm view_replace view_algorithm view_or_trigger_or_sp_or_event view_or_trigger_or_sp_or_event_tail view_suid view_tail view_list_opt view_list view_select @@ -1269,6 +1271,8 @@ END_OF_INPUT %type <spblock> sp_decls sp_decl %type <lex> sp_cursor_stmt %type <spname> sp_name +%type <index_hint> index_hint_type +%type <num> index_hint_clause %type <NONE> '-' '+' '*' '/' '%' '(' ')' @@ -1525,6 +1529,11 @@ master_def: { Lex->mi.ssl_key= $3.str; } + | MASTER_SSL_VERIFY_SERVER_CERT_SYM EQ ulong_num + { + Lex->mi.ssl_verify_server_cert= $3 ? 
+ LEX_MASTER_INFO::SSL_ENABLE : LEX_MASTER_INFO::SSL_DISABLE; + } | master_file_def ; @@ -1767,12 +1776,17 @@ ev_schedule_time: EVERY_SYM expr interval opt_ev_status: /* empty */ { $$= 0; } | ENABLE_SYM { - Lex->event_parse_data->status= Event_parse_data::ENABLED; + Lex->event_parse_data->status= Event_basic::ENABLED; + $$= 1; + } + | DISABLE_SYM ON SLAVE + { + Lex->event_parse_data->status= Event_basic::SLAVESIDE_DISABLED; $$= 1; } | DISABLE_SYM { - Lex->event_parse_data->status= Event_parse_data::DISABLED; + Lex->event_parse_data->status= Event_basic::DISABLED; $$= 1; } ; @@ -1802,13 +1816,13 @@ ev_on_completion: ON COMPLETION_SYM PRESERVE_SYM { Lex->event_parse_data->on_completion= - Event_parse_data::ON_COMPLETION_PRESERVE; + Event_basic::ON_COMPLETION_PRESERVE; $$= 1; } | ON COMPLETION_SYM NOT_SYM PRESERVE_SYM { Lex->event_parse_data->on_completion= - Event_parse_data::ON_COMPLETION_DROP; + Event_basic::ON_COMPLETION_DROP; $$= 1; } ; @@ -1844,7 +1858,7 @@ ev_sql_stmt: */ if (lex->sphead) { - my_error(ER_EVENT_RECURSIVITY_FORBIDDEN, MYF(0)); + my_error(ER_EVENT_RECURSION_FORBIDDEN, MYF(0)); MYSQL_YYABORT; } @@ -1917,26 +1931,24 @@ sp_name: my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); MYSQL_YYABORT; } - if (check_routine_name($3)) + if (check_routine_name(&$3)) { - my_error(ER_SP_WRONG_NAME, MYF(0), $3.str); MYSQL_YYABORT; } - $$= new sp_name($1, $3); + $$= new sp_name($1, $3, true); $$->init_qname(YYTHD); } | ident { THD *thd= YYTHD; LEX_STRING db; - if (check_routine_name($1)) + if (check_routine_name(&$1)) { - my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); MYSQL_YYABORT; } if (thd->copy_db_to(&db.str, &db.length)) MYSQL_YYABORT; - $$= new sp_name(db, $1); + $$= new sp_name(db, $1, false); if ($$) $$->init_qname(YYTHD); } @@ -2404,6 +2416,9 @@ sp_decl: { LEX *lex= Lex; sp_head *sp= lex->sphead; + + lex->spcont= lex->spcont->push_context(LABEL_HANDLER_SCOPE); + sp_pcontext *ctx= lex->spcont; sp_instr_hpush_jump *i= new sp_instr_hpush_jump(sp->instructions(), ctx, $2, @@ -2411,7 +2426,6 @@ sp_decl: sp->add_instr(i); sp->push_backpatch(i, ctx->push_label((char *)"", 0)); - sp->m_flags|= sp_head::IN_HANDLER; } sp_hcond_list sp_proc_stmt { @@ -2435,10 +2449,12 @@ sp_decl: sp->push_backpatch(i, lex->spcont->last_label()); /* Block end */ } lex->sphead->backpatch(hlab); - sp->m_flags&= ~sp_head::IN_HANDLER; + + lex->spcont= ctx->pop_context(); + $$.vars= $$.conds= $$.curs= 0; $$.hndlrs= $6; - ctx->add_handlers($6); + lex->spcont->add_handlers($6); } | DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt { @@ -2504,11 +2520,18 @@ sp_handler_type: ; sp_hcond_list: + sp_hcond_element + { $$= 1; } + | sp_hcond_list ',' sp_hcond_element + { $$+= 1; } + ; + +sp_hcond_element: sp_hcond { LEX *lex= Lex; sp_head *sp= lex->sphead; - sp_pcontext *ctx= lex->spcont; + sp_pcontext *ctx= lex->spcont->parent_context(); if (ctx->find_handler($1)) { @@ -2522,28 +2545,6 @@ sp_hcond_list: i->add_condition($1); ctx->push_handler($1); - $$= 1; - } - } - | sp_hcond_list ',' sp_hcond - { - LEX *lex= Lex; - sp_head *sp= lex->sphead; - sp_pcontext *ctx= lex->spcont; - - if (ctx->find_handler($3)) - { - my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0)); - MYSQL_YYABORT; - } - else - { - sp_instr_hpush_jump *i= - (sp_instr_hpush_jump *)sp->last_instruction(); - - i->add_condition($3); - ctx->push_handler($3); - $$= $1 + 1; } } ; @@ -2703,7 +2704,7 @@ sp_proc_stmt_statement: else i->m_query.length= lex->tok_end - sp->m_tmp_query; i->m_query.str= strmake_root(YYTHD->mem_root, - (char *)sp->m_tmp_query, + 
sp->m_tmp_query, i->m_query.length); sp->add_instr(i); } @@ -3122,7 +3123,7 @@ sp_unlabeled_control: sp_label_t *lab= lex->spcont->last_label(); lab->type= SP_LAB_BEGIN; - lex->spcont= lex->spcont->push_context(); + lex->spcont= lex->spcont->push_context(LABEL_DEFAULT_SCOPE); } sp_decls sp_proc_stmts @@ -4537,8 +4538,7 @@ field_spec: type opt_attribute { LEX *lex=Lex; - if (add_field_to_list(lex->thd, $1.str, - (enum enum_field_types) $3, + if (add_field_to_list(lex->thd, &$1, (enum enum_field_types) $3, lex->length,lex->dec,lex->type, lex->default_value, lex->on_update_value, &lex->comment, @@ -5154,18 +5154,25 @@ alter: lex->sql_command= SQLCOM_ALTER_FUNCTION; lex->spname= $3; } - | ALTER view_algorithm_opt definer view_suid - VIEW_SYM table_ident - { - THD *thd= YYTHD; - LEX *lex= thd->lex; - lex->sql_command= SQLCOM_CREATE_VIEW; - lex->create_view_mode= VIEW_ALTER; - /* first table in list is target VIEW name */ - lex->select_lex.add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING); - } - view_list_opt AS view_select view_check_option - {} + | ALTER view_algorithm definer + { + Lex->create_view_mode= VIEW_ALTER; + } + view_tail + {} + | ALTER definer + /* + We have two separate rules for ALTER VIEW rather that + optional view_algorithm above, to resolve the ambiguity + with the ALTER EVENT below. + */ + { + LEX *lex= Lex; + lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; + lex->create_view_mode= VIEW_ALTER; + } + view_tail + {} | ALTER definer EVENT_SYM sp_name /* BE CAREFUL when you add a new rule to update the block where @@ -5487,7 +5494,7 @@ alter_list_item: type opt_attribute { LEX *lex=Lex; - if (add_field_to_list(lex->thd,$3.str, + if (add_field_to_list(lex->thd,&$3, (enum enum_field_types) $5, lex->length,lex->dec,lex->type, lex->default_value, lex->on_update_value, @@ -5940,12 +5947,8 @@ keycache_list: assign_to_keycache: table_ident cache_keys_spec { - LEX *lex=Lex; - SELECT_LEX *sel= &lex->select_lex; - if (!sel->add_table_to_list(lex->thd, $1, NULL, 0, - TL_READ, - sel->get_use_index(), - (List<String> *)0)) + if (!Select->add_table_to_list(YYTHD, $1, NULL, 0, TL_READ, + Select->pop_index_hints())) MYSQL_YYABORT; } ; @@ -5972,33 +5975,26 @@ preload_list: preload_keys: table_ident cache_keys_spec opt_ignore_leaves { - LEX *lex=Lex; - SELECT_LEX *sel= &lex->select_lex; - if (!sel->add_table_to_list(lex->thd, $1, NULL, $3, - TL_READ, - sel->get_use_index(), - (List<String> *)0)) + if (!Select->add_table_to_list(YYTHD, $1, NULL, $3, TL_READ, + Select->pop_index_hints())) MYSQL_YYABORT; } ; cache_keys_spec: - { Select->interval_list.empty(); } - cache_key_list_or_empty - { - LEX *lex=Lex; - SELECT_LEX *sel= &lex->select_lex; - sel->use_index= sel->interval_list; + { + Lex->select_lex.alloc_index_hints(YYTHD); + Select->set_index_hint_type(INDEX_HINT_USE, + global_system_variables.old_mode ? + INDEX_HINT_MASK_JOIN : + INDEX_HINT_MASK_ALL); } + cache_key_list_or_empty ; cache_key_list_or_empty: - /* empty */ { Lex->select_lex.use_index_ptr= 0; } - | opt_key_or_index '(' key_usage_list2 ')' - { - SELECT_LEX *sel= &Lex->select_lex; - sel->use_index_ptr= &sel->use_index; - } + /* empty */ { } + | key_or_index '(' opt_key_usage_list ')' ; opt_ignore_leaves: @@ -6929,7 +6925,7 @@ function_call_generic: builder= find_qualified_function_builder(thd); DBUG_ASSERT(builder); - item= builder->create(thd, $1, $3, $5); + item= builder->create(thd, $1, $3, true, $5); if (! 
($$= item)) { @@ -7371,20 +7367,16 @@ normal_join: table_factor: { SELECT_LEX *sel= Select; - sel->use_index_ptr=sel->ignore_index_ptr=0; sel->table_join_options= 0; } table_ident opt_table_alias opt_key_definition { - LEX *lex= Lex; - SELECT_LEX *sel= lex->current_select; - if (!($$= sel->add_table_to_list(lex->thd, $2, $3, - sel->get_table_join_options(), - lex->lock_option, - sel->get_use_index(), - sel->get_ignore_index()))) + if (!($$= Select->add_table_to_list(YYTHD, $2, $3, + Select->get_table_join_options(), + Lex->lock_option, + Select->pop_index_hints()))) MYSQL_YYABORT; - sel->add_joined_table($$); + Select->add_joined_table($$); } | '{' ident table_ref LEFT OUTER JOIN_SYM table_ref ON @@ -7453,8 +7445,7 @@ table_factor: lex->current_select= sel= unit->outer_select(); if (!($$= sel-> add_table_to_list(lex->thd, new Table_ident(unit), $6, 0, - TL_READ,(List<String> *)0, - (List<String> *)0))) + TL_READ))) MYSQL_YYABORT; sel->add_joined_table($$); @@ -7553,52 +7544,67 @@ opt_outer: /* empty */ {} | OUTER {}; +index_hint_clause: + /* empty */ + { + $$= global_system_variables.old_mode ? + INDEX_HINT_MASK_JOIN : INDEX_HINT_MASK_ALL; + } + | FOR_SYM JOIN_SYM { $$= INDEX_HINT_MASK_JOIN; } + | FOR_SYM ORDER_SYM BY { $$= INDEX_HINT_MASK_ORDER; } + | FOR_SYM GROUP_SYM BY { $$= INDEX_HINT_MASK_GROUP; } + ; + +index_hint_type: + FORCE_SYM { $$= INDEX_HINT_FORCE; } + | IGNORE_SYM { $$= INDEX_HINT_IGNORE; } + ; + +index_hint_definition: + index_hint_type key_or_index index_hint_clause + { + Select->set_index_hint_type($1, $3); + } + '(' key_usage_list ')' + | USE_SYM key_or_index index_hint_clause + { + Select->set_index_hint_type(INDEX_HINT_USE, $3); + } + '(' opt_key_usage_list ')' + ; + +index_hints_list: + index_hint_definition + | index_hints_list index_hint_definition + ; + +opt_index_hints_list: + /* empty */ + | { Select->alloc_index_hints(YYTHD); } index_hints_list + ; + opt_key_definition: - /* empty */ {} - | USE_SYM key_usage_list - { - SELECT_LEX *sel= Select; - sel->use_index= *$2; - sel->use_index_ptr= &sel->use_index; - } - | FORCE_SYM key_usage_list - { - SELECT_LEX *sel= Select; - sel->use_index= *$2; - sel->use_index_ptr= &sel->use_index; - sel->table_join_options|= TL_OPTION_FORCE_INDEX; - } - | IGNORE_SYM key_usage_list - { - SELECT_LEX *sel= Select; - sel->ignore_index= *$2; - sel->ignore_index_ptr= &sel->ignore_index; - }; + { Select->clear_index_hints(); } + opt_index_hints_list + ; -key_usage_list: - key_or_index { Select->interval_list.empty(); } - '(' key_list_or_empty ')' - { $$= &Select->interval_list; } +opt_key_usage_list: + /* empty */ { Select->add_index_hint(YYTHD, NULL, 0); } + | key_usage_list {} ; -key_list_or_empty: - /* empty */ {} - | key_usage_list2 {} - ; +key_usage_element: + ident { Select->add_index_hint(YYTHD, $1.str, $1.length); } + | PRIMARY_SYM + { + Select->add_index_hint(YYTHD, (char *)"PRIMARY", 7); + } + ; -key_usage_list2: - key_usage_list2 ',' ident - { Select-> - interval_list.push_back(new (YYTHD->mem_root) String((const char*) $3.str, $3.length, - system_charset_info)); } - | ident - { Select-> - interval_list.push_back(new (YYTHD->mem_root) String((const char*) $1.str, $1.length, - system_charset_info)); } - | PRIMARY_SYM - { Select-> - interval_list.push_back(new (YYTHD->mem_root) String("PRIMARY", 7, - system_charset_info)); }; +key_usage_list: + key_usage_element + | key_usage_list ',' key_usage_element + ; using_list: ident @@ -9301,7 +9307,7 @@ param_marker: my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); MYSQL_YYABORT; } - item= new 
Item_param((uint) (lex->tok_start - (uchar *) thd->query)); + item= new Item_param((uint) (lex->tok_start - thd->query)); if (!($$= item) || lex->param_list.push_back(item)) { my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); @@ -9713,8 +9719,9 @@ user: $$->host.str= (char *) "%"; $$->host.length= 1; - if (check_string_length(&$$->user, - ER(ER_USERNAME), USERNAME_LENGTH)) + if (check_string_char_length(&$$->user, ER(ER_USERNAME), + USERNAME_CHAR_LENGTH, + system_charset_info, 0)) MYSQL_YYABORT; } | ident_or_text '@' ident_or_text @@ -9724,10 +9731,11 @@ user: MYSQL_YYABORT; $$->user = $1; $$->host=$3; - if (check_string_length(&$$->user, - ER(ER_USERNAME), USERNAME_LENGTH) || - check_string_length(&$$->host, - ER(ER_HOSTNAME), HOSTNAME_LENGTH)) + if (check_string_char_length(&$$->user, ER(ER_USERNAME), + USERNAME_CHAR_LENGTH, + system_charset_info, 0) || + check_string_byte_length(&$$->host, ER(ER_HOSTNAME), + HOSTNAME_LENGTH)) MYSQL_YYABORT; } | CURRENT_USER optional_braces @@ -10137,7 +10145,7 @@ option_type_value: if (!(qbuff.str= alloc_root(YYTHD->mem_root, qbuff.length + 5))) MYSQL_YYABORT; - strmake(strmake(qbuff.str, "SET ", 4), (char *)sp->m_tmp_query, + strmake(strmake(qbuff.str, "SET ", 4), sp->m_tmp_query, qbuff.length); qbuff.length+= 4; i->m_query= qbuff; @@ -10357,14 +10365,7 @@ internal_variable_name: MYSQL_YYABORT; $$.var= tmp; $$.base_name= null_lex_str; - /* - If this is time_zone variable we should open time zone - describing tables - */ - if (tmp == &sys_time_zone && - lex->add_time_zone_tables_to_query_tables(YYTHD)) - MYSQL_YYABORT; - else if (spc && tmp == &sys_autocommit) + if (spc && tmp == &sys_autocommit) { /* We don't allow setting AUTOCOMMIT from a stored function @@ -10842,7 +10843,8 @@ grant_ident: | table_ident { LEX *lex=Lex; - if (!lex->current_select->add_table_to_list(lex->thd, $1,NULL,0)) + if (!lex->current_select->add_table_to_list(lex->thd, $1,NULL, + TL_OPTION_UPDATING)) MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = TABLE_ACLS & ~GRANT_ACL; @@ -11084,7 +11086,7 @@ union_list: UNION_SYM union_option { LEX *lex=Lex; - if (lex->exchange) + if (lex->result) { /* Only the last SELECT can have INTO...... */ my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO"); @@ -11298,13 +11300,6 @@ view_algorithm: { Lex->create_view_algorithm= VIEW_ALGORITHM_TMPTABLE; } ; -view_algorithm_opt: - /* empty */ - { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; } - | view_algorithm - {} - ; - view_suid: /* empty */ { Lex->create_view_suid= VIEW_SUID_DEFAULT; } @@ -11370,18 +11365,16 @@ view_select_aux: { THD *thd= YYTHD; LEX *lex= thd->lex; - char *stmt_beg= (lex->sphead ? - (char *)lex->sphead->m_tmp_query : - thd->query); + const char *stmt_beg= (lex->sphead ? + lex->sphead->m_tmp_query : thd->query); lex->create_view_select_start= $2 - stmt_beg; } | '(' remember_name select_paren ')' union_opt { THD *thd= YYTHD; LEX *lex= thd->lex; - char *stmt_beg= (lex->sphead ? - (char *)lex->sphead->m_tmp_query : - thd->query); + const char *stmt_beg= (lex->sphead ? 
+ lex->sphead->m_tmp_query : thd->query); lex->create_view_select_start= $2 - stmt_beg; } ; diff --git a/sql/stacktrace.c b/sql/stacktrace.c index d8e9b7fd883..078f62c6b2b 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -53,21 +53,6 @@ void safe_print_str(const char* name, const char* val, int max_len) #define SIGRETURN_FRAME_OFFSET 23 #endif -static my_bool is_nptl; - -/* Check if we are using NPTL or LinuxThreads on Linux */ -void check_thread_lib(void) -{ - char buf[5]; - -#ifdef _CS_GNU_LIBPTHREAD_VERSION - confstr(_CS_GNU_LIBPTHREAD_VERSION, buf, sizeof(buf)); - is_nptl = !strncasecmp(buf, "NPTL", sizeof(buf)); -#else - is_nptl = 0; -#endif -} - #if defined(__alpha__) && defined(__GNUC__) /* The only way to backtrace without a symbol table on alpha @@ -173,8 +158,8 @@ terribly wrong...\n"); #endif /* __alpha__ */ /* We are 1 frame above signal frame with NPTL and 2 frames above with LT */ - sigreturn_frame_count = is_nptl ? 1 : 2; - + sigreturn_frame_count = thd_lib_detected == THD_LIB_LT ? 2 : 1; + while (fp < (uchar**) stack_bottom) { #if defined(__i386__) || defined(__x86_64__) diff --git a/sql/stacktrace.h b/sql/stacktrace.h index f5c92e54e1c..56b9877180a 100644 --- a/sql/stacktrace.h +++ b/sql/stacktrace.h @@ -27,11 +27,9 @@ extern char* heap_start; #define init_stacktrace() do { \ heap_start = (char*) &__bss_start; \ - check_thread_lib(); \ } while(0); void print_stacktrace(gptr stack_bottom, ulong thread_stack); void safe_print_str(const char* name, const char* val, int max_len); -void check_thread_lib(void); #endif /* (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) */ #endif /* TARGET_OS_LINUX */ diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 71b52a5145d..9ffc5fd127f 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -104,7 +104,8 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs, > 0 position in TYPELIB->type_names +1 */ -uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match) +uint find_type(const TYPELIB *lib, const char *find, uint length, + bool part_match) { uint found_count=0, found_pos=0; const char *end= find+length; @@ -144,7 +145,8 @@ uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match) >0 Offset+1 in typelib for matched string */ -uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) +uint find_type2(const TYPELIB *typelib, const char *x, uint length, + CHARSET_INFO *cs) { int pos; const char *j; diff --git a/sql/structs.h b/sql/structs.h index 377d337dcfa..4cf9379d2bb 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -136,12 +136,11 @@ typedef struct st_read_record { /* Parameter to read_record */ /* - Originally MySQL used TIME structure inside server only, but since + Originally MySQL used MYSQL_TIME structure inside server only, but since 4.1 it's exported to user in the new client API. Define aliases for new names to keep existing code simple. */ -typedef struct st_mysql_time TIME; typedef enum enum_mysql_timestamp_type timestamp_type; diff --git a/sql/table.cc b/sql/table.cc index 560f53bae26..39bdbb4cbb9 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -245,6 +245,57 @@ void free_table_share(TABLE_SHARE *share) } +/** + Return TRUE if a table name matches one of the system table names. 
+ Currently these are: + + help_category, help_keyword, help_relation, help_topic, + proc, event + time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, + time_zone_transition_type + + This function trades accuracy for speed, so may return false + positives. Presumably mysql.* database is for internal purposes only + and should not contain user tables. +*/ + +inline bool is_system_table_name(const char *name, uint length) +{ + CHARSET_INFO *ci= system_charset_info; + + return ( + /* mysql.proc table */ + length == 4 && + my_tolower(ci, name[0]) == 'p' && + my_tolower(ci, name[1]) == 'r' && + my_tolower(ci, name[2]) == 'o' && + my_tolower(ci, name[3]) == 'c' || + + length > 4 && + ( + /* one of mysql.help* tables */ + my_tolower(ci, name[0]) == 'h' && + my_tolower(ci, name[1]) == 'e' && + my_tolower(ci, name[2]) == 'l' && + my_tolower(ci, name[3]) == 'p' || + + /* one of mysql.time_zone* tables */ + my_tolower(ci, name[0]) == 't' && + my_tolower(ci, name[1]) == 'i' && + my_tolower(ci, name[2]) == 'm' && + my_tolower(ci, name[3]) == 'e' || + + /* mysql.event table */ + my_tolower(ci, name[0]) == 'e' && + my_tolower(ci, name[1]) == 'v' && + my_tolower(ci, name[2]) == 'e' && + my_tolower(ci, name[3]) == 'n' && + my_tolower(ci, name[4]) == 't' + ) + ); +} + + /* Read table definition from a binary / text based .frm file @@ -365,11 +416,9 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags) allow to lock such tables for writing with any other tables (even with other system tables) and some privilege tables need this. */ - if (!(lower_case_table_names ? - my_strcasecmp(system_charset_info, share->table_name.str, "proc") : - strcmp(share->table_name.str, "proc"))) - share->system_table= 1; - else + share->system_table= is_system_table_name(share->table_name.str, + share->table_name.length); + if (!share->system_table) { share->log_table= check_if_log_table(share->db.length, share->db.str, share->table_name.length, @@ -640,8 +689,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, if ((share->partition_info_len= partition_info_len)) { if (!(share->partition_info= - (uchar*) memdup_root(&share->mem_root, next_chunk + 4, - partition_info_len + 1))) + memdup_root(&share->mem_root, next_chunk + 4, + partition_info_len + 1))) { my_free(buff, MYF(0)); goto err; @@ -1222,12 +1271,12 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, if ((int) (share->next_number_index= (uint) find_ref_key(share->key_info, share->keys, share->default_values, reg_field, - &share->next_number_key_offset)) < 0) + &share->next_number_key_offset, + &share->next_number_keypart)) < 0) { /* Wrong field definition */ - DBUG_ASSERT(0); - reg_field->unireg_check= Field::NONE; /* purecov: inspected */ - share->found_next_number_field= 0; + error= 4; + goto err; } else reg_field->flags |= AUTO_INCREMENT_FLAG; @@ -1338,7 +1387,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, if (!(outparam->alias= my_strdup(alias, MYF(MY_WME)))) goto err; outparam->quick_keys.init(); - outparam->used_keys.init(); + outparam->covering_keys.init(); outparam->keys_in_use_for_query.init(); /* Allocate handler */ @@ -1486,7 +1535,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, tmp= mysql_unpack_partition(thd, share->partition_info, share->partition_info_len, - (uchar*)share->part_state, + share->part_state, share->part_state_len, outparam, is_create_table, share->default_part_db_type); @@ -2244,6 +2293,30 @@ char 
*get_field(MEM_ROOT *mem, Field *field) return to; } +/* + DESCRIPTION + given a buffer with a key value, and a map of keyparts + that are present in this value, returns the length of the value +*/ +uint calculate_key_len(TABLE *table, uint key, const byte *buf, + key_part_map keypart_map) +{ + /* works only with key prefixes */ + DBUG_ASSERT(((keypart_map + 1) & keypart_map) == 0); + + KEY *key_info= table->s->key_info+key; + KEY_PART_INFO *key_part= key_info->key_part; + KEY_PART_INFO *end_key_part= key_part + key_info->key_parts; + uint length= 0; + + while (key_part < end_key_part && keypart_map) + { + length+= key_part->store_length; + keypart_map >>= 1; + key_part++; + } + return length; +} /* Check if database name is valid @@ -2263,8 +2336,9 @@ char *get_field(MEM_ROOT *mem, Field *field) bool check_db_name(LEX_STRING *org_name) { char *name= org_name->str; + uint name_length= org_name->length; - if (!org_name->length || org_name->length > NAME_LEN) + if (!name_length || name_length > NAME_LEN) return 1; if (lower_case_table_names && name != any_db) @@ -2273,6 +2347,7 @@ bool check_db_name(LEX_STRING *org_name) #if defined(USE_MB) && defined(USE_MB_IDENT) if (use_mb(system_charset_info)) { + name_length= 0; bool last_char_is_space= TRUE; char *end= name + org_name->length; while (name < end) @@ -2283,12 +2358,14 @@ bool check_db_name(LEX_STRING *org_name) if (!len) len= 1; name+= len; + name_length++; } - return last_char_is_space; + return (last_char_is_space || name_length > NAME_CHAR_LEN); } else #endif - return org_name->str[org_name->length - 1] != ' '; /* purecov: inspected */ + return ((org_name->str[org_name->length - 1] != ' ') || + (name_length > NAME_CHAR_LEN)); /* purecov: inspected */ } @@ -2301,6 +2378,7 @@ bool check_db_name(LEX_STRING *org_name) bool check_table_name(const char *name, uint length) { + uint name_length= 0; // name length in symbols const char *end= name+length; if (!length || length > NAME_LEN) return 1; @@ -2321,14 +2399,16 @@ bool check_table_name(const char *name, uint length) if (len) { name += len; + name_length++; continue; } } #endif name++; + name_length++; } #if defined(USE_MB) && defined(USE_MB_IDENT) - return last_char_is_space; + return (last_char_is_space || name_length > NAME_CHAR_LEN) ; #else return 0; #endif @@ -2337,7 +2417,7 @@ bool check_table_name(const char *name, uint length) bool check_column_name(const char *name) { - const char *start= name; + uint name_length= 0; // name length in symbols bool last_char_is_space= TRUE; while (*name) @@ -2351,6 +2431,7 @@ bool check_column_name(const char *name) if (len) { name += len; + name_length++; continue; } } @@ -2360,159 +2441,150 @@ bool check_column_name(const char *name) if (*name == NAMES_SEP_CHAR) return 1; name++; + name_length++; } /* Error if empty or too long column name */ - return last_char_is_space || (uint) (name - start) > NAME_LEN; + return last_char_is_space || (uint) name_length > NAME_CHAR_LEN; } -/* +/** Checks whether a table is intact. Should be done *just* after the table has been opened. - - SYNOPSIS - table_check_intact() - table The table to check - table_f_count Expected number of columns in the table - table_def Expected structure of the table (column name and type) - last_create_time The table->file->create_time of the table in memory - we have checked last time - error_num ER_XXXX from the error messages file. When 0 no error - is sent to the client in case types does not match. 
- If different col number either - ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE or - ER_COL_COUNT_DOESNT_MATCH_CORRUPTED is used - - RETURNS - FALSE OK - TRUE There was an error + + @param[in] table The table to check + @param[in] table_f_count Expected number of columns in the table + @param[in] table_def Expected structure of the table (column name + and type) + + @retval FALSE OK + @retval TRUE There was an error. An error message is output + to the error log. We do not push an error + message into the error stack because this + function is currently only called at start up, + and such errors never reach the user. */ my_bool table_check_intact(TABLE *table, const uint table_f_count, - const TABLE_FIELD_W_TYPE *table_def, - time_t *last_create_time, int error_num) + const TABLE_FIELD_W_TYPE *table_def) { uint i; my_bool error= FALSE; my_bool fields_diff_count; DBUG_ENTER("table_check_intact"); - DBUG_PRINT("info",("table: %s expected_count: %d last_create_time: %ld", - table->alias, table_f_count, *last_create_time)); - - if ((fields_diff_count= (table->s->fields != table_f_count)) || - (*last_create_time != table->file->stats.create_time)) + DBUG_PRINT("info",("table: %s expected_count: %d", + table->alias, table_f_count)); + + fields_diff_count= (table->s->fields != table_f_count); + if (fields_diff_count) { - DBUG_PRINT("info", ("I am suspecting, checking table")); - if (fields_diff_count) + DBUG_PRINT("info", ("Column count has changed, checking the definition")); + + /* previous MySQL version */ + if (MYSQL_VERSION_ID > table->s->mysql_version) { - /* previous MySQL version */ - error= TRUE; - if (MYSQL_VERSION_ID > table->s->mysql_version) - { - my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0), table->alias, - table_f_count, table->s->fields, table->s->mysql_version, - MYSQL_VERSION_ID); - sql_print_error(ER(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE), - table->alias, table_f_count, table->s->fields, - table->s->mysql_version, MYSQL_VERSION_ID); - DBUG_RETURN(error); + sql_print_error(ER(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE), + table->alias, table_f_count, table->s->fields, + table->s->mysql_version, MYSQL_VERSION_ID); + DBUG_RETURN(TRUE); + } + else if (MYSQL_VERSION_ID == table->s->mysql_version) + { + sql_print_error(ER(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED), table->alias, + table_f_count, table->s->fields); + DBUG_RETURN(TRUE); + } + /* + Something has definitely changed, but we're running an older + version of MySQL with new system tables. + Let's check column definitions. If a column was added at + the end of the table, then we don't care much since such change + is backward compatible. + */ + } + char buffer[STRING_BUFFER_USUAL_SIZE]; + for (i=0 ; i < table_f_count; i++, table_def++) + { + String sql_type(buffer, sizeof(buffer), system_charset_info); + sql_type.length(0); + if (i < table->s->fields) + { + Field *field= table->field[i]; - } - else if (MYSQL_VERSION_ID == table->s->mysql_version) - { - my_error(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED,MYF(0), table->alias, - table_f_count, table->s->fields); - sql_print_error(ER(ER_COL_COUNT_DOESNT_MATCH_CORRUPTED), table->alias, - table_f_count, table->s->fields); - } - else + if (strncmp(field->field_name, table_def->name.str, + table_def->name.length)) { /* - Moving from newer mysql to older one -> let's say not an error but - will check the definition afterwards. If a column was added at the - end then we don't care much since it's not in the middle. + Name changes are not fatal, we use ordinal numbers to access columns. 
+ Still this can be a sign of a tampered table, output an error + to the error log. */ - error= FALSE; + sql_print_error("Incorrect definition of table %s.%s: " + "expected column '%s' at position %d, found '%s'.", + table->s->db.str, table->alias, table_def->name.str, i, + field->field_name); } - } - /* definitely something has changed */ - char buffer[255]; - for (i=0 ; i < table_f_count; i++, table_def++) - { - String sql_type(buffer, sizeof(buffer), system_charset_info); - sql_type.length(0); + field->sql_type(sql_type); /* - Name changes are not fatal, we use sequence numbers => no problem - for us but this can show tampered table or broken table. - */ - if (i < table->s->fields) + Generally, if column types don't match, then something is + wrong. + + However, we only compare column definitions up to the + length of the original definition, since we consider the + following definitions compatible: + + 1. DATETIME and DATETIM + 2. INT(11) and INT(11 + 3. SET('one', 'two') and SET('one', 'two', 'more') + + For SETs or ENUMs, if the same prefix is there it's OK to + add more elements - they will get higher ordinal numbers and + the new table definition is backward compatible with the + original one. + */ + if (strncmp(sql_type.c_ptr_safe(), table_def->type.str, + table_def->type.length - 1)) { - Field *field= table->field[i]; - if (strncmp(field->field_name, table_def->name.str, - table_def->name.length)) - { - sql_print_error("(%s) Expected field %s at position %d, found %s", - table->alias, table_def->name.str, i, - field->field_name); - } - - /* - If the type does not match than something is really wrong - Check up to length - 1. Why? - 1. datetime -> datetim -> the same - 2. int(11) -> int(11 -> the same - 3. set('one','two') -> set('one','two' - so for sets if the same prefix is there it's ok if more are - added as part of the set. The same is valid for enum. So a new - table running on a old server will be valid. 
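The comments around the type check explain why the expected type string is compared only as a prefix, one byte short of its full length: "datetime" vs "datetim" is not a mismatch, and a SET or ENUM that merely gained trailing members stays backward compatible. A standalone illustration of that comparison rule, assuming plain C strings rather than the server's String/LEX_STRING types:

#include <cstdio>
#include <cstring>

// Compares an actual column type against the expected definition the way the
// patch does: only the first length-1 bytes of the expected string are
// checked, so a one-character tail difference or extra trailing SET/ENUM
// members do not count as an incompatible definition.
static bool type_compatible(const char *actual, const char *expected)
{
  size_t n = strlen(expected);
  return strncmp(actual, expected, n ? n - 1 : 0) == 0;
}

int main()
{
  printf("%d\n", type_compatible("datetim", "datetime"));                 // 1
  printf("%d\n", type_compatible("set('a','b','more')", "set('a','b')")); // 1
  printf("%d\n", type_compatible("int(11)", "varchar(64)"));              // 0
}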
- */ - field->sql_type(sql_type); - if (strncmp(sql_type.c_ptr_safe(), table_def->type.str, - table_def->type.length - 1)) - { - sql_print_error("(%s) Expected field %s at position %d to have type " - "%s, found %s", table->alias, table_def->name.str, - i, table_def->type.str, sql_type.c_ptr_safe()); - error= TRUE; - } - else if (table_def->cset.str && !field->has_charset()) - { - sql_print_error("(%s) Expected field %s at position %d to have " - "character set '%s' but found no such", table->alias, - table_def->name.str, i, table_def->cset.str); - error= TRUE; - } - else if (table_def->cset.str && - strcmp(field->charset()->csname, table_def->cset.str)) - { - sql_print_error("(%s) Expected field %s at position %d to have " - "character set '%s' but found '%s'", table->alias, - table_def->name.str, i, table_def->cset.str, - field->charset()->csname); - error= TRUE; - } + sql_print_error("Incorrect definition of table %s.%s: " + "expected column '%s' at position %d to have type " + "%s, found type %s.", table->s->db.str, table->alias, + table_def->name.str, i, table_def->type.str, + sql_type.c_ptr_safe()); + error= TRUE; } - else + else if (table_def->cset.str && !field->has_charset()) + { + sql_print_error("Incorrect definition of table %s.%s: " + "expected the type of column '%s' at position %d " + "to have character set '%s' but the type has no " + "character set.", table->s->db.str, table->alias, + table_def->name.str, i, table_def->cset.str); + error= TRUE; + } + else if (table_def->cset.str && + strcmp(field->charset()->csname, table_def->cset.str)) { - sql_print_error("(%s) Expected field %s at position %d to have type %s " - " but no field found.", table->alias, - table_def->name.str, i, table_def->type.str); - error= TRUE; + sql_print_error("Incorrect definition of table %s.%s: " + "expected the type of column '%s' at position %d " + "to have character set '%s' but found " + "character set '%s'.", table->s->db.str, table->alias, + table_def->name.str, i, table_def->cset.str, + field->charset()->csname); + error= TRUE; } } - if (!error) - *last_create_time= table->file->stats.create_time; - else if (!fields_diff_count && error_num) - my_error(error_num,MYF(0), table->alias, table_f_count, table->s->fields); - } - else - { - DBUG_PRINT("info", ("Table seems ok without thorough checking.")); - *last_create_time= table->file->stats.create_time; + else + { + sql_print_error("Incorrect definition of table %s.%s: " + "expected column '%s' at position %d to have type %s " + " but the column is not found.", + table->s->db.str, table->alias, + table_def->name.str, i, table_def->type.str); + error= TRUE; + } } - - DBUG_RETURN(error); + DBUG_RETURN(error); } @@ -2895,7 +2967,7 @@ void st_table_list::hide_view_error(THD *thd) thd->net.last_errno == ER_NO_SUCH_TABLE) { TABLE_LIST *top= top_table(); - thd->clear_error(); + thd->clear_error(); my_error(ER_VIEW_INVALID, MYF(0), top->view_db.str, top->view_name.str); } else if (thd->net.last_errno == ER_NO_DEFAULT_FOR_FIELD) @@ -3251,7 +3323,8 @@ bool st_table_list::prepare_view_securety_context(THD *thd) definer.host.str, thd->db)) { - if (thd->lex->sql_command == SQLCOM_SHOW_CREATE) + if ((thd->lex->sql_command == SQLCOM_SHOW_CREATE) || + (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) { push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_NO_SUCH_USER, @@ -3938,7 +4011,7 @@ void st_table::mark_auto_increment_column() */ bitmap_set_bit(read_set, found_next_number_field->field_index); bitmap_set_bit(write_set, 
found_next_number_field->field_index); - if (s->next_number_key_offset) + if (s->next_number_keypart) mark_columns_used_by_index_no_reset(s->next_number_index, read_set); file->column_bitmaps_signal(); } @@ -4120,6 +4193,175 @@ Item_subselect *st_table_list::containing_subselect() return (select_lex ? select_lex->master_unit()->item : 0); } +/* + Compiles the tagged hints list and fills up the bitmasks. + + SYNOPSIS + process_index_hints() + table the TABLE to operate on. + + DESCRIPTION + The parser collects the index hints for each table in a "tagged list" + (st_table_list::index_hints). Using the information in this tagged list + this function sets the members st_table::keys_in_use_for_query, + st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by, + st_table::force_index and st_table::covering_keys. + + Current implementation of the runtime does not allow mixing FORCE INDEX + and USE INDEX, so this is checked here. Then the FORCE INDEX list + (if non-empty) is appended to the USE INDEX list and a flag is set. + + Multiple hints of the same kind are processed so that each clause + is applied to what is computed in the previous clause. + For example: + USE INDEX (i1) USE INDEX (i2) + is equivalent to + USE INDEX (i1,i2) + and means "consider only i1 and i2". + + Similarly + USE INDEX () USE INDEX (i1) + is equivalent to + USE INDEX (i1) + and means "consider only the index i1" + + It is OK to have the same index several times, e.g. "USE INDEX (i1,i1)" is + not an error. + + Different kind of hints (USE/FORCE/IGNORE) are processed in the following + order: + 1. All indexes in USE (or FORCE) INDEX are added to the mask. + 2. All IGNORE INDEX + + e.g. "USE INDEX i1, IGNORE INDEX i1, USE INDEX i1" will not use i1 at all + as if we had "USE INDEX i1, USE INDEX i1, IGNORE INDEX i1". + + As an optimization if there is a covering index, and we have + IGNORE INDEX FOR GROUP/ORDER, and this index is used for the JOIN part, + then we have to ignore the IGNORE INDEX FROM GROUP/ORDER. + + RETURN VALUE + FALSE no errors found + TRUE found and reported an error. 
+*/ +bool st_table_list::process_index_hints(TABLE *table) +{ + /* initialize the result variables */ + table->keys_in_use_for_query= table->keys_in_use_for_group_by= + table->keys_in_use_for_order_by= table->s->keys_in_use; + + /* index hint list processing */ + if (index_hints) + { + key_map index_join[INDEX_HINT_FORCE + 1]; + key_map index_order[INDEX_HINT_FORCE + 1]; + key_map index_group[INDEX_HINT_FORCE + 1]; + index_hint *hint; + int type; + bool have_empty_use_join= FALSE, have_empty_use_order= FALSE, + have_empty_use_group= FALSE; + List_iterator <index_hint> iter(*index_hints); + + /* initialize temporary variables used to collect hints of each kind */ + for (type= INDEX_HINT_IGNORE; type <= INDEX_HINT_FORCE; type++) + { + index_join[type].clear_all(); + index_order[type].clear_all(); + index_group[type].clear_all(); + } + + /* iterate over the hints list */ + while ((hint= iter++)) + { + uint pos; + + /* process empty USE INDEX () */ + if (hint->type == INDEX_HINT_USE && !hint->key_name.str) + { + if (hint->clause & INDEX_HINT_MASK_JOIN) + { + index_join[hint->type].clear_all(); + have_empty_use_join= TRUE; + } + if (hint->clause & INDEX_HINT_MASK_ORDER) + { + index_order[hint->type].clear_all(); + have_empty_use_order= TRUE; + } + if (hint->clause & INDEX_HINT_MASK_GROUP) + { + index_group[hint->type].clear_all(); + have_empty_use_group= TRUE; + } + continue; + } + + /* + Check if an index with the given name exists and get his offset in + the keys bitmask for the table + */ + if (table->s->keynames.type_names == 0 || + (pos= find_type(&table->s->keynames, hint->key_name.str, + hint->key_name.length, 1)) <= 0) + { + my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), hint->key_name.str, alias); + return 1; + } + + pos--; + + /* add to the appropriate clause mask */ + if (hint->clause & INDEX_HINT_MASK_JOIN) + index_join[hint->type].set_bit (pos); + if (hint->clause & INDEX_HINT_MASK_ORDER) + index_order[hint->type].set_bit (pos); + if (hint->clause & INDEX_HINT_MASK_GROUP) + index_group[hint->type].set_bit (pos); + } + + /* cannot mix USE INDEX and FORCE INDEX */ + if ((!index_join[INDEX_HINT_FORCE].is_clear_all() || + !index_order[INDEX_HINT_FORCE].is_clear_all() || + !index_group[INDEX_HINT_FORCE].is_clear_all()) && + (!index_join[INDEX_HINT_USE].is_clear_all() || have_empty_use_join || + !index_order[INDEX_HINT_USE].is_clear_all() || have_empty_use_order || + !index_group[INDEX_HINT_USE].is_clear_all() || have_empty_use_group)) + { + my_error(ER_WRONG_USAGE, MYF(0), index_hint_type_name[INDEX_HINT_USE], + index_hint_type_name[INDEX_HINT_FORCE]); + return 1; + } + + /* process FORCE INDEX as USE INDEX with a flag */ + if (!index_join[INDEX_HINT_FORCE].is_clear_all() || + !index_order[INDEX_HINT_FORCE].is_clear_all() || + !index_group[INDEX_HINT_FORCE].is_clear_all()) + { + table->force_index= TRUE; + index_join[INDEX_HINT_USE].merge(index_join[INDEX_HINT_FORCE]); + index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]); + index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]); + } + + /* apply USE INDEX */ + if (!index_join[INDEX_HINT_USE].is_clear_all() || have_empty_use_join) + table->keys_in_use_for_query.intersect(index_join[INDEX_HINT_USE]); + if (!index_order[INDEX_HINT_USE].is_clear_all() || have_empty_use_order) + table->keys_in_use_for_order_by.intersect (index_order[INDEX_HINT_USE]); + if (!index_group[INDEX_HINT_USE].is_clear_all() || have_empty_use_group) + table->keys_in_use_for_group_by.intersect (index_group[INDEX_HINT_USE]); + + /* apply IGNORE INDEX 
*/ + table->keys_in_use_for_query.subtract (index_join[INDEX_HINT_IGNORE]); + table->keys_in_use_for_order_by.subtract (index_order[INDEX_HINT_IGNORE]); + table->keys_in_use_for_group_by.subtract (index_group[INDEX_HINT_IGNORE]); + } + + /* make sure covering_keys don't include indexes disabled with a hint */ + table->covering_keys.intersect(table->keys_in_use_for_query); + return 0; +} + /***************************************************************************** ** Instansiate templates *****************************************************************************/ diff --git a/sql/table.h b/sql/table.h index 54c820d391c..bb9ced2e450 100644 --- a/sql/table.h +++ b/sql/table.h @@ -58,7 +58,7 @@ typedef struct st_grant_info enum tmp_table_type { - NO_TMP_TABLE, TMP_TABLE, TRANSACTIONAL_TMP_TABLE, + NO_TMP_TABLE, NON_TRANSACTIONAL_TMP_TABLE, TRANSACTIONAL_TMP_TABLE, INTERNAL_TMP_TABLE, SYSTEM_TMP_TABLE }; @@ -197,8 +197,9 @@ typedef struct st_table_share uint rowid_field_offset; /* Field_nr +1 to rowid field */ /* Index of auto-updated TIMESTAMP field in field array */ uint primary_key; - uint next_number_index; - uint next_number_key_offset; + uint next_number_index; /* autoincrement key number */ + uint next_number_key_offset; /* autoinc keypart offset in a key */ + uint next_number_keypart; /* autoinc keypart number in a key */ uint error, open_errno, errarg; /* error from open_table_def() */ uint column_bitmap_size; uchar frm_version; @@ -235,9 +236,9 @@ typedef struct st_table_share bool log_table; #ifdef WITH_PARTITION_STORAGE_ENGINE bool auto_partitioned; - const uchar *partition_info; + const char *partition_info; uint partition_info_len; - const uchar *part_state; + const char *part_state; uint part_state_len; handlerton *default_part_db_type; #endif @@ -299,6 +300,12 @@ typedef struct st_table_share /* Information for one open table */ +enum index_hint_type +{ + INDEX_HINT_IGNORE, + INDEX_HINT_USE, + INDEX_HINT_FORCE +}; struct st_table { st_table() {} /* Remove gcc warning */ @@ -318,8 +325,12 @@ struct st_table { byte *write_row_record; /* Used as optimisation in THD::write_row */ byte *insert_values; /* used by INSERT ... UPDATE */ - key_map quick_keys, used_keys; - + /* + Map of keys that can be used to retrieve all data from this table + needed by the query without reading the row. + */ + key_map covering_keys; + key_map quick_keys, merge_keys; /* A set of keys that can be used in the query that references this table. @@ -332,7 +343,10 @@ struct st_table { The set is implemented as a bitmap. */ key_map keys_in_use_for_query; - key_map merge_keys; + /* Map of keys that can be used to calculate GROUP BY without sorting */ + key_map keys_in_use_for_group_by; + /* Map of keys that can be used to calculate ORDER BY without sorting */ + key_map keys_in_use_for_order_by; KEY *key_info; /* data of keys in database */ Field *next_number_field; /* Set if next_number is activated */ @@ -425,6 +439,11 @@ struct st_table { my_bool no_cache; /* To signal that we should reset query_id for tables and cols */ my_bool clear_query_id; + /* + To indicate that a non-null value of the auto_increment field + was provided by the user or retrieved from the current record. + Used only in the MODE_NO_AUTO_VALUE_ON_ZERO mode. 
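Seen end to end, the process_index_hints() implementation earlier in this hunk reduces USE/FORCE/IGNORE INDEX to a few bitmap operations on the new key_map members (keys_in_use_for_query, keys_in_use_for_group_by, keys_in_use_for_order_by). A compact sketch of the same composition, with std::bitset standing in for key_map; the struct and function are illustrative, the three clauses are folded into one set, and the force_index flag is omitted:

#include <bitset>
#include <iostream>

typedef std::bitset<64> KeyMap;

struct HintSets
{
  KeyMap use, force, ignore;
  bool   have_use;      // true if any USE/FORCE hint (even an empty one) was given
};

static KeyMap apply_hints(KeyMap keys_in_use, HintSets h)
{
  h.use |= h.force;                 // FORCE INDEX is processed as USE INDEX + flag
  if (h.have_use)
    keys_in_use &= h.use;           // only the hinted indexes remain usable
  keys_in_use &= ~h.ignore;         // IGNORE INDEX always wins over USE/FORCE
  return keys_in_use;
}

int main()
{
  KeyMap all;
  all.set(0); all.set(1); all.set(2);      // the table has indexes 0, 1 and 2

  HintSets h = HintSets();
  h.have_use = true;
  h.use.set(0); h.use.set(1);              // USE INDEX (i0, i1)
  h.ignore.set(1);                         // IGNORE INDEX (i1)

  std::cout << apply_hints(all, h).count() << "\n";   // 1: only index 0 survives
}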
+ */ my_bool auto_increment_field_not_null; my_bool insert_or_update; /* Can be used by the handler */ my_bool alias_name_used; /* true if table_name is alias */ @@ -666,9 +685,25 @@ public: (TABLE_LIST::join_using_fields != NULL) */ +class index_hint; typedef struct st_table_list { st_table_list() {} /* Remove gcc warning */ + + /** + Prepare TABLE_LIST that consists of one table instance to use in + simple_open_and_lock_tables + */ + inline void init_one_table(const char *db_name_arg, + const char *table_name_arg, + enum thr_lock_type lock_type_arg) + { + bzero((char*) this, sizeof(*this)); + db= (char*) db_name_arg; + table_name= alias= (char*) table_name_arg; + lock_type= lock_type_arg; + } + /* List of tables local to a subquery (used by SQL_LIST). Considers views as leaves (unlike 'next_leaf' below). Created at parse time @@ -722,7 +757,7 @@ typedef struct st_table_list */ struct st_table_list *next_name_resolution_table; /* Index names in a "... JOIN ... USE/IGNORE INDEX ..." clause. */ - List<String> *use_index, *ignore_index; + List<index_hint> *index_hints; TABLE *table; /* opened table */ uint table_id; /* table id (from binlog) for opened table */ /* @@ -896,6 +931,13 @@ typedef struct st_table_list void reinit_before_use(THD *thd); Item_subselect *containing_subselect(); + /* + Compiles the tagged hints list and fills up st_table::keys_in_use_for_query, + st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by, + st_table::force_index and st_table::covering_keys. + */ + bool process_index_hints(TABLE *table); + private: bool prep_check_option(THD *thd, uint8 check_opt_type); bool prep_where(THD *thd, Item **conds, bool no_where_clause); @@ -1070,8 +1112,7 @@ typedef struct st_table_field_w_type my_bool table_check_intact(TABLE *table, const uint table_f_count, - const TABLE_FIELD_W_TYPE *table_def, - time_t *last_create_time, int error_num); + const TABLE_FIELD_W_TYPE *table_def); static inline my_bitmap_map *tmp_use_all_columns(TABLE *table, MY_BITMAP *bitmap) diff --git a/sql/time.cc b/sql/time.cc index 4854206b1c8..249de3b879b 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -96,7 +96,7 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week) next week is week 1. */ -uint calc_week(TIME *l_time, uint week_behaviour, uint *year) +uint calc_week(MYSQL_TIME *l_time, uint week_behaviour, uint *year) { uint days; ulong daynr=calc_daynr(l_time->year,l_time->month,l_time->day); @@ -214,7 +214,7 @@ ulong convert_month_to_period(ulong month) /* - Convert a timestamp string to a TIME value and produce a warning + Convert a timestamp string to a MYSQL_TIME value and produce a warning if string was truncated during conversion. NOTE @@ -222,7 +222,7 @@ ulong convert_month_to_period(ulong month) */ timestamp_type -str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, +str_to_datetime_with_warn(const char *str, uint length, MYSQL_TIME *l_time, uint flags) { int was_cut; @@ -235,13 +235,14 @@ str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, MODE_NO_ZERO_DATE))), &was_cut); if (was_cut || ts_type <= MYSQL_TIMESTAMP_ERROR) - make_truncated_value_warning(current_thd, str, length, ts_type, NullS); + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + str, length, ts_type, NullS); return ts_type; } /* - Convert a datetime from broken-down TIME representation to corresponding + Convert a datetime from broken-down MYSQL_TIME representation to corresponding TIMESTAMP value. 
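For the broken-down-datetime-to-TIMESTAMP mapping referred to just above, a self-contained sketch of the underlying arithmetic may help. It uses standard days-from-civil date math and deliberately ignores the session time zone and the DST-gap reporting that the real TIME_to_timestamp() handles; the struct and function names are made up for the example:

#include <cstdint>
#include <cstdio>

// Broken-down datetime, loosely modelled on MYSQL_TIME (illustrative only).
struct BrokenDownTime { int year, month, day, hour, minute, second; };

// Days since 1970-01-01 for a proleptic Gregorian date (year >= 1970 here).
// Standard "days from civil" arithmetic; no time zone or leap seconds.
static int64_t days_from_civil(int y, int m, int d)
{
  y -= m <= 2;
  const int64_t  era = y / 400;
  const unsigned yoe = static_cast<unsigned>(y - era * 400);
  const unsigned doy = (153u * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1;
  const unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;
  return era * 146097 + static_cast<int64_t>(doe) - 719468;
}

// Seconds since the Unix epoch for a UTC broken-down value.
static int64_t to_epoch_utc(const BrokenDownTime &t)
{
  return days_from_civil(t.year, t.month, t.day) * 86400
         + t.hour * 3600 + t.minute * 60 + t.second;
}

int main()
{
  BrokenDownTime t = {2007, 4, 24, 11, 11, 45};
  printf("%lld\n", static_cast<long long>(to_epoch_utc(t)));   // 1177413105
}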
SYNOPSIS @@ -257,7 +258,7 @@ str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, 0 - t contains datetime value which is out of TIMESTAMP range. */ -my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *in_dst_time_gap) +my_time_t TIME_to_timestamp(THD *thd, const MYSQL_TIME *t, my_bool *in_dst_time_gap) { my_time_t timestamp; @@ -276,20 +277,20 @@ my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *in_dst_time_gap) /* - Convert a time string to a TIME struct and produce a warning + Convert a time string to a MYSQL_TIME struct and produce a warning if string was cut during conversion. NOTE See str_to_time() for more info. */ bool -str_to_time_with_warn(const char *str, uint length, TIME *l_time) +str_to_time_with_warn(const char *str, uint length, MYSQL_TIME *l_time) { int warning; bool ret_val= str_to_time(str, length, l_time, &warning); if (ret_val || warning) - make_truncated_value_warning(current_thd, str, length, - MYSQL_TIMESTAMP_TIME, NullS); + make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + str, length, MYSQL_TIMESTAMP_TIME, NullS); return ret_val; } @@ -298,7 +299,7 @@ str_to_time_with_warn(const char *str, uint length, TIME *l_time) Convert a system time structure to TIME */ -void localtime_to_TIME(TIME *to, struct tm *from) +void localtime_to_TIME(MYSQL_TIME *to, struct tm *from) { to->neg=0; to->second_part=0; @@ -310,9 +311,14 @@ void localtime_to_TIME(TIME *to, struct tm *from) to->second= (int) from->tm_sec; } -void calc_time_from_sec(TIME *to, long seconds, long microseconds) +void calc_time_from_sec(MYSQL_TIME *to, long seconds, long microseconds) { long t_seconds; + // to->neg is not cleared, it may already be set to a useful value + to->time_type= MYSQL_TIMESTAMP_TIME; + to->year= 0; + to->month= 0; + to->day= 0; to->hour= seconds/3600L; t_seconds= seconds%3600L; to->minute= t_seconds/60L; @@ -679,7 +685,7 @@ const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format, MySQL doesn't support comparing of date/time/datetime strings that are not in arbutary order as dates are compared as strings in some context) - This functions don't check that given TIME structure members are + This functions don't check that given MYSQL_TIME structure members are in valid range. If they are not, return value won't reflect any valid date either. 
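The patched calc_time_from_sec() in this hunk now also sets time_type and zeroes the date members before splitting the seconds value. The splitting itself is simple enough to show in isolation; Duration is an illustrative stand-in for the time members of MYSQL_TIME, the zeroed date members are simply absent from it, and sign handling is left to the caller as in the original:

#include <cstdio>

struct Duration { long hour, minute, second, second_part; };

// Split a duration given as seconds + microseconds into h:m:s.frac,
// mirroring the divide/modulo sequence in calc_time_from_sec().
static Duration calc_time_from_sec(long seconds, long microseconds)
{
  Duration d;
  d.hour        = seconds / 3600L;
  long rest     = seconds % 3600L;
  d.minute      = rest / 60L;
  d.second      = rest % 60L;
  d.second_part = microseconds;
  return d;
}

int main()
{
  Duration d = calc_time_from_sec(3662L, 250000L);
  printf("%02ld:%02ld:%02ld.%06ld\n", d.hour, d.minute, d.second, d.second_part);
  // prints 01:01:02.250000
}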
Additionally, make_time doesn't take into account time->day member: it's assumed that days have been converted @@ -687,7 +693,7 @@ const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format, ****************************************************************************/ void make_time(const DATE_TIME_FORMAT *format __attribute__((unused)), - const TIME *l_time, String *str) + const MYSQL_TIME *l_time, String *str) { uint length= (uint) my_time_to_str(l_time, (char*) str->ptr()); str->length(length); @@ -696,7 +702,7 @@ void make_time(const DATE_TIME_FORMAT *format __attribute__((unused)), void make_date(const DATE_TIME_FORMAT *format __attribute__((unused)), - const TIME *l_time, String *str) + const MYSQL_TIME *l_time, String *str) { uint length= (uint) my_date_to_str(l_time, (char*) str->ptr()); str->length(length); @@ -705,7 +711,7 @@ void make_date(const DATE_TIME_FORMAT *format __attribute__((unused)), void make_datetime(const DATE_TIME_FORMAT *format __attribute__((unused)), - const TIME *l_time, String *str) + const MYSQL_TIME *l_time, String *str) { uint length= (uint) my_datetime_to_str(l_time, (char*) str->ptr()); str->length(length); @@ -713,7 +719,8 @@ void make_datetime(const DATE_TIME_FORMAT *format __attribute__((unused)), } -void make_truncated_value_warning(THD *thd, const char *str_val, +void make_truncated_value_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, + const char *str_val, uint str_length, timestamp_type time_type, const char *field_name) { @@ -752,14 +759,14 @@ void make_truncated_value_warning(THD *thd, const char *str_val, cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), ER(ER_WRONG_VALUE), type_str, str.c_ptr()); } - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, level, ER_TRUNCATED_WRONG_VALUE, warn_buff); } /* Daynumber from year 0 to 9999-12-31 */ #define MAX_DAY_NUMBER 3652424L -bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) +bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type, INTERVAL interval) { long period, sign; @@ -883,7 +890,7 @@ invalid_date: NOTE This function calculates difference between l_time1 and l_time2 absolute values. So one should set l_sign and correct result if he want to take - signs into account (i.e. for TIME values). + signs into account (i.e. for MYSQL_TIME values). RETURN VALUES Returns sign of difference. @@ -893,7 +900,7 @@ invalid_date: */ bool -calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, longlong *seconds_out, +calc_time_diff(MYSQL_TIME *l_time1, MYSQL_TIME *l_time2, int l_sign, longlong *seconds_out, long *microseconds_out) { long days; @@ -943,7 +950,7 @@ calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, longlong *seconds_out, /* - Compares 2 TIME structures + Compares 2 MYSQL_TIME structures SYNOPSIS my_time_compare() @@ -961,7 +968,7 @@ calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, longlong *seconds_out, */ int -my_time_compare(TIME *a, TIME *b) +my_time_compare(MYSQL_TIME *a, MYSQL_TIME *b) { my_ulonglong a_t= TIME_to_ulonglong_datetime(a); my_ulonglong b_t= TIME_to_ulonglong_datetime(b); diff --git a/sql/tztime.cc b/sql/tztime.cc index 914c7be46de..08b93cfd203 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -77,7 +77,7 @@ typedef struct lsinfo /* Structure with information describing ranges of my_time_t shifted to local - time (my_time_t + offset). Used for local TIME -> my_time_t conversion. + time (my_time_t + offset). Used for local MYSQL_TIME -> my_time_t conversion. 
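my_time_compare() above compares two values by packing each into a single integer via TIME_to_ulonglong_datetime(). A minimal sketch of that packing and comparison, using a local struct instead of MYSQL_TIME:

#include <cstdint>
#include <cstdio>

struct BrokenDownTime { int year, month, day, hour, minute, second; };

// Packs a datetime as the decimal number YYYYMMDDhhmmss, which is what makes
// two values directly comparable as plain integers.
static uint64_t pack_datetime(const BrokenDownTime &t)
{
  uint64_t date = (uint64_t) t.year * 10000 + (unsigned) t.month * 100 + t.day;
  uint64_t time = (uint64_t) t.hour * 10000 + (unsigned) t.minute * 100 + t.second;
  return date * 1000000ULL + time;
}

// Same contract as my_time_compare(): -1, 0 or 1.
static int compare_datetime(const BrokenDownTime &a, const BrokenDownTime &b)
{
  uint64_t pa = pack_datetime(a), pb = pack_datetime(b);
  return (pa < pb) ? -1 : (pa > pb) ? 1 : 0;
}

int main()
{
  BrokenDownTime a = {2007, 4, 24, 11, 11, 45};
  BrokenDownTime b = {2007, 4, 24, 11, 12,  0};
  printf("%d\n", compare_datetime(a, b));   // -1
}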
See comments for TIME_to_gmt_sec() for more info. */ typedef struct revtinfo @@ -292,9 +292,9 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage) be used if there are no transitions or we have moment in time before any transitions. Second task is to build "shifted my_time_t" -> my_time_t map used in - TIME -> my_time_t conversion. + MYSQL_TIME -> my_time_t conversion. Note: See description of TIME_to_gmt_sec() function first. - In order to perform TIME -> my_time_t conversion we need to build table + In order to perform MYSQL_TIME -> my_time_t conversion we need to build table which defines "shifted by tz offset and leap seconds my_time_t" -> my_time_t function wich is almost the same (except ranges of ambiguity) as reverse function to piecewise linear function used for my_time_t -> @@ -531,14 +531,14 @@ static const uint year_lengths[2]= offset - local time zone offset DESCRIPTION - Convert my_time_t with offset to TIME struct. Differs from timesub + Convert my_time_t with offset to MYSQL_TIME struct. Differs from timesub (from elsie code) because doesn't contain any leap correction and TM_GMTOFF and is_dst setting and contains some MySQL specific initialization. Funny but with removing of these we almost have glibc's offtime function. */ static void -sec_to_TIME(TIME * tmp, my_time_t t, long offset) +sec_to_TIME(MYSQL_TIME * tmp, my_time_t t, long offset) { long days; long rem; @@ -594,7 +594,7 @@ sec_to_TIME(TIME * tmp, my_time_t t, long offset) tmp->month++; tmp->day= (uint)(days + 1); - /* filling MySQL specific TIME members */ + /* filling MySQL specific MYSQL_TIME members */ tmp->neg= 0; tmp->second_part= 0; tmp->time_type= MYSQL_TIMESTAMP_DATETIME; } @@ -686,7 +686,7 @@ find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp) /* Converts time in my_time_t representation (seconds in UTC since Epoch) to - broken down TIME representation in local time zone. + broken down MYSQL_TIME representation in local time zone. SYNOPSIS gmt_sec_to_TIME() @@ -701,12 +701,12 @@ find_transition_type(my_time_t t, const TIME_ZONE_INFO *sp) (60th and 61st second, look how we calculate them as "hit" in this function). Under realistic assumptions about frequency of transitions the same array - can be used fot TIME -> my_time_t conversion. For this we need to + can be used fot MYSQL_TIME -> my_time_t conversion. For this we need to implement tweaked binary search which will take into account that some - TIME has two matching my_time_t ranges and some of them have none. + MYSQL_TIME has two matching my_time_t ranges and some of them have none. */ static void -gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp) +gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp) { const TRAN_TYPE_INFO *ttisp; const LS_INFO *lp; @@ -809,11 +809,11 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) /* - Works like sec_since_epoch but expects TIME structure as parameter. + Works like sec_since_epoch but expects MYSQL_TIME structure as parameter. */ my_time_t -sec_since_epoch_TIME(TIME *t) +sec_since_epoch_TIME(MYSQL_TIME *t) { return sec_since_epoch(t->year, t->month, t->day, t->hour, t->minute, t->second); @@ -821,7 +821,7 @@ sec_since_epoch_TIME(TIME *t) /* - Converts local time in broken down TIME representation to my_time_t + Converts local time in broken down MYSQL_TIME representation to my_time_t representation. SYNOPSIS @@ -863,7 +863,7 @@ sec_since_epoch_TIME(TIME *t) We use completely different approach. 
It is better since it is both faster than iterative implementations and fully determenistic. If you - look at my_time_t to TIME conversion then you'll find that it consist + look at my_time_t to MYSQL_TIME conversion then you'll find that it consist of two steps: The first is calculating shifted my_time_t value and the second - TIME calculation from shifted my_time_t value (well it is a bit simplified @@ -893,7 +893,7 @@ sec_since_epoch_TIME(TIME *t) 0 in case of error. */ static my_time_t -TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, +TIME_to_gmt_sec(const MYSQL_TIME *t, const TIME_ZONE_INFO *sp, my_bool *in_dst_time_gap) { my_time_t local_t; @@ -1020,20 +1020,20 @@ class Time_zone_system : public Time_zone { public: Time_zone_system() {} /* Remove gcc warning */ - virtual my_time_t TIME_to_gmt_sec(const TIME *t, + virtual my_time_t TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const; - virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual void gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const; virtual const String * get_name() const; }; /* - Converts local time in system time zone in TIME representation + Converts local time in system time zone in MYSQL_TIME representation to its my_time_t representation. SYNOPSIS TIME_to_gmt_sec() - t - pointer to TIME structure with local time in + t - pointer to MYSQL_TIME structure with local time in broken-down representation. in_dst_time_gap - pointer to bool which is set to true if datetime value passed doesn't really exist (i.e. falls into @@ -1041,7 +1041,7 @@ public: DESCRIPTION This method uses system function (localtime_r()) for conversion - local time in system time zone in TIME structure to its my_time_t + local time in system time zone in MYSQL_TIME structure to its my_time_t representation. Unlike the same function for Time_zone_db class it it won't handle unnormalized input properly. Still it will return lowest possible my_time_t in case of ambiguity or if we @@ -1053,7 +1053,7 @@ public: Corresponding my_time_t value or 0 in case of error */ my_time_t -Time_zone_system::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const +Time_zone_system::TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const { long not_used; return my_system_gmt_sec(t, ¬_used, in_dst_time_gap); @@ -1066,7 +1066,7 @@ Time_zone_system::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const SYNOPSIS gmt_sec_to_TIME() - tmp - pointer to TIME structure to fill-in + tmp - pointer to MYSQL_TIME structure to fill-in t - my_time_t value to be converted NOTE @@ -1077,7 +1077,7 @@ Time_zone_system::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const the 1902 easily. */ void -Time_zone_system::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +Time_zone_system::gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const { struct tm tmp_tm; time_t tmp_t= (time_t)t; @@ -1107,26 +1107,26 @@ Time_zone_system::get_name() const /* Instance of this class represents UTC time zone. It uses system gmtime_r function for conversions and is always available. It is used only for - my_time_t -> TIME conversions in various UTC_... functions, it is not - intended for TIME -> my_time_t conversions and shouldn't be exposed to user. + my_time_t -> MYSQL_TIME conversions in various UTC_... functions, it is not + intended for MYSQL_TIME -> my_time_t conversions and shouldn't be exposed to user. 
*/ class Time_zone_utc : public Time_zone { public: Time_zone_utc() {} /* Remove gcc warning */ - virtual my_time_t TIME_to_gmt_sec(const TIME *t, + virtual my_time_t TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const; - virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual void gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const; virtual const String * get_name() const; }; /* - Convert UTC time from TIME representation to its my_time_t representation. + Convert UTC time from MYSQL_TIME representation to its my_time_t representation. SYNOPSIS TIME_to_gmt_sec() - t - pointer to TIME structure with local time + t - pointer to MYSQL_TIME structure with local time in broken-down representation. in_dst_time_gap - pointer to bool which is set to true if datetime value passed doesn't really exist (i.e. falls into @@ -1141,7 +1141,7 @@ public: 0 */ my_time_t -Time_zone_utc::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const +Time_zone_utc::TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const { /* Should be never called */ DBUG_ASSERT(0); @@ -1155,14 +1155,14 @@ Time_zone_utc::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const SYNOPSIS gmt_sec_to_TIME() - tmp - pointer to TIME structure to fill-in + tmp - pointer to MYSQL_TIME structure to fill-in t - my_time_t value to be converted NOTE See note for apropriate Time_zone_system method. */ void -Time_zone_utc::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +Time_zone_utc::gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const { struct tm tmp_tm; time_t tmp_t= (time_t)t; @@ -1203,9 +1203,9 @@ class Time_zone_db : public Time_zone { public: Time_zone_db(TIME_ZONE_INFO *tz_info_arg, const String * tz_name_arg); - virtual my_time_t TIME_to_gmt_sec(const TIME *t, + virtual my_time_t TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const; - virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual void gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const; virtual const String * get_name() const; private: TIME_ZONE_INFO *tz_info; @@ -1239,7 +1239,7 @@ Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg, SYNOPSIS TIME_to_gmt_sec() - t - pointer to TIME structure with local time + t - pointer to MYSQL_TIME structure with local time in broken-down representation. in_dst_time_gap - pointer to bool which is set to true if datetime value passed doesn't really exist (i.e. 
falls into @@ -1253,7 +1253,7 @@ Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg, Corresponding my_time_t value or 0 in case of error */ my_time_t -Time_zone_db::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const +Time_zone_db::TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const { return ::TIME_to_gmt_sec(t, tz_info, in_dst_time_gap); } @@ -1265,11 +1265,11 @@ Time_zone_db::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const SYNOPSIS gmt_sec_to_TIME() - tmp - pointer to TIME structure to fill-in + tmp - pointer to MYSQL_TIME structure to fill-in t - my_time_t value to be converted */ void -Time_zone_db::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +Time_zone_db::gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const { ::gmt_sec_to_TIME(tmp, t, tz_info); } @@ -1299,9 +1299,9 @@ class Time_zone_offset : public Time_zone { public: Time_zone_offset(long tz_offset_arg); - virtual my_time_t TIME_to_gmt_sec(const TIME *t, + virtual my_time_t TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const; - virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; + virtual void gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const; virtual const String * get_name() const; /* This have to be public because we want to be able to access it from @@ -1336,11 +1336,11 @@ Time_zone_offset::Time_zone_offset(long tz_offset_arg): /* Converts local time in time zone described as offset from UTC - from TIME representation to its my_time_t representation. + from MYSQL_TIME representation to its my_time_t representation. SYNOPSIS TIME_to_gmt_sec() - t - pointer to TIME structure with local time + t - pointer to MYSQL_TIME structure with local time in broken-down representation. in_dst_time_gap - pointer to bool which should be set to true if datetime value passed doesn't really exist @@ -1352,7 +1352,7 @@ Time_zone_offset::Time_zone_offset(long tz_offset_arg): Corresponding my_time_t value or 0 in case of error */ my_time_t -Time_zone_offset::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const +Time_zone_offset::TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const { my_time_t local_t; int shift= 0; @@ -1397,11 +1397,11 @@ Time_zone_offset::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const SYNOPSIS gmt_sec_to_TIME() - tmp - pointer to TIME structure to fill-in + tmp - pointer to MYSQL_TIME structure to fill-in t - my_time_t value to be converted */ void -Time_zone_offset::gmt_sec_to_TIME(TIME *tmp, my_time_t t) const +Time_zone_offset::gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const { sec_to_TIME(tmp, t, offset); } @@ -1505,26 +1505,20 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length, /* - Prepare table list with time zone related tables from preallocated array - and add to global table list. + Prepare table list with time zone related tables from preallocated array. SYNOPSIS tz_init_table_list() tz_tabs - pointer to preallocated array of MY_TZ_TABLES_COUNT TABLE_LIST objects - global_next_ptr - pointer to variable which points to global_next member - of last element of global table list (or list root - then list is empty) (in/out). DESCRIPTION This function prepares list of TABLE_LIST objects which can be used - for opening of time zone tables from preallocated array. It also links - this list to the end of global table list (it will read and update - accordingly variable pointed by global_next_ptr for this). + for opening of time zone tables from preallocated array. 
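tz_init_table_list() now only fills a caller-supplied array and chains its elements; it no longer splices them into the global table list. A sketch of that preallocated-array chaining pattern (TableListNode and the table names and their order are illustrative, not the server's TABLE_LIST):

#include <cstdio>
#include <cstring>

struct TableListNode
{
  const char     *db;
  const char     *table_name;
  TableListNode  *next;        // like TABLE_LIST::next_global / next_local
  TableListNode **prev_next;   // like TABLE_LIST::prev_global
};

static const int TZ_TABLES_COUNT = 4;   // like MY_TZ_TABLES_COUNT

// Assumed names for the example only.
static const char *tz_table_names[TZ_TABLES_COUNT] =
{
  "time_zone_name", "time_zone",
  "time_zone_transition_type", "time_zone_transition"
};

// Zero the caller-supplied array and chain its elements, the way the
// simplified tz_init_table_list() now does -- no global list involved.
static void init_table_list(TableListNode *tabs)
{
  memset(tabs, 0, sizeof(TableListNode) * TZ_TABLES_COUNT);
  for (int i = 0; i < TZ_TABLES_COUNT; i++)
  {
    tabs[i].db         = "mysql";
    tabs[i].table_name = tz_table_names[i];
    if (i + 1 < TZ_TABLES_COUNT)
      tabs[i].next = &tabs[i + 1];
    if (i != 0)
      tabs[i].prev_next = &tabs[i - 1].next;
  }
}

int main()
{
  TableListNode tabs[TZ_TABLES_COUNT];
  init_table_list(tabs);
  for (TableListNode *t = tabs; t; t = t->next)
    printf("%s.%s\n", t->db, t->table_name);
}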
*/ static void -tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) +tz_init_table_list(TABLE_LIST *tz_tabs) { bzero(tz_tabs, sizeof(TABLE_LIST) * MY_TZ_TABLES_COUNT); @@ -1541,64 +1535,6 @@ tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) if (i != 0) tz_tabs[i].prev_global= &tz_tabs[i-1].next_global; } - - /* Link into global list */ - tz_tabs[0].prev_global= *global_next_ptr; - **global_next_ptr= tz_tabs; - /* Update last-global-pointer to point to pointer in last table */ - *global_next_ptr= &tz_tabs[MY_TZ_TABLES_COUNT-1].next_global; -} - - -/* - Fake table list object, pointer to which is returned by - my_tz_get_tables_list() as indication of error. -*/ -TABLE_LIST fake_time_zone_tables_list; - -/* - Create table list with time zone related tables and add it to the end - of global table list. - - SYNOPSIS - my_tz_get_table_list() - thd - current thread object - global_next_ptr - pointer to variable which points to global_next member - of last element of global table list (or list root - then list is empty) (in/out). - - DESCRIPTION - This function creates list of TABLE_LIST objects allocated in thd's - memroot, which can be used for opening of time zone tables. It will also - link this list to the end of global table list (it will read and update - accordingly variable pointed by global_next_ptr for this). - - NOTE - my_tz_check_n_skip_implicit_tables() function depends on fact that - elements of list created are allocated as TABLE_LIST[MY_TZ_TABLES_COUNT] - array. - - RETURN VALUES - Returns pointer to first TABLE_LIST object, (could be 0 if time zone - tables don't exist) and &fake_time_zone_tables_list in case of error. -*/ - -TABLE_LIST * -my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr) -{ - TABLE_LIST *tz_tabs; - DBUG_ENTER("my_tz_get_table_list"); - - if (!time_zone_tables_exist) - DBUG_RETURN(0); - - if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * - MY_TZ_TABLES_COUNT))) - DBUG_RETURN(&fake_time_zone_tables_list); - - tz_init_table_list(tz_tabs, global_next_ptr); - - DBUG_RETURN(tz_tabs); } @@ -1631,8 +1567,8 @@ my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) { THD *thd; - TABLE_LIST *tables= 0; - TABLE_LIST tables_buff[1+MY_TZ_TABLES_COUNT], **last_global_next_ptr; + TABLE_LIST tz_tables[1+MY_TZ_TABLES_COUNT]; + Open_tables_state open_tables_state_backup; TABLE *table; Tz_names_entry *tmp_tzname; my_bool return_val= 1; @@ -1694,19 +1630,23 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) */ thd->set_db(db, sizeof(db)-1); - bzero((char*) &tables_buff, sizeof(TABLE_LIST)); - tables_buff[0].alias= tables_buff[0].table_name= + bzero((char*) &tz_tables[0], sizeof(TABLE_LIST)); + tz_tables[0].alias= tz_tables[0].table_name= (char*)"time_zone_leap_second"; - tables_buff[0].lock_type= TL_READ; - tables_buff[0].db= db; + tz_tables[0].table_name_length= 21; + tz_tables[0].db= db; + tz_tables[0].db_length= sizeof(db)-1; + tz_tables[0].lock_type= TL_READ; + + tz_init_table_list(tz_tables+1); + tz_tables[0].next_global= tz_tables[0].next_local= &tz_tables[1]; + tz_tables[1].prev_global= &tz_tables[0].next_global; + /* - Fill TABLE_LIST for the rest of the time zone describing tables - and link it to first one. + We need to open only mysql.time_zone_leap_second, but we try to + open all time zone tables to see if they exist. 
*/ - last_global_next_ptr= &(tables_buff[0].next_global); - tz_init_table_list(tables_buff + 1, &last_global_next_ptr); - - if (simple_open_n_lock_tables(thd, tables_buff)) + if (open_system_tables_for_read(thd, tz_tables, &open_tables_state_backup)) { sql_print_warning("Can't open and lock time zone table: %s " "trying to live without them", thd->net.last_error); @@ -1714,7 +1654,6 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) return_val= time_zone_tables_exist= 0; goto end_with_setting_default_tz; } - tables= tables_buff + 1; /* Now we are going to load leap seconds descriptions that are shared @@ -1730,7 +1669,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) goto end_with_close; } - table= tables_buff[0].table; + table= tz_tables[0].table; /* It is OK to ignore ha_index_init()/ha_index_end() return values since mysql.time_zone* tables are MyISAM and these operations always succeed @@ -1787,7 +1726,12 @@ end_with_setting_default_tz: if (default_tzname) { String tmp_tzname2(default_tzname, &my_charset_latin1); - if (!(global_system_variables.time_zone= my_tz_find(&tmp_tzname2, tables))) + /* + Time zone tables may be open here, and my_tz_find() may open + most of them once more, but this is OK for system tables open + for READ. + */ + if (!(global_system_variables.time_zone= my_tz_find(thd, &tmp_tzname2))) { sql_print_error("Fatal error: Illegal or unknown default time zone '%s'", default_tzname); @@ -1796,8 +1740,11 @@ end_with_setting_default_tz: } end_with_close: - thd->version--; /* Force close to free memory */ - close_thread_tables(thd); + if (time_zone_tables_exist) + { + thd->version--; /* Force close to free memory */ + close_system_tables(thd, &open_tables_state_backup); + } end_with_cleanup: @@ -1906,7 +1853,6 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) */ table= tz_tables->table; tz_tables= tz_tables->next_local; - table->use_all_columns(); table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); /* @@ -1917,9 +1863,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) (void)table->file->ha_index_init(0, 1); if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 0, HA_READ_KEY_EXACT)) + HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { -#ifdef EXTRA_DEBUG +#ifdef EXTRA_DEBUG /* Most probably user has mistyped time zone name, so no need to bark here unless we need it for debugging. @@ -1939,13 +1885,12 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) using the only index in this table). */ table= tz_tables->table; - table->use_all_columns(); tz_tables= tz_tables->next_local; table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0, 1); if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 0, HA_READ_KEY_EXACT)) + HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { sql_print_error("Can't find description of time zone '%u'", tzid); goto end; @@ -1967,14 +1912,12 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) Right - using special index. */ table= tz_tables->table; - table->use_all_columns(); tz_tables= tz_tables->next_local; table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0, 1); - // FIXME Is there any better approach than explicitly specifying 4 ??? 
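The FIXME above goes away because index_read() now takes a key_part_map instead of a raw byte length: bit N set means keypart N of the search key is supplied, with HA_WHOLE_KEY meaning all of them. A local sketch of building such maps and turning one back into a byte length the way the new calculate_key_len() does (the typedef and constants below are stand-ins, not the server's headers):

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t key_part_map;
static const key_part_map WHOLE_KEY = ~(key_part_map) 0;   // like HA_WHOLE_KEY

// Map with the first n keypart bits set: 1 -> 0b1, 2 -> 0b11, ...
static key_part_map first_n_keyparts(unsigned n)
{
  return (n >= 64) ? WHOLE_KEY : (((key_part_map) 1 << n) - 1);
}

// Turns a prefix map back into a byte length, the way calculate_key_len()
// accumulates KEY_PART_INFO::store_length values while shifting the map.
static unsigned key_prefix_length(const unsigned *store_lengths, unsigned n_parts,
                                  key_part_map map)
{
  assert(((map + 1) & map) == 0);          // must be a contiguous prefix of bits
  unsigned length = 0;
  for (unsigned i = 0; i < n_parts && map; i++, map >>= 1)
    length += store_lengths[i];
  return length;
}

int main()
{
  unsigned lengths[3] = {4, 32, 8};        // e.g. INT, VARCHAR(30), BIGINT keyparts
  printf("%u\n", key_prefix_length(lengths, 3, first_n_keyparts(1)));  // 4
  printf("%u\n", key_prefix_length(lengths, 3, first_n_keyparts(3)));  // 44
}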
res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 4, HA_READ_KEY_EXACT); + (key_part_map)1, HA_READ_KEY_EXACT); while (!res) { ttid= (uint)table->field[1]->val_int(); @@ -2041,13 +1984,11 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) in ascending order by index scan also satisfies us. */ table= tz_tables->table; - table->use_all_columns(); table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0, 1); - // FIXME Is there any better approach than explicitly specifying 4 ??? res= table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, - 4, HA_READ_KEY_EXACT); + (key_part_map)1, HA_READ_KEY_EXACT); while (!res) { ttime= (my_time_t)table->field[1]->val_int(); @@ -2251,8 +2192,8 @@ str_to_offset(const char *str, uint length, long *offset) SYNOPSIS my_tz_find() + thd - pointer to thread THD structure name - time zone specification - tz_tables - list of opened'n'locked time zone describing tables DESCRIPTION This function checks if name is one of time zones described in db, @@ -2274,11 +2215,10 @@ str_to_offset(const char *str, uint length, long *offset) values as parameter without additional external check and this property is used by @@time_zone variable handling code). - It will perform lookup in system tables (mysql.time_zone*) if needed - using tz_tables as list of already opened tables (for info about this - list look at tz_load_from_open_tables() description). It won't perform - such lookup if no time zone describing tables were found during server - start up. + It will perform lookup in system tables (mysql.time_zone*), + opening and locking them, and closing afterwards. It won't perform + such lookup if no time zone describing tables were found during + server start up. RETURN VALUE Pointer to corresponding Time_zone object. 0 - in case of bad time zone @@ -2286,7 +2226,7 @@ str_to_offset(const char *str, uint length, long *offset) */ Time_zone * -my_tz_find(const String * name, TABLE_LIST *tz_tables) +my_tz_find(THD *thd, const String *name) { Tz_names_entry *tmp_tzname; Time_zone *result_tz= 0; @@ -2294,8 +2234,6 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) DBUG_ENTER("my_tz_find"); DBUG_PRINT("enter", ("time zone name='%s'", name ? ((String *)name)->c_ptr_safe() : "NULL")); - DBUG_ASSERT(!time_zone_tables_exist || tz_tables || - current_thd->slave_thread); if (!name) DBUG_RETURN(0); @@ -2327,8 +2265,19 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) (const byte *)name->ptr(), name->length()))) result_tz= tmp_tzname->tz; - else if (time_zone_tables_exist && tz_tables) - result_tz= tz_load_from_open_tables(name, tz_tables); + else if (time_zone_tables_exist) + { + TABLE_LIST tz_tables[MY_TZ_TABLES_COUNT]; + Open_tables_state open_tables_state_backup; + + tz_init_table_list(tz_tables); + if (!open_system_tables_for_read(thd, tz_tables, + &open_tables_state_backup)) + { + result_tz= tz_load_from_open_tables(name, tz_tables); + close_system_tables(thd, &open_tables_state_backup); + } + } } VOID(pthread_mutex_unlock(&tz_LOCK)); @@ -2337,58 +2286,6 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) } -/* - A more standalone version of my_tz_find(): will open tz tables if needed. - This is so far only used by replication, where time zone setting does not - happen in the usual query context. 
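my_tz_find() keeps its check-the-cache-then-load-from-tables structure, but now opens and closes the time zone tables itself when a lookup is needed. The caching half is easy to show in isolation; this sketch uses std::map and std::mutex in place of the server's HASH and tz_LOCK, and load_from_tables() is a stand-in for tz_load_from_open_tables():

#include <cstdio>
#include <map>
#include <mutex>
#include <string>

struct TimeZoneDesc { long offset; };

static std::map<std::string, TimeZoneDesc> tz_cache;   // like the tz_names HASH
static std::mutex tz_lock;                             // like tz_LOCK

// Pretend exactly one zone exists in the tables.
static bool load_from_tables(const std::string &name, TimeZoneDesc *out)
{
  if (name != "Europe/Stockholm")
    return false;
  out->offset = 2 * 3600;
  return true;
}

// Check the in-memory cache first; only on a miss go to the tables, and cache
// whatever was found so later lookups stay cheap.
static const TimeZoneDesc *find_time_zone(const std::string &name)
{
  std::lock_guard<std::mutex> guard(tz_lock);
  std::map<std::string, TimeZoneDesc>::iterator it = tz_cache.find(name);
  if (it != tz_cache.end())
    return &it->second;
  TimeZoneDesc desc;
  if (!load_from_tables(name, &desc))
    return 0;                              // unknown zone, like my_tz_find() returning 0
  return &(tz_cache[name] = desc);
}

int main()
{
  printf("%s\n", find_time_zone("Mars/Olympus") ? "found" : "not found");
  const TimeZoneDesc *tz = find_time_zone("Europe/Stockholm");
  if (tz)
    printf("offset=%ld\n", tz->offset);    // offset=7200
}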
- - SYNOPSIS - my_tz_find_with_opening_tz_tables() - thd - pointer to thread's THD structure - name - time zone specification - - DESCRIPTION - This function tries to find a time zone which matches the named passed in - argument. If it fails, it will open time zone tables and re-try the - search. - This function is needed for the slave SQL thread, which does not do the - addition of time zone tables which is usually done during query parsing - (as time zone setting by slave does not happen in mysql_parse() but - before). So it needs to open tz tables by itself if needed. - See notes of my_tz_find() as they also apply here. - - RETURN VALUE - Pointer to corresponding Time_zone object. 0 - in case of bad time zone - specification or other error. -*/ - -Time_zone *my_tz_find_with_opening_tz_tables(THD *thd, const String *name) -{ - Time_zone *tz; - DBUG_ENTER("my_tz_find_with_opening_tables"); - DBUG_ASSERT(thd); - DBUG_ASSERT(thd->slave_thread); // intended for use with slave thread only - - if (!(tz= my_tz_find(name, 0)) && time_zone_tables_exist) - { - /* - Probably we have not loaded this time zone yet so let us look it up in - our time zone tables. Note that if we don't have tz tables on this - slave, we don't even try. - */ - TABLE_LIST tables[MY_TZ_TABLES_COUNT]; - TABLE_LIST *dummy; - TABLE_LIST **dummyp= &dummy; - tz_init_table_list(tables, &dummyp); - if (simple_open_n_lock_tables(thd, tables)) - DBUG_RETURN(0); - tz= my_tz_find(name, tables); - /* We need to close tables _now_ to not pollute coming query */ - close_thread_tables(thd); - } - DBUG_RETURN(tz); -} - #endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ @@ -2667,7 +2564,7 @@ main(int argc, char **argv) my_bool localtime_negative; TIME_ZONE_INFO tz_info; struct tm tmp; - TIME time_tmp; + MYSQL_TIME time_tmp; time_t t, t1, t2; char fullname[FN_REFLEN+1]; char *str_end; diff --git a/sql/tztime.h b/sql/tztime.h index 248a638074b..f7cc7042d79 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -22,7 +22,7 @@ /* This class represents abstract time zone and provides - basic interface for TIME <-> my_time_t conversion. + basic interface for MYSQL_TIME <-> my_time_t conversion. Actual time zones which are specified by DB, or via offset or use system functions are its descendants. */ @@ -31,18 +31,18 @@ class Time_zone: public Sql_alloc public: Time_zone() {} /* Remove gcc warning */ /* - Converts local time in broken down TIME representation to + Converts local time in broken down MYSQL_TIME representation to my_time_t (UTC seconds since Epoch) represenation. Returns 0 in case of error. Sets in_dst_time_gap to true if date provided falls into spring time-gap (or lefts it untouched otherwise). */ - virtual my_time_t TIME_to_gmt_sec(const TIME *t, + virtual my_time_t TIME_to_gmt_sec(const MYSQL_TIME *t, my_bool *in_dst_time_gap) const = 0; /* Converts time in my_time_t representation to local time in - broken down TIME representation. + broken down MYSQL_TIME representation. 
*/ - virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const = 0; + virtual void gmt_sec_to_TIME(MYSQL_TIME *tmp, my_time_t t) const = 0; /* Because of constness of String returned by get_name() time zone name have to be already zeroended to be able to use String::ptr() instead @@ -59,14 +59,10 @@ public: extern Time_zone * my_tz_UTC; extern Time_zone * my_tz_SYSTEM; -extern TABLE_LIST * my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr); -extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables); -extern Time_zone * my_tz_find_with_opening_tz_tables(THD *thd, const String *name); +extern Time_zone * my_tz_find(THD *thd, const String *name); extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap); extern void my_tz_free(); -extern my_time_t sec_since_epoch_TIME(TIME *t); - -extern TABLE_LIST fake_time_zone_tables_list; +extern my_time_t sec_since_epoch_TIME(MYSQL_TIME *t); /* Number of elements in table list produced by my_tz_get_table_list() @@ -77,34 +73,5 @@ extern TABLE_LIST fake_time_zone_tables_list; static const int MY_TZ_TABLES_COUNT= 4; -/* - Check if we have pointer to the begining of list of implicitly used time - zone tables, set SELECT_ACL for them and fast-forward to its end. - - SYNOPSIS - my_tz_check_n_skip_implicit_tables() - table - (in/out) pointer to element of table list to check - tz_tables - list of implicitly used time zone tables received - from my_tz_get_table_list() function. - - NOTE - This function relies on my_tz_get_table_list() implementation. - - RETURN VALUE - TRUE - if table points to the beggining of tz_tables list - FALSE - otherwise. -*/ -inline bool my_tz_check_n_skip_implicit_tables(TABLE_LIST **table, - TABLE_LIST *tz_tables) -{ - if (*table == tz_tables) - { - for (int i= 0; i < MY_TZ_TABLES_COUNT; i++) - (*table)[i].grant.privilege= SELECT_ACL; - (*table)+= MY_TZ_TABLES_COUNT - 1; - return TRUE; - } - return FALSE; -} #endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ diff --git a/sql/unireg.cc b/sql/unireg.cc index d90420313a6..a69a9be6a43 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -936,7 +936,9 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type, (regfield->real_type() != MYSQL_TYPE_YEAR || field->def->val_int() != 0)) { - if (field->def->save_in_field(regfield, 1)) + int res= field->def->save_in_field(regfield, 1); + /* If not ok or warning of level 'note' */ + if (res != 0 && res != 3) { my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name); error= 1; |
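Finally, the unireg.cc hunk keeps a default-value store fatal only when save_in_field() reports a real problem: result 0 is OK and result 3 (a note-level warning in that code path, per the added comment) is tolerated. The same "some non-zero codes are benign" check in isolation; the enum names are invented for the example, the server works with the raw numeric result:

#include <cstdio>

enum StoreResult
{
  STORE_OK             = 0,
  STORE_WARN_TRUNCATED = 1,
  STORE_ERR_RANGE      = 2,
  STORE_NOTE           = 3
};

// Only real problems make the default value invalid; OK and note-level
// results are accepted, mirroring "if (res != 0 && res != 3)".
static bool default_value_is_invalid(int res)
{
  return res != STORE_OK && res != STORE_NOTE;
}

int main()
{
  printf("%d\n", default_value_is_invalid(STORE_OK));              // 0
  printf("%d\n", default_value_is_invalid(STORE_NOTE));            // 0
  printf("%d\n", default_value_is_invalid(STORE_WARN_TRUNCATED));  // 1
}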