author     Alexander Nozdrin <alik@sun.com>    2009-12-11 12:39:38 +0300
committer  Alexander Nozdrin <alik@sun.com>    2009-12-11 12:39:38 +0300
commit     567671368723c704d60902b4d0ccff951b414552 (patch)
tree       965519a5b0af3f33624c7e16fd61b58d15f42372 /sql
parent     efee0608316e4cc034a3e62d05980eef8530843d (diff)
parent     ceefe7bb50b17b72e88851e3b98642e89a4cddae (diff)
Manual merge from mysql-trunk.
Conflicts:
  - client/mysqltest.cc
  - mysql-test/collections/default.experimental
  - mysql-test/suite/rpl/t/disabled.def
  - sql/mysqld.cc
  - sql/opt_range.cc
  - sql/sp.cc
  - sql/sql_acl.cc
  - sql/sql_partition.cc
  - sql/sql_table.cc
Diffstat (limited to 'sql')
-rwxr-xr-x  sql/CMakeLists.txt | 12
-rw-r--r--  sql/Makefile.am | 8
-rwxr-xr-x  sql/add_errmsg | 4
-rw-r--r--  sql/authors.h | 6
-rw-r--r--  sql/client_settings.h | 6
-rw-r--r--  sql/contributors.h | 5
-rw-r--r--  sql/debug_sync.cc | 8
-rw-r--r--  sql/derror.cc | 61
-rw-r--r--  sql/event_data_objects.cc | 4
-rw-r--r--  sql/event_scheduler.cc | 18
-rw-r--r--  sql/events.cc | 2
-rw-r--r--  sql/field.cc | 200
-rw-r--r--  sql/field.h | 36
-rw-r--r--  sql/gstream.h | 5
-rw-r--r--  sql/ha_ndbcluster.cc | 137
-rw-r--r--  sql/ha_ndbcluster.h | 5
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 124
-rw-r--r--  sql/ha_ndbcluster_binlog.h | 5
-rw-r--r--  sql/ha_ndbcluster_cond.h | 5
-rw-r--r--  sql/ha_ndbcluster_tables.h | 5
-rw-r--r--  sql/ha_partition.cc | 294
-rw-r--r--  sql/ha_partition.h | 22
-rw-r--r--  sql/handler.cc | 141
-rw-r--r--  sql/handler.h | 23
-rw-r--r--  sql/hash_filo.h | 16
-rw-r--r--  sql/hostname.cc | 2
-rw-r--r--  sql/item.cc | 182
-rw-r--r--  sql/item.h | 35
-rw-r--r--  sql/item_cmpfunc.h | 7
-rw-r--r--  sql/item_create.cc | 81
-rw-r--r--  sql/item_func.cc | 52
-rw-r--r--  sql/item_func.h | 4
-rw-r--r--  sql/item_geofunc.h | 4
-rw-r--r--  sql/item_row.h | 5
-rw-r--r--  sql/item_strfunc.cc | 112
-rw-r--r--  sql/item_strfunc.h | 11
-rw-r--r--  sql/item_subselect.cc | 6
-rw-r--r--  sql/item_subselect.h | 5
-rw-r--r--  sql/item_sum.cc | 1249
-rw-r--r--  sql/item_sum.h | 546
-rw-r--r--  sql/item_timefunc.cc | 58
-rw-r--r--  sql/item_timefunc.h | 29
-rw-r--r--  sql/item_xmlfunc.h | 4
-rw-r--r--  sql/key.cc | 21
-rw-r--r--  sql/lex.h | 23
-rw-r--r--  sql/lock.cc | 18
-rw-r--r--  sql/log.cc | 185
-rw-r--r--  sql/log.h | 34
-rw-r--r--  sql/log_event.cc | 160
-rw-r--r--  sql/log_event.h | 57
-rw-r--r--  sql/log_event_old.cc | 44
-rw-r--r--  sql/message.h | 7
-rw-r--r--  sql/my_decimal.cc | 6
-rw-r--r--  sql/mysql_priv.h | 199
-rw-r--r--  sql/mysqld.cc | 407
-rw-r--r--  sql/mysqld_suffix.h | 4
-rw-r--r--  sql/net_serv.cc | 15
-rw-r--r--  sql/nt_servc.h | 5
-rw-r--r--  sql/opt_range.cc | 526
-rw-r--r--  sql/opt_range.h | 17
-rw-r--r--  sql/opt_sum.cc | 10
-rw-r--r--  sql/partition_element.h | 46
-rw-r--r--  sql/partition_info.cc | 1200
-rw-r--r--  sql/partition_info.h | 87
-rw-r--r--  sql/procedure.h | 7
-rw-r--r--  sql/protocol.cc | 449
-rw-r--r--  sql/protocol.h | 46
-rw-r--r--  sql/records.cc | 5
-rw-r--r--  sql/records.h | 79
-rw-r--r--  sql/repl_failsafe.cc | 27
-rw-r--r--  sql/repl_failsafe.h | 4
-rw-r--r--  sql/replication.h | 550
-rw-r--r--  sql/rpl_filter.cc | 12
-rw-r--r--  sql/rpl_handler.cc | 518
-rw-r--r--  sql/rpl_handler.h | 213
-rw-r--r--  sql/rpl_injector.h | 3
-rw-r--r--  sql/rpl_mi.cc | 138
-rw-r--r--  sql/rpl_mi.h | 15
-rw-r--r--  sql/rpl_rli.cc | 22
-rw-r--r--  sql/rpl_rli.h | 25
-rw-r--r--  sql/rpl_tblmap.cc | 14
-rw-r--r--  sql/rpl_tblmap.h | 9
-rw-r--r--  sql/scheduler.h | 5
-rw-r--r--  sql/set_var.cc | 257
-rw-r--r--  sql/set_var.h | 77
-rw-r--r--  sql/share/Makefile.am | 8
-rw-r--r--  sql/share/errmsg-cnv.sh | 61
-rw-r--r--  sql/share/errmsg-utf8.txt | 6262
-rw-r--r--  sql/share/errmsg.txt | 62
-rw-r--r--  sql/slave.cc | 668
-rw-r--r--  sql/slave.h | 13
-rw-r--r--  sql/sp.cc | 40
-rw-r--r--  sql/sp_cache.cc | 11
-rw-r--r--  sql/sp_head.cc | 98
-rw-r--r--  sql/sp_pcontext.cc | 3
-rw-r--r--  sql/sp_pcontext.h | 2
-rw-r--r--  sql/sp_rcontext.cc | 94
-rw-r--r--  sql/sp_rcontext.h | 61
-rw-r--r--  sql/sql_acl.cc | 328
-rw-r--r--  sql/sql_acl.h | 20
-rw-r--r--  sql/sql_analyse.h | 5
-rw-r--r--  sql/sql_array.h | 4
-rw-r--r--  sql/sql_base.cc | 161
-rw-r--r--  sql/sql_binlog.cc | 2
-rw-r--r--  sql/sql_cache.cc | 274
-rw-r--r--  sql/sql_cache.h | 32
-rw-r--r--  sql/sql_class.cc | 560
-rw-r--r--  sql/sql_class.h | 487
-rw-r--r--  sql/sql_connect.cc | 71
-rw-r--r--  sql/sql_crypt.h | 5
-rw-r--r--  sql/sql_cursor.cc | 30
-rw-r--r--  sql/sql_db.cc | 57
-rw-r--r--  sql/sql_delete.cc | 19
-rw-r--r--  sql/sql_derived.cc | 6
-rw-r--r--  sql/sql_error.cc | 786
-rw-r--r--  sql/sql_error.h | 548
-rw-r--r--  sql/sql_handler.cc | 59
-rw-r--r--  sql/sql_help.cc | 6
-rw-r--r--  sql/sql_insert.cc | 96
-rw-r--r--  sql/sql_lex.cc | 68
-rw-r--r--  sql/sql_lex.h | 147
-rw-r--r--  sql/sql_list.cc | 2
-rw-r--r--  sql/sql_list.h | 6
-rw-r--r--  sql/sql_load.cc | 568
-rw-r--r--  sql/sql_locale.cc | 886
-rw-r--r--  sql/sql_map.h | 5
-rw-r--r--  sql/sql_parse.cc | 583
-rw-r--r--  sql/sql_partition.cc | 2028
-rw-r--r--  sql/sql_partition.h | 41
-rw-r--r--  sql/sql_plugin.cc | 177
-rw-r--r--  sql/sql_plugin.h | 18
-rw-r--r--  sql/sql_plugin_services.h | 44
-rw-r--r--  sql/sql_prepare.cc | 885
-rw-r--r--  sql/sql_prepare.h | 367
-rw-r--r--  sql/sql_profile.cc | 6
-rw-r--r--  sql/sql_profile.h | 2
-rw-r--r--  sql/sql_repl.cc | 495
-rw-r--r--  sql/sql_repl.h | 4
-rw-r--r--  sql/sql_select.cc | 300
-rw-r--r--  sql/sql_select.h | 22
-rw-r--r--  sql/sql_servers.cc | 37
-rw-r--r--  sql/sql_servers.h | 5
-rw-r--r--  sql/sql_show.cc | 546
-rw-r--r--  sql/sql_signal.cc | 510
-rw-r--r--  sql/sql_signal.h | 152
-rw-r--r--  sql/sql_sort.h | 5
-rw-r--r--  sql/sql_string.cc | 68
-rw-r--r--  sql/sql_string.h | 10
-rw-r--r--  sql/sql_table.cc | 248
-rw-r--r--  sql/sql_tablespace.cc | 4
-rw-r--r--  sql/sql_test.cc | 4
-rw-r--r--  sql/sql_trigger.cc | 2
-rw-r--r--  sql/sql_trigger.h | 4
-rw-r--r--  sql/sql_udf.cc | 28
-rw-r--r--  sql/sql_udf.h | 4
-rw-r--r--  sql/sql_update.cc | 10
-rw-r--r--  sql/sql_view.cc | 13
-rw-r--r--  sql/sql_view.h | 4
-rw-r--r--  sql/sql_yacc.yy | 1001
-rw-r--r--  sql/structs.h | 40
-rw-r--r--  sql/table.cc | 110
-rw-r--r--  sql/table.h | 42
-rw-r--r--  sql/thr_malloc.cc | 7
-rw-r--r--  sql/time.cc | 2
-rw-r--r--  sql/tzfile.h | 5
-rw-r--r--  sql/tztime.cc | 29
-rw-r--r--  sql/tztime.h | 4
-rw-r--r--  sql/udf_example.c | 5
-rw-r--r--  sql/unireg.cc | 32
-rw-r--r--  sql/unireg.h | 11
170 files changed, 23726 insertions(+), 5899 deletions(-)
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 7f6074c903c..15c2d950ff9 100755
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -15,9 +15,9 @@
INCLUDE("${PROJECT_SOURCE_DIR}/win/mysql_manifest.cmake")
SET(CMAKE_CXX_FLAGS_DEBUG
- "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR /Zi")
+ "${CMAKE_CXX_FLAGS_DEBUG} -DUSE_SYMDIR /Zi")
SET(CMAKE_C_FLAGS_DEBUG
- "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR /Zi")
+ "${CMAKE_C_FLAGS_DEBUG} -DUSE_SYMDIR /Zi")
SET(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /MAP /MAPINFO:EXPORTS")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
@@ -45,7 +45,7 @@ SET (SQL_SOURCE
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
filesort.cc gstream.cc
ha_partition.cc
- handler.cc hash_filo.cc hash_filo.h
+ handler.cc hash_filo.cc hash_filo.h sql_plugin_services.h
hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc
item_create.cc item_func.cc item_geofunc.cc item_row.cc
item_strfunc.cc item_subselect.cc item_sum.cc item_timefunc.cc
@@ -76,6 +76,7 @@ SET (SQL_SOURCE
rpl_rli.cc rpl_mi.cc sql_servers.cc
sql_connect.cc scheduler.cc
sql_profile.cc event_parse_data.cc
+ sql_signal.cc rpl_handler.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
${PROJECT_SOURCE_DIR}/include/mysqld_error.h
@@ -96,7 +97,6 @@ SET_TARGET_PROPERTIES(mysqld PROPERTIES ENABLE_EXPORTS TRUE)
SET (MYSQLD_CORE_LIBS mysys zlib dbug strings yassl taocrypt vio regex sql)
TARGET_LINK_LIBRARIES(mysqld ${MYSQLD_CORE_LIBS} ${MYSQLD_STATIC_ENGINE_LIBS})
-TARGET_LINK_LIBRARIES(mysqld ws2_32.lib)
IF(MSVC AND NOT WITHOUT_DYNAMIC_PLUGINS)
@@ -129,7 +129,7 @@ ADD_CUSTOM_COMMAND(
# Gen_lex_hash
ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc)
-TARGET_LINK_LIBRARIES(gen_lex_hash debug dbug mysqlclient wsock32)
+TARGET_LINK_LIBRARIES(gen_lex_hash dbug mysqlclient)
GET_TARGET_PROPERTY(GEN_LEX_HASH_EXE gen_lex_hash LOCATION)
ADD_CUSTOM_COMMAND(
OUTPUT ${PROJECT_SOURCE_DIR}/sql/lex_hash.h
@@ -152,4 +152,4 @@ SET_DIRECTORY_PROPERTIES(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES
ADD_LIBRARY(udf_example MODULE udf_example.c udf_example.def)
ADD_DEPENDENCIES(udf_example strings GenError)
-TARGET_LINK_LIBRARIES(udf_example strings wsock32)
+TARGET_LINK_LIBRARIES(udf_example strings)
diff --git a/sql/Makefile.am b/sql/Makefile.am
index a6d883a27dc..15ee0d588c4 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -82,7 +82,7 @@ mysqld_LDADD = libndb.la \
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_strfunc.h item_timefunc.h \
- item_xmlfunc.h \
+ item_xmlfunc.h sql_plugin_services.h \
item_create.h item_subselect.h item_row.h \
mysql_priv.h item_geofunc.h sql_bitmap.h \
procedure.h sql_class.h sql_lex.h sql_list.h \
@@ -111,7 +111,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
sql_plugin.h authors.h event_parse_data.h \
event_data_objects.h event_scheduler.h \
sql_partition.h partition_info.h partition_element.h \
- contributors.h sql_servers.h
+ contributors.h sql_servers.h sql_signal.h records.h \
+ sql_prepare.h rpl_handler.h replication.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
@@ -156,7 +157,8 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
event_queue.cc event_db_repository.cc events.cc \
sql_plugin.cc sql_binlog.cc \
sql_builtin.cc sql_tablespace.cc partition_info.cc \
- sql_servers.cc event_parse_data.cc
+ sql_servers.cc event_parse_data.cc sql_signal.cc \
+ rpl_handler.cc
nodist_mysqld_SOURCES = mini_client_errors.c pack.c client.c my_time.c my_user.c
diff --git a/sql/add_errmsg b/sql/add_errmsg
index cf54ede5dce..86226926d38 100755
--- a/sql/add_errmsg
+++ b/sql/add_errmsg
@@ -8,8 +8,8 @@ then
fi
FILE=/tmp/add.$$
-tail -$1 share/english/errmsg.txt > $FILE
-for i in `ls share/*/errmsg.txt | grep -v english`
+tail -$1 share/english/errmsg-utf8.txt > $FILE
+for i in `ls share/*/errmsg-utf8.txt | grep -v english`
do
cat $FILE >> $i
done
diff --git a/sql/authors.h b/sql/authors.h
index dfe3b143e2f..ec46e368f5f 100644
--- a/sql/authors.h
+++ b/sql/authors.h
@@ -1,3 +1,6 @@
+#ifndef AUTHORS_INCLUDED
+#define AUTHORS_INCLUDED
+
/* Copyright (C) 2005-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -36,6 +39,7 @@ struct show_table_authors_st {
struct show_table_authors_st show_table_authors[]= {
{ "Brian (Krow) Aker", "Seattle, WA, USA",
"Architecture, archive, federated, bunch of little stuff :)" },
+ { "Marc Alff", "Denver, CO, USA", "Signal, Resignal" },
{ "Venu Anuganti", "", "Client/server protocol (4.1)" },
{ "David Axmark", "Uppsala, Sweden",
"Small stuff long time ago, Monty ripped it out!" },
@@ -150,3 +154,5 @@ struct show_table_authors_st show_table_authors[]= {
"SHA1(), AES_ENCRYPT(), AES_DECRYPT(), bug fixing" },
{NULL, NULL, NULL}
};
+
+#endif /* AUTHORS_INCLUDED */
diff --git a/sql/client_settings.h b/sql/client_settings.h
index 4f06c15a29e..fd50bfdbb88 100644
--- a/sql/client_settings.h
+++ b/sql/client_settings.h
@@ -14,6 +14,12 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifndef CLIENT_SETTINGS_INCLUDED
+#define CLIENT_SETTINGS_INCLUDED
+#else
+#error You have already included an client_settings.h and it should not be included twice
+#endif /* CLIENT_SETTINGS_INCLUDED */
+
#include <thr_alarm.h>
#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | \
diff --git a/sql/contributors.h b/sql/contributors.h
index 87001e29d88..6cf8bb88e3b 100644
--- a/sql/contributors.h
+++ b/sql/contributors.h
@@ -1,3 +1,6 @@
+#ifndef CONTRIBUTORS_INCLUDED
+#define CONTRIBUTORS_INCLUDED
+
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -37,3 +40,5 @@ struct show_table_contributors_st show_table_contributors[]= {
{"Mark Shuttleworth", "London, UK.", "EFF contribution for UC2006 Auction"},
{NULL, NULL, NULL}
};
+
+#endif /* CONTRIBUTORS_INCLUDED */
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index 2580d526b52..81870e6b7a3 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -1701,9 +1701,11 @@ uchar *sys_var_debug_sync::value_ptr(THD *thd,
static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
{
- IF_DBUG(const char *dsp_name= action->sync_point.c_ptr());
- IF_DBUG(const char *sig_emit= action->signal.c_ptr());
- IF_DBUG(const char *sig_wait= action->wait_for.c_ptr());
+#ifndef DBUG_OFF
+ const char *dsp_name= action->sync_point.c_ptr();
+ const char *sig_emit= action->signal.c_ptr();
+ const char *sig_wait= action->wait_for.c_ptr();
+#endif
DBUG_ENTER("debug_sync_execute");
DBUG_ASSERT(thd);
DBUG_ASSERT(action);
diff --git a/sql/derror.cc b/sql/derror.cc
index a8cfa00ad1d..3073f37eba3 100644
--- a/sql/derror.cc
+++ b/sql/derror.cc
@@ -24,10 +24,15 @@
#include "mysql_priv.h"
#include "mysys_err.h"
-static bool read_texts(const char *file_name,const char ***point,
- uint error_messages);
static void init_myfunc_errs(void);
+
+const char **get_server_errmsgs()
+{
+ return CURRENT_THD_ERRMSGS;
+}
+
+
/**
Read messages from errorfile.
@@ -54,7 +59,8 @@ bool init_errmessage(void)
errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST);
/* Read messages from file. */
- if (read_texts(ERRMSG_FILE, &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) &&
+ if (read_texts(ERRMSG_FILE, my_default_lc_messages->errmsgs->language,
+ &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) &&
!errmsgs)
{
if (!(errmsgs= (const char**) my_malloc((ER_ERROR_LAST-ER_ERROR_FIRST+1)*
@@ -65,13 +71,13 @@ bool init_errmessage(void)
}
/* Register messages for use with my_error(). */
- if (my_error_register(errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST))
+ if (my_error_register(get_server_errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST))
{
x_free((uchar*) errmsgs);
DBUG_RETURN(TRUE);
}
- errmesg= errmsgs; /* Init global variabel */
+ DEFAULT_ERRMSGS= errmsgs; /* Init global variable */
init_myfunc_errs(); /* Init myfunc messages */
DBUG_RETURN(FALSE);
}
@@ -81,19 +87,17 @@ bool init_errmessage(void)
Read text from packed textfile in language-directory.
If we can't read messagefile then it's panic- we can't continue.
-
- @todo
- Convert the character set to server system character set
*/
-static bool read_texts(const char *file_name,const char ***point,
- uint error_messages)
+bool read_texts(const char *file_name, const char *language,
+ const char ***point, uint error_messages)
{
register uint i;
uint count,funktpos,textcount;
size_t length;
File file;
char name[FN_REFLEN];
+ char lang_path[FN_REFLEN];
uchar *buff;
uchar head[32],*pos;
const char *errmsg;
@@ -101,10 +105,26 @@ static bool read_texts(const char *file_name,const char ***point,
LINT_INIT(buff);
funktpos=0;
- if ((file=my_open(fn_format(name,file_name,language,"",4),
+ convert_dirname(lang_path, language, NullS);
+ (void) my_load_path(lang_path, lang_path, lc_messages_dir);
+ if ((file=my_open(fn_format(name,file_name,
+ lang_path, "", 4),
O_RDONLY | O_SHARE | O_BINARY,
MYF(0))) < 0)
- goto err; /* purecov: inspected */
+ {
+ /*
+ Trying pre-5.4 sematics of the --language parameter.
+ It included the language-specific part, e.g.:
+
+ --language=/path/to/english/
+ */
+ if ((file= my_open(fn_format(name, file_name, lc_messages_dir, "", 4),
+ O_RDONLY | O_SHARE | O_BINARY,
+ MYF(0))) < 0)
+ goto err;
+ sql_print_error("An old style --language value with language specific part detected: %s", lc_messages_dir);
+ sql_print_error("Use --lc-messages-dir without language specific part instead.");
+ }
funktpos=1;
if (my_read(file,(uchar*) head,32,MYF(MY_NABP))) goto err;
@@ -113,21 +133,7 @@ static bool read_texts(const char *file_name,const char ***point,
goto err; /* purecov: inspected */
textcount=head[4];
- if (!head[30])
- {
- sql_print_error("Character set information not found in '%s'. \
-Please install the latest version of this file.",name);
- goto err1;
- }
-
- /* TODO: Convert the character set to server system character set */
- if (!get_charset(head[30],MYF(MY_WME)))
- {
- sql_print_error("Character set #%d is not supported for messagefile '%s'",
- (int)head[30],name);
- goto err1;
- }
-
+ error_message_charset_info= system_charset_info;
length=uint2korr(head+6); count=uint2korr(head+8);
if (count < error_messages)
@@ -180,7 +186,6 @@ err:
break;
}
sql_print_error(errmsg, name);
-err1:
if (file != FERR)
VOID(my_close(file,MYF(MY_WME)));
DBUG_RETURN(1);
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index f2ec0e8cf64..23eec749a30 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -1225,7 +1225,9 @@ Event_timed::get_create_event(THD *thd, String *buf)
expression))
DBUG_RETURN(EVEX_MICROSECOND_UNSUP);
- buf->append(STRING_WITH_LEN("CREATE EVENT "));
+ buf->append(STRING_WITH_LEN("CREATE "));
+ append_definer(thd, buf, &definer_user, &definer_host);
+ buf->append(STRING_WITH_LEN("EVENT "));
append_identifier(thd, buf, name.str, name.length);
if (expression)
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index 8c0025f9ed4..ea20270b457 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -74,7 +74,7 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et)
{
MYSQL_ERROR *err;
DBUG_ENTER("evex_print_warnings");
- if (!thd->warn_list.elements)
+ if (thd->warning_info->is_empty())
DBUG_VOID_RETURN;
char msg_buf[10 * STRING_BUFFER_USUAL_SIZE];
@@ -90,17 +90,18 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et)
prefix.append(et->name.str, et->name.length, system_charset_info);
prefix.append("] ", 2);
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
while ((err= it++))
{
String err_msg(msg_buf, sizeof(msg_buf), system_charset_info);
/* set it to 0 or we start adding at the end. That's the trick ;) */
err_msg.length(0);
err_msg.append(prefix);
- err_msg.append(err->msg, strlen(err->msg), system_charset_info);
- DBUG_ASSERT(err->level < 3);
- (sql_print_message_handlers[err->level])("%*s", err_msg.length(),
- err_msg.c_ptr());
+ err_msg.append(err->get_message_text(),
+ err->get_message_octet_length(), system_charset_info);
+ DBUG_ASSERT(err->get_level() < 3);
+ (sql_print_message_handlers[err->get_level()])("%*s", err_msg.length(),
+ err_msg.c_ptr());
}
DBUG_VOID_RETURN;
}
@@ -606,7 +607,12 @@ Event_scheduler::stop()
LOCK_DATA();
DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
if (state != RUNNING)
+ {
+ /* Synchronously wait until the scheduler stops. */
+ while (state != INITIALIZED)
+ COND_STATE_WAIT(thd, NULL, "Waiting for the scheduler to stop");
goto end;
+ }
/* Guarantee we don't catch spurious signals */
do {
diff --git a/sql/events.cc b/sql/events.cc
index 458ad61718d..f7ff2b0ccf1 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -757,7 +757,7 @@ send_show_create_event(THD *thd, Event_timed *et, Protocol *protocol)
field_list.push_back(
new Item_empty_string("Database Collation", MY_CS_NAME_SIZE));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
diff --git a/sql/field.cc b/sql/field.cc
index 01ccc338782..1a480b7135e 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1016,6 +1016,36 @@ Item_result Field::result_merge_type(enum_field_types field_type)
Static help functions
*****************************************************************************/
+/**
+ Output a warning for erroneous conversion of strings to numerical
+ values. For use with ER_TRUNCATED_WRONG_VALUE[_FOR_FIELD]
+
+ @param thd THD object
+ @param str pointer to string that failed to be converted
+ @param length length of string
+ @param cs charset for string
+ @param typestr string describing type converted to
+ @param error error value to output
+ @param field_name (for *_FOR_FIELD) name of field
+ @param row_num (for *_FOR_FIELD) row number
+ */
+static void push_numerical_conversion_warning(THD* thd, const char* str,
+ uint length, CHARSET_INFO* cs,
+ const char* typestr, int error,
+ const char* field_name="UNKNOWN",
+ ulong row_num=0)
+{
+ char buf[max(max(DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE,
+ LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE),
+ DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE)];
+
+ String tmp(buf, sizeof(buf), cs);
+ tmp.copy(str, length, cs);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ error, ER(error), typestr, tmp.c_ptr(),
+ field_name, row_num);
+}
+
/**
Check whether a field type can be partially indexed by a key.
@@ -1109,14 +1139,12 @@ int Field_num::check_int(CHARSET_INFO *cs, const char *str, int length,
/* Test if we get an empty string or wrong integer */
if (str == int_end || error == MY_ERRNO_EDOM)
{
- char buff[128];
- String tmp(buff, (uint32) sizeof(buff), system_charset_info);
- tmp.copy(str, length, system_charset_info);
+ ErrConvString err(str, length, cs);
push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- "integer", tmp.c_ptr(), field_name,
- (ulong) table->in_use->row_count);
+ "integer", err.ptr(), field_name,
+ (ulong) table->in_use->warning_info->current_row_for_warning());
return 1;
}
/* Test if we have garbage at the end of the given string. */
@@ -1529,7 +1557,12 @@ void Field::make_field(Send_field *field)
if (orig_table && orig_table->s->db.str && *orig_table->s->db.str)
{
field->db_name= orig_table->s->db.str;
- field->org_table_name= orig_table->s->table_name.str;
+ if (orig_table->pos_in_table_list &&
+ orig_table->pos_in_table_list->schema_table)
+ field->org_table_name= (orig_table->pos_in_table_list->
+ schema_table->table_name);
+ else
+ field->org_table_name= orig_table->s->table_name.str;
}
else
field->org_table_name= field->db_name= "";
@@ -1772,7 +1805,7 @@ bool Field::optimize_range(uint idx, uint part)
}
-Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table,
+Field *Field::new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type __attribute__((unused)))
{
Field *tmp;
@@ -1793,7 +1826,7 @@ Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table,
}
-Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
+Field *Field::new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit)
{
@@ -1810,7 +1843,7 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table,
/* This is used to generate a field in TABLE from TABLE_SHARE */
-Field *Field::clone(MEM_ROOT *root, struct st_table *new_table)
+Field *Field::clone(MEM_ROOT *root, TABLE *new_table)
{
Field *tmp;
if ((tmp= (Field*) memdup_root(root,(char*) this,size_of())))
@@ -2633,15 +2666,12 @@ int Field_new_decimal::store(const char *from, uint length,
&decimal_value)) &&
table->in_use->abort_on_warning)
{
- /* Because "from" is not NUL-terminated and we use %s in the ER() */
- String from_as_str;
- from_as_str.copy(from, length, &my_charset_bin);
-
- push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ErrConvString errmsg(from, length, &my_charset_bin);
+ push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- "decimal", from_as_str.c_ptr(), field_name,
- (ulong) table->in_use->row_count);
+ "decimal", errmsg.ptr(), field_name,
+ (ulong) table->in_use->warning_info->current_row_for_warning());
DBUG_RETURN(err);
}
@@ -2656,18 +2686,15 @@ int Field_new_decimal::store(const char *from, uint length,
break;
case E_DEC_BAD_NUM:
{
- /* Because "from" is not NUL-terminated and we use %s in the ER() */
- String from_as_str;
- from_as_str.copy(from, length, &my_charset_bin);
-
- push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
- ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- "decimal", from_as_str.c_ptr(), field_name,
- (ulong) table->in_use->row_count);
- my_decimal_set_zero(&decimal_value);
-
- break;
+ ErrConvString errmsg(from, length, &my_charset_bin);
+ push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
+ ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
+ "decimal", errmsg.ptr(), field_name,
+ (ulong) table->in_use->warning_info->
+ current_row_for_warning());
+ my_decimal_set_zero(&decimal_value);
+ break;
}
}
@@ -4528,10 +4555,11 @@ warn:
char buf[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
String tmp(buf, sizeof(buf), &my_charset_latin1), *str;
str= val_str(&tmp, 0);
+ ErrConvString err(str);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
- str->c_ptr());
+ err.ptr());
}
return res;
}
@@ -5317,7 +5345,7 @@ bool Field_time::get_date(MYSQL_TIME *ltime, uint fuzzydate)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
ER(ER_WARN_DATA_OUT_OF_RANGE), field_name,
- thd->row_count);
+ thd->warning_info->current_row_for_warning());
return 1;
}
tmp=(long) sint3korr(ptr);
@@ -6311,21 +6339,20 @@ check_string_copy_error(Field_str *field,
{
const char *pos;
char tmp[32];
-
+ THD *thd= field->table->in_use;
+
if (!(pos= well_formed_error_pos) &&
!(pos= cannot_convert_error_pos))
return FALSE;
convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6);
- push_warning_printf(field->table->in_use,
- field->table->in_use->abort_on_warning ?
- MYSQL_ERROR::WARN_LEVEL_ERROR :
+ push_warning_printf(thd,
MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
+ ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"string", tmp, field->field_name,
- (ulong) field->table->in_use->row_count);
+ thd->warning_info->current_row_for_warning());
return TRUE;
}
@@ -6359,7 +6386,7 @@ Field_longstr::report_if_important_data(const char *ptr, const char *end,
if (test_if_important_data(field_charset, ptr, end))
{
if (table->in_use->abort_on_warning)
- set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
return 2;
@@ -6559,13 +6586,11 @@ double Field_string::val_real(void)
!check_if_only_end_space(cs, end,
(char*) ptr + field_length))))
{
- char buf[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
- String tmp(buf, sizeof(buf), cs);
- tmp.copy((char*) ptr, field_length, cs);
+ ErrConvString err((char*) ptr, field_length, cs);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE,
- ER(ER_TRUNCATED_WRONG_VALUE),
- "DOUBLE", tmp.c_ptr());
+ ER_TRUNCATED_WRONG_VALUE,
+ ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE",
+ err.ptr());
}
return result;
}
@@ -6585,13 +6610,11 @@ longlong Field_string::val_int(void)
!check_if_only_end_space(cs, end,
(char*) ptr + field_length))))
{
- char buf[LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE];
- String tmp(buf, sizeof(buf), cs);
- tmp.copy((char*) ptr, field_length, cs);
+ ErrConvString err((char*) ptr, field_length, cs);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE),
- "INTEGER", tmp.c_ptr());
+ "INTEGER", err.ptr());
}
return result;
}
@@ -6623,14 +6646,11 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
charset(), decimal_value);
if (!table->in_use->no_errors && err)
{
- char buf[DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE];
- CHARSET_INFO *cs= charset();
- String tmp(buf, sizeof(buf), cs);
- tmp.copy((char*) ptr, field_length, cs);
+ ErrConvString errmsg((char*) ptr, field_length, charset());
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE),
- "DECIMAL", tmp.c_ptr());
+ "DECIMAL", errmsg.ptr());
}
return decimal_value;
@@ -6693,9 +6713,8 @@ int Field_string::cmp(const uchar *a_ptr, const uchar *b_ptr)
void Field_string::sort_string(uchar *to,uint length)
{
- IF_DBUG(uint tmp=) my_strnxfrm(field_charset,
- to, length,
- ptr, field_length);
+ uint tmp __attribute__((unused))=
+ my_strnxfrm(field_charset, to, length, ptr, field_length);
DBUG_ASSERT(tmp == length);
}
@@ -6951,7 +6970,7 @@ uint Field_string::get_key_image(uchar *buff, uint length, imagetype type_arg)
}
-Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table,
+Field *Field_string::new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type)
{
Field *field;
@@ -7062,22 +7081,46 @@ int Field_varstring::store(longlong nr, bool unsigned_val)
double Field_varstring::val_real(void)
{
ASSERT_COLUMN_MARKED_FOR_READ;
- int not_used;
- char *end_not_used;
+ int error;
+ char *end;
+ double result;
+ CHARSET_INFO* cs= charset();
+
uint length= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
- return my_strntod(field_charset, (char*) ptr+length_bytes, length,
- &end_not_used, &not_used);
+ result= my_strntod(cs, (char*)ptr+length_bytes, length, &end, &error);
+
+ if (!table->in_use->no_errors &&
+ (error || (length != (uint)(end - (char*)ptr+length_bytes) &&
+ !check_if_only_end_space(cs, end, (char*)ptr+length_bytes+length))))
+ {
+ push_numerical_conversion_warning(current_thd, (char*)ptr+length_bytes,
+ length, cs,"DOUBLE",
+ ER_TRUNCATED_WRONG_VALUE);
+ }
+ return result;
}
longlong Field_varstring::val_int(void)
{
ASSERT_COLUMN_MARKED_FOR_READ;
- int not_used;
- char *end_not_used;
+ int error;
+ char *end;
+ CHARSET_INFO *cs= charset();
+
uint length= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
- return my_strntoll(field_charset, (char*) ptr+length_bytes, length, 10,
- &end_not_used, &not_used);
+ longlong result= my_strntoll(cs, (char*) ptr+length_bytes, length, 10,
+ &end, &error);
+
+ if (!table->in_use->no_errors &&
+ (error || (length != (uint)(end - (char*)ptr+length_bytes) &&
+ !check_if_only_end_space(cs, end, (char*)ptr+length_bytes+length))))
+ {
+ push_numerical_conversion_warning(current_thd, (char*)ptr+length_bytes,
+ length, cs, "INTEGER",
+ ER_TRUNCATED_WRONG_VALUE);
+ }
+ return result;
}
String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
@@ -7093,9 +7136,17 @@ String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
{
ASSERT_COLUMN_MARKED_FOR_READ;
+ CHARSET_INFO *cs= charset();
uint length= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
- str2my_decimal(E_DEC_FATAL_ERROR, (char*) ptr+length_bytes, length,
- charset(), decimal_value);
+ int error= str2my_decimal(E_DEC_FATAL_ERROR, (char*) ptr+length_bytes, length,
+ cs, decimal_value);
+
+ if (!table->in_use->no_errors && error)
+ {
+ push_numerical_conversion_warning(current_thd, (char*)ptr+length_bytes,
+ length, cs, "DECIMAL",
+ ER_TRUNCATED_WRONG_VALUE);
+ }
return decimal_value;
}
@@ -7497,7 +7548,7 @@ int Field_varstring::cmp_binary(const uchar *a_ptr, const uchar *b_ptr,
}
-Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table,
+Field *Field_varstring::new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type)
{
Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table,
@@ -7509,7 +7560,7 @@ Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table,
Field *Field_varstring::new_key_field(MEM_ROOT *root,
- struct st_table *new_table,
+ TABLE *new_table,
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit)
{
@@ -8639,7 +8690,7 @@ void Field_enum::sql_type(String &res) const
}
-Field *Field_enum::new_field(MEM_ROOT *root, struct st_table *new_table,
+Field *Field_enum::new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type)
{
Field_enum *res= (Field_enum*) Field::new_field(root, new_table, keep_type);
@@ -8980,7 +9031,7 @@ Field_bit::do_last_null_byte() const
Field *Field_bit::new_key_field(MEM_ROOT *root,
- struct st_table *new_table,
+ TABLE *new_table,
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit)
{
@@ -9021,7 +9072,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len);
memset(ptr, 0xff, bytes_in_rec);
if (table->in_use->really_abort_on_warning())
- set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
@@ -9432,7 +9483,7 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
if (bits)
*ptr&= ((1 << bits) - 1); /* set first uchar */
if (table->in_use->really_abort_on_warning())
- set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
@@ -9819,8 +9870,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
break;
case MYSQL_TYPE_DATE:
/* Old date type. */
- if (protocol_version != PROTOCOL_VERSION-1)
- sql_type= MYSQL_TYPE_NEWDATE;
+ sql_type= MYSQL_TYPE_NEWDATE;
/* fall trough */
case MYSQL_TYPE_NEWDATE:
length= MAX_DATE_WIDTH;
@@ -10315,7 +10365,7 @@ Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code,
{
thd->cuted_fields+= cuted_increment;
push_warning_printf(thd, level, code, ER(code), field_name,
- thd->row_count);
+ thd->warning_info->current_row_for_warning());
return 0;
}
return level >= MYSQL_ERROR::WARN_LEVEL_WARN;
diff --git a/sql/field.h b/sql/field.h
index ae074cc1a30..159e4bf707c 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1,3 +1,6 @@
+#ifndef FIELD_INCLUDED
+#define FIELD_INCLUDED
+
/* Copyright 2000-2008 MySQL AB, 2008, 2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
@@ -60,8 +63,8 @@ public:
Note that you can use table->in_use as replacement for current_thd member
only inside of val_*() and store() members (e.g. you can't use it in cons)
*/
- struct st_table *table; // Pointer for table
- struct st_table *orig_table; // Pointer to original table
+ TABLE *table; // Pointer for table
+ TABLE *orig_table; // Pointer to original table
const char **table_name, *field_name;
LEX_STRING comment;
/* Field is part of the following keys */
@@ -301,12 +304,12 @@ public:
*/
virtual bool can_be_compared_as_longlong() const { return FALSE; }
virtual void free() {}
- virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table,
+ virtual Field *new_field(MEM_ROOT *root, TABLE *new_table,
bool keep_type);
- virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ virtual Field *new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
- Field *clone(MEM_ROOT *mem_root, struct st_table *new_table);
+ Field *clone(MEM_ROOT *mem_root, TABLE *new_table);
inline void move_field(uchar *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
{
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;
@@ -1510,7 +1513,7 @@ public:
enum_field_types real_type() const { return MYSQL_TYPE_STRING; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
- Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
+ Field *new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type);
virtual uint get_key_image(uchar *buff,uint length, imagetype type);
private:
int do_save_field_metadata(uchar *first_byte);
@@ -1597,8 +1600,8 @@ public:
enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
- Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
- Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ Field *new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type);
+ Field *new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
uint is_equal(Create_field *new_field);
@@ -1837,7 +1840,7 @@ public:
{
flags|=ENUM_FLAG;
}
- Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type);
+ Field *new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type);
enum_field_types type() const { return MYSQL_TYPE_STRING; }
enum Item_result cmp_type () const { return INT_RESULT; }
enum Item_result cast_to_int_type () const { return INT_RESULT; }
@@ -1937,9 +1940,12 @@ public:
virtual bool str_needs_quotes() { return TRUE; }
my_decimal *val_decimal(my_decimal *);
int cmp(const uchar *a, const uchar *b)
- {
- DBUG_ASSERT(ptr == a);
- return Field_bit::key_cmp(b, bytes_in_rec+test(bit_len));
+ {
+ DBUG_ASSERT(ptr == a || ptr == b);
+ if (ptr == a)
+ return Field_bit::key_cmp(b, bytes_in_rec+test(bit_len));
+ else
+ return Field_bit::key_cmp(a, bytes_in_rec+test(bit_len)) * -1;
}
int cmp_binary_offset(uint row_offset)
{ return cmp_offset(row_offset); }
@@ -1971,7 +1977,7 @@ public:
uint param_data, bool low_byte_first);
virtual void set_default();
- Field *new_key_field(MEM_ROOT *root, struct st_table *new_table,
+ Field *new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
void set_bit_ptr(uchar *bit_ptr_arg, uchar bit_ofs_arg)
@@ -2085,7 +2091,7 @@ public:
A class for sending info to the client
*/
-class Send_field {
+class Send_field :public Sql_alloc {
public:
const char *db_name;
const char *table_name,*org_table_name;
@@ -2188,3 +2194,5 @@ int set_field_to_null_with_conversions(Field *field, bool no_conversions);
#define f_no_default(x) (x & FIELDFLAG_NO_DEFAULT)
#define f_bit_as_char(x) ((x) & FIELDFLAG_TREAT_BIT_AS_CHAR)
#define f_is_hex_escape(x) ((x) & FIELDFLAG_HEX_ESCAPE)
+
+#endif /* FIELD_INCLUDED */
diff --git a/sql/gstream.h b/sql/gstream.h
index 1ef90ad5bf0..ea7158ee1a3 100644
--- a/sql/gstream.h
+++ b/sql/gstream.h
@@ -1,3 +1,6 @@
+#ifndef GSTREAM_INCLUDED
+#define GSTREAM_INCLUDED
+
/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -73,3 +76,5 @@ protected:
char *m_err_msg;
CHARSET_INFO *m_charset;
};
+
+#endif /* GSTREAM_INCLUDED */
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index d047e8ef189..83cceb0da76 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -265,11 +265,11 @@ static int ndb_to_mysql_error(const NdbError *ndberr)
- Used by replication to see if the error was temporary
*/
if (ndberr->status == NdbError::TemporaryError)
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
ndberr->code, ndberr->message, "NDB");
else
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndberr->code, ndberr->message, "NDB");
return error;
@@ -355,8 +355,8 @@ Thd_ndb::Thd_ndb()
m_error_code= 0;
query_state&= NDB_QUERY_NORMAL;
options= 0;
- (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
- (hash_get_key)thd_ndb_share_get_key, 0, 0);
+ (void) my_hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
+ (my_hash_get_key)thd_ndb_share_get_key, 0, 0);
}
Thd_ndb::~Thd_ndb()
@@ -380,7 +380,7 @@ Thd_ndb::~Thd_ndb()
ndb= NULL;
}
changed_tables.empty();
- hash_free(&open_tables);
+ my_hash_free(&open_tables);
}
void
@@ -536,7 +536,7 @@ static void set_ndb_err(THD *thd, const NdbError &err)
{
char buf[FN_REFLEN];
ndb_error_string(thd_ndb->m_error_code, buf, sizeof(buf));
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
thd_ndb->m_error_code, buf, "NDB");
}
@@ -4316,7 +4316,7 @@ int ha_ndbcluster::end_bulk_insert()
}
else
{
- IF_DBUG(int res=) trans->restart();
+ int res __attribute__((unused))= trans->restart();
DBUG_ASSERT(res == 0);
}
}
@@ -4587,9 +4587,9 @@ int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb)
const void *key= m_table;
HASH_SEARCH_STATE state;
THD_NDB_SHARE *thd_ndb_share=
- (THD_NDB_SHARE*)hash_first(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
+ (THD_NDB_SHARE*)my_hash_first(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
while (thd_ndb_share && thd_ndb_share->key != key)
- thd_ndb_share= (THD_NDB_SHARE*)hash_next(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
+ thd_ndb_share= (THD_NDB_SHARE*)my_hash_next(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state);
if (thd_ndb_share == 0)
{
thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root,
@@ -5308,7 +5308,7 @@ int ha_ndbcluster::create(const char *name,
{
if (create_info->storage_media == HA_SM_MEMORY)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -5363,7 +5363,7 @@ int ha_ndbcluster::create(const char *name,
case ROW_TYPE_FIXED:
if (field_type_forces_var_part(field->type()))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -5523,8 +5523,8 @@ int ha_ndbcluster::create(const char *name,
*/
{
uint length= (uint) strlen(name);
- if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
- (uchar*) name, length)))
+ if ((share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+ (uchar*) name, length)))
handle_trailing_share(share);
}
/*
@@ -5703,7 +5703,7 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info,
case ORDERED_INDEX:
if (key_info->algorithm == HA_KEY_ALG_HASH)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -5928,7 +5928,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
{
DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u",
share->key, share->use_count));
- IF_DBUG(int r=) rename_share(share, to);
+ int r __attribute__((unused))= rename_share(share, to);
DBUG_ASSERT(r == 0);
}
#endif
@@ -5952,7 +5952,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
#ifdef HAVE_NDB_BINLOG
if (share)
{
- IF_DBUG(int ret=) rename_share(share, from);
+ int ret __attribute__((unused))= rename_share(share, from);
DBUG_ASSERT(ret == 0);
/* ndb_share reference temporary free */
DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u",
@@ -7119,18 +7119,18 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
NdbDictionary::Object::UserTable) != 0)
ERR_RETURN(dict->getNdbError());
- if (hash_init(&ndb_tables, system_charset_info,list.count,0,0,
- (hash_get_key)tables_get_key,0,0))
+ if (my_hash_init(&ndb_tables, system_charset_info,list.count,0,0,
+ (my_hash_get_key)tables_get_key,0,0))
{
DBUG_PRINT("error", ("Failed to init HASH ndb_tables"));
DBUG_RETURN(-1);
}
- if (hash_init(&ok_tables, system_charset_info,32,0,0,
- (hash_get_key)tables_get_key,0,0))
+ if (my_hash_init(&ok_tables, system_charset_info,32,0,0,
+ (my_hash_get_key)tables_get_key,0,0))
{
DBUG_PRINT("error", ("Failed to init HASH ok_tables"));
- hash_free(&ndb_tables);
+ my_hash_free(&ndb_tables);
DBUG_RETURN(-1);
}
@@ -7171,7 +7171,8 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
{
bool file_on_disk= FALSE;
DBUG_PRINT("info", ("%s", file_name->str));
- if (hash_search(&ndb_tables, (uchar*) file_name->str, file_name->length))
+ if (my_hash_search(&ndb_tables, (uchar*) file_name->str,
+ file_name->length))
{
build_table_filename(name, sizeof(name) - 1, db,
file_name->str, reg_ext, 0);
@@ -7206,10 +7207,10 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
if (file_on_disk)
{
// Ignore this ndb table
- uchar *record= hash_search(&ndb_tables, (uchar*) file_name->str,
- file_name->length);
+ uchar *record= my_hash_search(&ndb_tables, (uchar*) file_name->str,
+ file_name->length);
DBUG_ASSERT(record);
- hash_delete(&ndb_tables, record);
+ my_hash_delete(&ndb_tables, record);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TABLE_EXISTS_ERROR,
"Local table %s.%s shadows ndb table",
@@ -7243,7 +7244,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
build_table_filename(name, sizeof(name) - 1, db, "", "", 0);
for (i= 0; i < ok_tables.records; i++)
{
- file_name_str= (char*)hash_element(&ok_tables, i);
+ file_name_str= (char*)my_hash_element(&ok_tables, i);
end= end1 +
tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name));
pthread_mutex_lock(&LOCK_open);
@@ -7259,8 +7260,9 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
List<char> create_list;
for (i= 0 ; i < ndb_tables.records ; i++)
{
- file_name_str= (char*) hash_element(&ndb_tables, i);
- if (!hash_search(&ok_tables, (uchar*) file_name_str, strlen(file_name_str)))
+ file_name_str= (char*) my_hash_element(&ndb_tables, i);
+ if (!my_hash_search(&ok_tables, (uchar*) file_name_str,
+ strlen(file_name_str)))
{
build_table_filename(name, sizeof(name) - 1,
db, file_name_str, reg_ext, 0);
@@ -7314,8 +7316,8 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
pthread_mutex_unlock(&LOCK_open);
- hash_free(&ok_tables);
- hash_free(&ndb_tables);
+ my_hash_free(&ok_tables);
+ my_hash_free(&ndb_tables);
// Delete schema file from files
if (!strcmp(db, NDB_REP_DB))
@@ -7479,8 +7481,8 @@ static int ndbcluster_init(void *p)
goto ndbcluster_init_error;
}
- (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
- (hash_get_key) ndbcluster_get_key,0,0);
+ (void) my_hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
+ (my_hash_get_key) ndbcluster_get_key,0,0);
#ifdef HAVE_NDB_BINLOG
/* start the ndb injector thread */
if (ndbcluster_binlog_start())
@@ -7493,7 +7495,7 @@ static int ndbcluster_init(void *p)
if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0))
{
DBUG_PRINT("error", ("Could not create ndb utility thread"));
- hash_free(&ndbcluster_open_tables);
+ my_hash_free(&ndbcluster_open_tables);
pthread_mutex_destroy(&ndbcluster_mutex);
pthread_mutex_destroy(&LOCK_ndb_util_thread);
pthread_cond_destroy(&COND_ndb_util_thread);
@@ -7510,7 +7512,7 @@ static int ndbcluster_init(void *p)
if (!ndb_util_thread_running)
{
DBUG_PRINT("error", ("ndb utility thread exited prematurely"));
- hash_free(&ndbcluster_open_tables);
+ my_hash_free(&ndbcluster_open_tables);
pthread_mutex_destroy(&ndbcluster_mutex);
pthread_mutex_destroy(&LOCK_ndb_util_thread);
pthread_cond_destroy(&COND_ndb_util_thread);
@@ -7561,7 +7563,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type)
while (ndbcluster_open_tables.records)
{
NDB_SHARE *share=
- (NDB_SHARE*) hash_element(&ndbcluster_open_tables, 0);
+ (NDB_SHARE*) my_hash_element(&ndbcluster_open_tables, 0);
#ifndef DBUG_OFF
fprintf(stderr, "NDB: table share %s with use_count %d not freed\n",
share->key, share->use_count);
@@ -7571,7 +7573,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type)
pthread_mutex_unlock(&ndbcluster_mutex);
}
#endif
- hash_free(&ndbcluster_open_tables);
+ my_hash_free(&ndbcluster_open_tables);
if (g_ndb)
{
@@ -7904,9 +7906,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
dbname, tabname, "", 0);
DBUG_PRINT("enter", ("name: %s", name));
pthread_mutex_lock(&ndbcluster_mutex);
- if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables,
- (uchar*) name,
- strlen(name))))
+ if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+ (uchar*) name,
+ strlen(name))))
{
pthread_mutex_unlock(&ndbcluster_mutex);
DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name));
@@ -8166,7 +8168,7 @@ static void print_ndbcluster_open_tables()
fprintf(DBUG_FILE, ">ndbcluster_open_tables\n");
for (uint i= 0; i < ndbcluster_open_tables.records; i++)
print_share("",
- (NDB_SHARE*)hash_element(&ndbcluster_open_tables, i));
+ (NDB_SHARE*)my_hash_element(&ndbcluster_open_tables, i));
fprintf(DBUG_FILE, "<ndbcluster_open_tables\n");
DBUG_UNLOCK_FILE;
}
@@ -8277,7 +8279,7 @@ int handle_trailing_share(NDB_SHARE *share)
at the cost of a possible mem leak, by "renaming" the share
- First remove from hash
*/
- hash_delete(&ndbcluster_open_tables, (uchar*) share);
+ my_hash_delete(&ndbcluster_open_tables, (uchar*) share);
/*
now give it a new name, just a running number
@@ -8310,12 +8312,12 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
uint new_length= (uint) strlen(new_key);
DBUG_PRINT("rename_share", ("old_key: %s old__length: %d",
share->key, share->key_length));
- if ((tmp= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
- (uchar*) new_key, new_length)))
+ if ((tmp= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+ (uchar*) new_key, new_length)))
handle_trailing_share(tmp);
/* remove the share from hash */
- hash_delete(&ndbcluster_open_tables, (uchar*) share);
+ my_hash_delete(&ndbcluster_open_tables, (uchar*) share);
dbug_print_open_tables();
/* save old stuff if insert should fail */
@@ -8416,9 +8418,9 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
if (!have_lock)
pthread_mutex_lock(&ndbcluster_mutex);
- if (!(share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
- (uchar*) key,
- length)))
+ if (!(share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+ (uchar*) key,
+ length)))
{
if (!create_if_not_exists)
{
@@ -8494,7 +8496,7 @@ void ndbcluster_real_free_share(NDB_SHARE **share)
DBUG_ENTER("ndbcluster_real_free_share");
dbug_print_share("ndbcluster_real_free_share:", *share);
- hash_delete(&ndbcluster_open_tables, (uchar*) *share);
+ my_hash_delete(&ndbcluster_open_tables, (uchar*) *share);
thr_lock_delete(&(*share)->lock);
pthread_mutex_destroy(&(*share)->mutex);
@@ -9398,7 +9400,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
}
for (i= 0, open_count= 0; i < record_count; i++)
{
- share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i);
+ share= (NDB_SHARE *)my_hash_element(&ndbcluster_open_tables, i);
#ifdef HAVE_NDB_BINLOG
if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0))
<= 0)
@@ -9609,11 +9611,11 @@ char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len)
}
err:
if (ndberr.status == NdbError::TemporaryError)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
ndberr.code, ndberr.message, "NDB");
else
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndberr.code, ndberr.message, "NDB");
return 0;
@@ -9784,7 +9786,7 @@ void ha_ndbcluster::set_auto_partitions(partition_info *part_info)
int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
{
NDBTAB *tab= (NDBTAB*)tab_ref;
- int32 *range_data= (int32*)my_malloc(part_info->no_parts*sizeof(int32),
+ int32 *range_data= (int32*)my_malloc(part_info->num_parts*sizeof(int32),
MYF(0));
uint i;
int error= 0;
@@ -9793,17 +9795,17 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
if (!range_data)
{
- mem_alloc_error(part_info->no_parts*sizeof(int32));
+ mem_alloc_error(part_info->num_parts*sizeof(int32));
DBUG_RETURN(1);
}
- for (i= 0; i < part_info->no_parts; i++)
+ for (i= 0; i < part_info->num_parts; i++)
{
longlong range_val= part_info->range_int_array[i];
if (unsigned_flag)
range_val-= 0x8000000000000000ULL;
if (range_val < INT_MIN32 || range_val >= INT_MAX32)
{
- if ((i != part_info->no_parts - 1) ||
+ if ((i != part_info->num_parts - 1) ||
(range_val != LONGLONG_MAX))
{
my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
@@ -9814,7 +9816,7 @@ int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
}
range_data[i]= (int32)range_val;
}
- tab->setRangeListData(range_data, sizeof(int32)*part_info->no_parts);
+ tab->setRangeListData(range_data, sizeof(int32)*part_info->num_parts);
error:
my_free((char*)range_data, MYF(0));
DBUG_RETURN(error);
@@ -9823,7 +9825,7 @@ error:
int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
{
NDBTAB *tab= (NDBTAB*)tab_ref;
- int32 *list_data= (int32*)my_malloc(part_info->no_list_values * 2
+ int32 *list_data= (int32*)my_malloc(part_info->num_list_values * 2
* sizeof(int32), MYF(0));
uint32 *part_id, i;
int error= 0;
@@ -9832,10 +9834,10 @@ int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
if (!list_data)
{
- mem_alloc_error(part_info->no_list_values*2*sizeof(int32));
+ mem_alloc_error(part_info->num_list_values*2*sizeof(int32));
DBUG_RETURN(1);
}
- for (i= 0; i < part_info->no_list_values; i++)
+ for (i= 0; i < part_info->num_list_values; i++)
{
LIST_PART_ENTRY *list_entry= &part_info->list_array[i];
longlong list_val= list_entry->list_value;
@@ -9851,7 +9853,7 @@ int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
part_id= (uint32*)&list_data[2*i+1];
*part_id= list_entry->partition_id;
}
- tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->no_list_values);
+ tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->num_list_values);
error:
my_free((char*)list_data, MYF(0));
DBUG_RETURN(error);
@@ -9907,7 +9909,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
{
if (!current_thd->variables.new_mode)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -9973,11 +9975,11 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
ng= 0;
ts_names[fd_index]= part_elem->tablespace_name;
frag_data[fd_index++]= ng;
- } while (++j < part_info->no_subparts);
+ } while (++j < part_info->num_subparts);
}
first= FALSE;
- } while (++i < part_info->no_parts);
- tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
+ } while (++i < part_info->num_parts);
+ tab->setDefaultNoPartitionsFlag(part_info->use_default_num_partitions);
tab->setLinearFlag(part_info->linear_hash_ind);
{
ha_rows max_rows= table_share->max_rows;
@@ -10371,7 +10373,7 @@ ndberror2:
}
-bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
+bool ha_ndbcluster::get_no_parts(const char *name, uint *num_parts)
{
Ndb *ndb;
NDBDICT *dict;
@@ -10393,7 +10395,7 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
Ndb_table_guard ndbtab_g(dict= ndb->getDictionary(), m_tabname);
if (!ndbtab_g.get_table())
ERR_BREAK(dict->getNdbError(), err);
- *no_parts= ndbtab_g.get_table()->getFragmentCount();
+ *num_parts= ndbtab_g.get_table()->getFragmentCount();
DBUG_RETURN(FALSE);
}
@@ -10450,7 +10452,8 @@ static int ndbcluster_fill_files_table(handlerton *hton,
continue;
ERR_RETURN(ndberr);
}
-
+ table->field[IS_FILES_TABLE_CATALOG]->store(STRING_WITH_LEN("def"),
+ system_charset_info);
table->field[IS_FILES_FILE_NAME]->set_notnull();
table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name),
system_charset_info);
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 9106fd60731..ac3e7329136 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -1,3 +1,6 @@
+#ifndef HA_NDBCLUSTER_INCLUDED
+#define HA_NDBCLUSTER_INCLUDED
+
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -582,3 +585,5 @@ static const int ndbcluster_hton_name_length=sizeof(ndbcluster_hton_name)-1;
extern int ndbcluster_terminating;
extern int ndb_util_thread_running;
extern pthread_cond_t COND_ndb_util_ready;
+
+#endif /* HA_NDBCLUSTER_INCLUDED */
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 27af3f2cf2f..e34a22cf9f4 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -272,13 +272,13 @@ static void run_query(THD *thd, char *buf, char *end,
Thd_ndb *thd_ndb= get_thd_ndb(thd);
for (i= 0; no_print_error[i]; i++)
if ((thd_ndb->m_error_code == no_print_error[i]) ||
- (thd->main_da.sql_errno() == (unsigned) no_print_error[i]))
+ (thd->stmt_da->sql_errno() == (unsigned) no_print_error[i]))
break;
if (!no_print_error[i])
sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d",
buf,
- thd->main_da.message(),
- thd->main_da.sql_errno(),
+ thd->stmt_da->message(),
+ thd->stmt_da->sql_errno(),
thd_ndb->m_error_code,
(int) thd->is_error(), thd->is_slave_error);
}
@@ -293,7 +293,7 @@ static void run_query(THD *thd, char *buf, char *end,
is called from ndbcluster_reset_logs(), which is called from
mysql_flush().
*/
- thd->main_da.reset_diagnostics_area();
+ thd->stmt_da->reset_diagnostics_area();
thd->options= save_thd_options;
thd->set_query(save_thd_query, save_thd_query_length);
@@ -740,9 +740,9 @@ static NDB_SHARE *ndbcluster_check_ndb_apply_status_share()
{
pthread_mutex_lock(&ndbcluster_mutex);
- void *share= hash_search(&ndbcluster_open_tables,
- (uchar*) NDB_APPLY_TABLE_FILE,
- sizeof(NDB_APPLY_TABLE_FILE) - 1);
+ void *share= my_hash_search(&ndbcluster_open_tables,
+ (uchar*) NDB_APPLY_TABLE_FILE,
+ sizeof(NDB_APPLY_TABLE_FILE) - 1);
DBUG_PRINT("info",("ndbcluster_check_ndb_apply_status_share %s 0x%lx",
NDB_APPLY_TABLE_FILE, (long) share));
pthread_mutex_unlock(&ndbcluster_mutex);
@@ -758,9 +758,9 @@ static NDB_SHARE *ndbcluster_check_ndb_schema_share()
{
pthread_mutex_lock(&ndbcluster_mutex);
- void *share= hash_search(&ndbcluster_open_tables,
- (uchar*) NDB_SCHEMA_TABLE_FILE,
- sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
+ void *share= my_hash_search(&ndbcluster_open_tables,
+ (uchar*) NDB_SCHEMA_TABLE_FILE,
+ sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
DBUG_PRINT("info",("ndbcluster_check_ndb_schema_share %s 0x%lx",
NDB_SCHEMA_TABLE_FILE, (long) share));
pthread_mutex_unlock(&ndbcluster_mutex);
@@ -963,6 +963,21 @@ struct Cluster_schema
uint32 any_value;
};
+static void print_could_not_discover_error(THD *thd,
+ const Cluster_schema *schema)
+{
+ sql_print_error("NDB Binlog: Could not discover table '%s.%s' from "
+ "binlog schema event '%s' from node %d. "
+ "my_errno: %d",
+ schema->db, schema->name, schema->query,
+ schema->node_id, my_errno);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
+ MYSQL_ERROR *err;
+ while ((err= it++))
+ sql_print_warning("NDB Binlog: (%d)%s", err->get_sql_errno(),
+ err->get_message_text());
+}
+
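The helper added above replaces two identical error/warning dumps further down in this file. Purely as an illustration of the shape (a std::vector stands in for the THD warning list and MYSQL_ERROR; none of these names are the server's API), a minimal standalone sketch:

  #include <cstdio>
  #include <string>
  #include <utility>
  #include <vector>

  // Hypothetical stand-ins for the schema event and the accumulated warnings.
  struct SchemaEvent { std::string db, name, query; int node_id; };
  typedef std::vector<std::pair<int, std::string> > WarningList;

  static void print_could_not_discover(const SchemaEvent &ev,
                                       const WarningList &warnings)
  {
    std::fprintf(stderr,
                 "NDB Binlog: Could not discover table '%s.%s' from "
                 "binlog schema event '%s' from node %d\n",
                 ev.db.c_str(), ev.name.c_str(), ev.query.c_str(), ev.node_id);
    // Dump every pending warning so the reason for the failure is visible.
    for (WarningList::const_iterator it= warnings.begin();
         it != warnings.end(); ++it)
      std::fprintf(stderr, "NDB Binlog: (%d)%s\n",
                   it->first, it->second.c_str());
  }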
/*
Transfer schema table data into corresponding struct
*/
@@ -1198,7 +1213,7 @@ ndbcluster_update_slock(THD *thd,
}
if (ndb_error)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndb_error->code,
ndb_error->message,
@@ -1521,7 +1536,7 @@ err:
}
end:
if (ndb_error)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndb_error->code,
ndb_error->message,
@@ -1971,15 +1986,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
}
else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
- sql_print_error("NDB Binlog: Could not discover table '%s.%s' from "
- "binlog schema event '%s' from node %d. "
- "my_errno: %d",
- schema->db, schema->name, schema->query,
- schema->node_id, my_errno);
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
- MYSQL_ERROR *err;
- while ((err= it++))
- sql_print_warning("NDB Binlog: (%d)%s", err->code, err->msg);
+ print_could_not_discover_error(thd, schema);
}
pthread_mutex_unlock(&LOCK_open);
log_query= 1;
@@ -2177,8 +2184,8 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
{
pthread_mutex_lock(&ndbcluster_mutex);
NDB_SCHEMA_OBJECT *ndb_schema_object=
- (NDB_SCHEMA_OBJECT*) hash_search(&ndb_schema_objects,
- (uchar*) key, strlen(key));
+ (NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects,
+ (uchar*) key, strlen(key));
if (ndb_schema_object)
{
pthread_mutex_lock(&ndb_schema_object->mutex);
@@ -2262,14 +2269,7 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
}
else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
- sql_print_error("NDB Binlog: Could not discover table '%s.%s' from "
- "binlog schema event '%s' from node %d. my_errno: %d",
- schema->db, schema->name, schema->query,
- schema->node_id, my_errno);
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
- MYSQL_ERROR *err;
- while ((err= it++))
- sql_print_warning("NDB Binlog: (%d)%s", err->code, err->msg);
+ print_could_not_discover_error(thd, schema);
}
pthread_mutex_unlock(&LOCK_open);
}
@@ -2344,8 +2344,8 @@ static int open_ndb_binlog_index(THD *thd, TABLE_LIST *tables,
sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
else
sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
- thd->main_da.sql_errno(),
- thd->main_da.message());
+ thd->stmt_da->sql_errno(),
+ thd->stmt_da->message());
thd->proc_info= save_proc_info;
return -1;
}
@@ -2571,8 +2571,8 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
pthread_mutex_lock(&ndbcluster_mutex);
/* Handle any trailing share */
- NDB_SHARE *share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
- (uchar*) key, key_len);
+ NDB_SHARE *share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
+ (uchar*) key, key_len);
if (share && share_may_exist)
{
@@ -2741,7 +2741,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
"with BLOB attribute and no PK is not supported",
share->key);
if (push_warning)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -2785,7 +2785,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
failed, print a warning
*/
if (push_warning > 1)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -2813,7 +2813,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
dict->dropEvent(my_event.getName()))
{
if (push_warning > 1)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -2832,7 +2832,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
if (dict->createEvent(my_event))
{
if (push_warning > 1)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -2845,7 +2845,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
DBUG_RETURN(-1);
}
#ifdef NDB_BINLOG_EXTRA_WARNINGS
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
0, "NDB Binlog: Removed trailing event",
"NDB");
@@ -2956,7 +2956,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
{
sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
" %s",event_name);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndb->getNdbError().code,
ndb->getNdbError().message,
@@ -3005,7 +3005,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
sql_print_error("NDB Binlog: Creating NdbEventOperation"
" blob field %u handles failed (code=%d) for %s",
j, op->getNdbError().code, event_name);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
op->getNdbError().code,
op->getNdbError().message,
@@ -3044,7 +3044,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
retries= 0;
if (retries == 0)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
op->getNdbError().code, op->getNdbError().message,
"NDB");
@@ -3112,7 +3112,7 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
if (dict->getNdbError().code != 4710)
{
/* drop event failed for some reason, issue a warning */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -3390,14 +3390,14 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= 0;
- IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[0],
+ int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0],
blobs_buffer_size[0],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
- IF_DBUG(int ret=) trans.write_row(originating_server_id,
+ int ret __attribute__((unused))= trans.write_row(originating_server_id,
injector::transaction::table(table,
TRUE),
&b, n_fields, table->record[0]);
@@ -3429,7 +3429,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= table->record[n] - table->record[0];
- IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[n],
+ int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[n],
blobs_buffer[n],
blobs_buffer_size[n],
ptrdiff);
@@ -3437,7 +3437,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
}
ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
DBUG_EXECUTE("info", print_records(table, table->record[n]););
- IF_DBUG(int ret =) trans.delete_row(originating_server_id,
+ int ret __attribute__((unused))= trans.delete_row(originating_server_id,
injector::transaction::table(table,
TRUE),
&b, n_fields, table->record[n]);
@@ -3452,7 +3452,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= 0;
- IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[0],
+ int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0],
blobs_buffer_size[0],
ptrdiff);
@@ -3480,7 +3480,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= table->record[1] - table->record[0];
- IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[1],
+ int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[1],
blobs_buffer[1],
blobs_buffer_size[1],
ptrdiff);
@@ -3488,7 +3488,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
}
ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
DBUG_EXECUTE("info", print_records(table, table->record[1]););
- IF_DBUG(int ret =) trans.update_row(originating_server_id,
+ int ret __attribute__((unused))= trans.update_row(originating_server_id,
injector::transaction::table(table,
TRUE),
&b, n_fields,
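The IF_DBUG() wrapper used to declare ret only in debug builds; the replacement keeps the call unconditional and marks the result variable unused so non-debug builds compile without warnings. A minimal sketch of the pattern, assuming a GCC-compatible compiler that accepts __attribute__((unused)):

  #include <cassert>

  static int write_row_stub()            // stands in for trans.write_row() etc.
  {
    return 0;
  }

  void consume_result()
  {
    // The call always runs; the assert (and therefore the variable) only
    // matters in debug builds, and the attribute silences the unused-variable
    // warning in release builds where assert() compiles away.
    int ret __attribute__((unused))= write_row_stub();
    assert(ret == 0);
  }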
@@ -3558,9 +3558,9 @@ static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key,
if (!have_lock)
pthread_mutex_lock(&ndbcluster_mutex);
while (!(ndb_schema_object=
- (NDB_SCHEMA_OBJECT*) hash_search(&ndb_schema_objects,
- (uchar*) key,
- length)))
+ (NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects,
+ (uchar*) key,
+ length)))
{
if (!create_if_not_exists)
{
@@ -3609,7 +3609,7 @@ static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object,
if (!--(*ndb_schema_object)->use_count)
{
DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count));
- hash_delete(&ndb_schema_objects, (uchar*) *ndb_schema_object);
+ my_hash_delete(&ndb_schema_objects, (uchar*) *ndb_schema_object);
pthread_mutex_destroy(&(*ndb_schema_object)->mutex);
my_free((uchar*) *ndb_schema_object, MYF(0));
*ndb_schema_object= 0;
@@ -3715,8 +3715,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
}
/* init hash for schema object distribution */
- (void) hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0,
- (hash_get_key)ndb_schema_objects_get_key, 0, 0);
+ (void) my_hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0,
+ (my_hash_get_key)ndb_schema_objects_get_key, 0, 0);
/*
Expose global reference to our ndb object.
@@ -3792,7 +3792,7 @@ restart:
{ C_STRING_WITH_LEN("mysqld startup") },
{ C_STRING_WITH_LEN("cluster disconnect")}
};
- IF_DBUG(int error=)
+ int error __attribute__((unused))=
inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]);
DBUG_ASSERT(!error);
break;
@@ -4107,7 +4107,7 @@ restart:
DBUG_PRINT("info", ("use_table: %.*s",
(int) name.length, name.str));
injector::transaction::table tbl(table, TRUE);
- IF_DBUG(int ret=) trans.use_table(::server_id, tbl);
+ int ret __attribute__((unused))= trans.use_table(::server_id, tbl);
DBUG_ASSERT(ret == 0);
}
}
@@ -4123,7 +4123,7 @@ restart:
(int) name.length, name.str));
#endif
injector::transaction::table tbl(table, TRUE);
- IF_DBUG(int ret=) trans.use_table(::server_id, tbl);
+ int ret __attribute__((unused))= trans.use_table(::server_id, tbl);
DBUG_ASSERT(ret == 0);
/*
@@ -4193,7 +4193,7 @@ restart:
else
{
// set injector_ndb database/schema from table internal name
- IF_DBUG(int ret=)
+ int ret __attribute__((unused))=
i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
DBUG_ASSERT(ret == 0);
ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row);
@@ -4367,7 +4367,7 @@ err:
i_ndb= 0;
}
- hash_free(&ndb_schema_objects);
+ my_hash_free(&ndb_schema_objects);
net_end(&thd->net);
thd->cleanup();
diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h
index 1cad643e5ec..d80dfe9ee74 100644
--- a/sql/ha_ndbcluster_binlog.h
+++ b/sql/ha_ndbcluster_binlog.h
@@ -1,3 +1,6 @@
+#ifndef HA_NDBCLUSTER_BINLOG_INCLUDED
+#define HA_NDBCLUSTER_BINLOG_INCLUDED
+
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -225,3 +228,5 @@ set_thd_ndb(THD *thd, Thd_ndb *thd_ndb)
{ thd_set_ha_data(thd, ndbcluster_hton, thd_ndb); }
Ndb* check_ndb_in_thd(THD* thd);
+
+#endif /* HA_NDBCLUSTER_BINLOG_INCLUDED */
diff --git a/sql/ha_ndbcluster_cond.h b/sql/ha_ndbcluster_cond.h
index 4401a93c9e1..4ccc7e062ec 100644
--- a/sql/ha_ndbcluster_cond.h
+++ b/sql/ha_ndbcluster_cond.h
@@ -1,3 +1,6 @@
+#ifndef HA_NDBCLUSTER_COND_INCLUDED
+#define HA_NDBCLUSTER_COND_INCLUDED
+
/* Copyright (C) 2000-2007 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -486,3 +489,5 @@ private:
Ndb_cond_stack *m_cond_stack;
};
+
+#endif /* HA_NDBCLUSTER_COND_INCLUDED */
diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h
index c6bc8f577f8..ba2e8ec251b 100644
--- a/sql/ha_ndbcluster_tables.h
+++ b/sql/ha_ndbcluster_tables.h
@@ -1,3 +1,6 @@
+#ifndef HA_NDBCLUSTER_TABLES_INCLUDED
+#define HA_NDBCLUSTER_TABLES_INCLUDED
+
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -21,3 +24,5 @@
#define OLD_NDB_APPLY_TABLE "apply_status"
#define NDB_SCHEMA_TABLE "ndb_schema"
#define OLD_NDB_SCHEMA_TABLE "schema"
+
+#endif /* HA_NDBCLUSTER_TABLES_INCLUDED */
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 451631ff373..7e5eccb2374 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1,4 +1,4 @@
-/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -245,7 +245,7 @@ void ha_partition::init_handler_variables()
/*
this allows blackhole to work properly
*/
- m_no_locks= 0;
+ m_num_locks= 0;
#ifdef DONT_HAVE_TO_BE_INITALIZED
m_start_key.flag= 0;
@@ -580,8 +580,8 @@ int ha_partition::drop_partitions(const char *path)
{
List_iterator<partition_element> part_it(m_part_info->partitions);
char part_name_buff[FN_REFLEN];
- uint no_parts= m_part_info->partitions.elements;
- uint no_subparts= m_part_info->no_subparts;
+ uint num_parts= m_part_info->partitions.elements;
+ uint num_subparts= m_part_info->num_subparts;
uint i= 0;
uint name_variant;
int ret_error;
@@ -611,7 +611,7 @@ int ha_partition::drop_partitions(const char *path)
do
{
partition_element *sub_elem= sub_it++;
- part= i * no_subparts + j;
+ part= i * num_subparts + j;
create_subpartition_name(part_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name, name_variant);
@@ -621,7 +621,7 @@ int ha_partition::drop_partitions(const char *path)
error= ret_error;
if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
error= 1;
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -640,7 +640,7 @@ int ha_partition::drop_partitions(const char *path)
else
part_elem->part_state= PART_IS_DROPPED;
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
VOID(sync_ddl_log());
DBUG_RETURN(error);
}
@@ -671,9 +671,9 @@ int ha_partition::rename_partitions(const char *path)
List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
char part_name_buff[FN_REFLEN];
char norm_name_buff[FN_REFLEN];
- uint no_parts= m_part_info->partitions.elements;
+ uint num_parts= m_part_info->partitions.elements;
uint part_count= 0;
- uint no_subparts= m_part_info->no_subparts;
+ uint num_subparts= m_part_info->num_subparts;
uint i= 0;
uint j= 0;
int error= 0;
@@ -722,7 +722,7 @@ int ha_partition::rename_partitions(const char *path)
error= 1;
else
sub_elem->log_entry= NULL; /* Indicate success */
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -778,7 +778,7 @@ int ha_partition::rename_partitions(const char *path)
do
{
sub_elem= sub_it++;
- part= i * no_subparts + j;
+ part= i * num_subparts + j;
create_subpartition_name(norm_name_buff, path,
part_elem->partition_name,
sub_elem->partition_name,
@@ -807,7 +807,7 @@ int ha_partition::rename_partitions(const char *path)
error= 1;
else
sub_elem->log_entry= NULL;
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -839,7 +839,7 @@ int ha_partition::rename_partitions(const char *path)
part_elem->log_entry= NULL;
}
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
VOID(sync_ddl_log());
DBUG_RETURN(error);
}
@@ -849,9 +849,12 @@ int ha_partition::rename_partitions(const char *path)
#define ANALYZE_PARTS 2
#define CHECK_PARTS 3
#define REPAIR_PARTS 4
+#define ASSIGN_KEYCACHE_PARTS 5
+#define PRELOAD_KEYS_PARTS 6
static const char *opt_op_name[]= {NULL,
- "optimize", "analyze", "check", "repair" };
+ "optimize", "analyze", "check", "repair",
+ "assign_to_keycache", "preload_keys"};
/*
Optimize table
@@ -936,7 +939,44 @@ int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
DBUG_RETURN(handle_opt_partitions(thd, check_opt, REPAIR_PARTS));
}
+/**
+ Assign to keycache
+
+ @param thd Thread object
+ @param check_opt Check/analyze/repair/optimize options
+
+ @return
+ @retval >0 Error
+ @retval 0 Success
+*/
+
+int ha_partition::assign_to_keycache(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::assign_to_keycache");
+
+ DBUG_RETURN(handle_opt_partitions(thd, check_opt, ASSIGN_KEYCACHE_PARTS));
+}
+
+
+/**
+ Preload keys into key cache
+
+ @param thd Thread object
+ @param check_opt Check/analyze/repair/optimize options
+
+ @return
+ @retval >0 Error
+ @retval 0 Success
+*/
+
+int ha_partition::preload_keys(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::preload_keys");
+ DBUG_RETURN(handle_opt_partitions(thd, check_opt, PRELOAD_KEYS_PARTS));
+}
+
+
/*
Handle optimize/analyze/check/repair of one partition
@@ -967,6 +1007,10 @@ static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
error= file->ha_check(thd, check_opt);
else if (flag == REPAIR_PARTS)
error= file->ha_repair(thd, check_opt);
+ else if (flag == ASSIGN_KEYCACHE_PARTS)
+ error= file->assign_to_keycache(thd, check_opt);
+ else if (flag == PRELOAD_KEYS_PARTS)
+ error= file->preload_keys(thd, check_opt);
else
{
DBUG_ASSERT(FALSE);
@@ -1049,8 +1093,8 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
uint flag)
{
List_iterator<partition_element> part_it(m_part_info->partitions);
- uint no_parts= m_part_info->no_parts;
- uint no_subparts= m_part_info->no_subparts;
+ uint num_parts= m_part_info->num_parts;
+ uint num_subparts= m_part_info->num_subparts;
uint i= 0;
int error;
DBUG_ENTER("ha_partition::handle_opt_partitions");
@@ -1064,7 +1108,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
it should only do named partitions, otherwise all partitions
*/
if (!(thd->lex->alter_info.flags & ALTER_ADMIN_PARTITION) ||
- part_elem->part_state == PART_CHANGED)
+ part_elem->part_state == PART_ADMIN)
{
if (m_is_sub_partitioned)
{
@@ -1074,7 +1118,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
do
{
sub_elem= subpart_it++;
- part= i * no_subparts + j;
+ part= i * num_subparts + j;
DBUG_PRINT("info", ("Optimize subpartition %u (%s)",
part, sub_elem->partition_name));
#ifdef NOT_USED
@@ -1096,9 +1140,15 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
"Subpartition %s returned error",
sub_elem->partition_name);
}
+ /* reset part_state for the remaining partitions */
+ do
+ {
+ if (part_elem->part_state == PART_ADMIN)
+ part_elem->part_state= PART_NORMAL;
+ } while (part_elem= part_it++);
DBUG_RETURN(error);
}
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -1122,11 +1172,18 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
opt_op_name[flag], "Partition %s returned error",
part_elem->partition_name);
}
+ /* reset part_state for the remaining partitions */
+ do
+ {
+ if (part_elem->part_state == PART_ADMIN)
+ part_elem->part_state= PART_NORMAL;
+ } while (part_elem= part_it++);
DBUG_RETURN(error);
}
}
+ part_elem->part_state= PART_NORMAL;
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
DBUG_RETURN(FALSE);
}
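If one partition's admin operation fails, the new code above flips PART_ADMIN back to PART_NORMAL on the remaining partitions before the early return, so a failed ALTER ... CHECK/ANALYZE/REPAIR PARTITION does not leave stale state behind. A rough standalone model of that cleanup-on-early-exit shape (the enum and Partition struct are illustrative, not the server's partition_element):

  #include <cstddef>
  #include <vector>

  enum part_state_t { PART_NORMAL, PART_ADMIN };

  struct Partition
  {
    part_state_t state;
    bool         admin_fails;          // pretend result of the admin operation
  };

  // Returns 0 on success, non-zero on the first failing partition.
  int handle_admin(std::vector<Partition> &parts)
  {
    for (std::size_t i= 0; i < parts.size(); i++)
    {
      if (parts[i].state != PART_ADMIN)
        continue;
      if (parts[i].admin_fails)
      {
        // Early error return: reset the remaining marked partitions first.
        for (std::size_t j= i; j < parts.size(); j++)
          if (parts[j].state == PART_ADMIN)
            parts[j].state= PART_NORMAL;
        return 1;
      }
      parts[i].state= PART_NORMAL;     // done with this partition
    }
    return 0;
  }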
@@ -1330,10 +1387,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
List_iterator<partition_element> part_it(m_part_info->partitions);
List_iterator <partition_element> t_it(m_part_info->temp_partitions);
char part_name_buff[FN_REFLEN];
- uint no_parts= m_part_info->partitions.elements;
- uint no_subparts= m_part_info->no_subparts;
+ uint num_parts= m_part_info->partitions.elements;
+ uint num_subparts= m_part_info->num_subparts;
uint i= 0;
- uint no_remain_partitions, part_count, orig_count;
+ uint num_remain_partitions, part_count, orig_count;
handler **new_file_array;
int error= 1;
bool first;
@@ -1349,7 +1406,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_name_buff)));
m_reorged_parts= 0;
if (!m_part_info->is_sub_partitioned())
- no_subparts= 1;
+ num_subparts= 1;
/*
Step 1:
@@ -1358,7 +1415,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
*/
if (temp_partitions)
{
- m_reorged_parts= temp_partitions * no_subparts;
+ m_reorged_parts= temp_partitions * num_subparts;
}
else
{
@@ -1368,9 +1425,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
if (part_elem->part_state == PART_CHANGED ||
part_elem->part_state == PART_REORGED_DROPPED)
{
- m_reorged_parts+= no_subparts;
+ m_reorged_parts+= num_subparts;
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
}
if (m_reorged_parts &&
!(m_reorged_file= (handler**)sql_calloc(sizeof(handler*)*
@@ -1385,10 +1442,10 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
Calculate number of partitions after change and allocate space for
their handler references.
*/
- no_remain_partitions= 0;
+ num_remain_partitions= 0;
if (temp_partitions)
{
- no_remain_partitions= no_parts * no_subparts;
+ num_remain_partitions= num_parts * num_subparts;
}
else
{
@@ -1401,17 +1458,17 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->part_state == PART_TO_BE_ADDED ||
part_elem->part_state == PART_CHANGED)
{
- no_remain_partitions+= no_subparts;
+ num_remain_partitions+= num_subparts;
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
}
if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)*
- (2*(no_remain_partitions + 1)))))
+ (2*(num_remain_partitions + 1)))))
{
- mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
+ mem_alloc_error(sizeof(handler*)*2*(num_remain_partitions+1));
DBUG_RETURN(ER_OUTOFMEMORY);
}
- m_added_file= &new_file_array[no_remain_partitions + 1];
+ m_added_file= &new_file_array[num_remain_partitions + 1];
/*
Step 3:
@@ -1430,9 +1487,9 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->part_state == PART_REORGED_DROPPED)
{
memcpy((void*)&m_reorged_file[part_count],
- (void*)&m_file[i*no_subparts],
- sizeof(handler*)*no_subparts);
- part_count+= no_subparts;
+ (void*)&m_file[i*num_subparts],
+ sizeof(handler*)*num_subparts);
+ part_count+= num_subparts;
}
else if (first && temp_partitions &&
part_elem->part_state == PART_TO_BE_ADDED)
@@ -1447,11 +1504,11 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
ones used to be.
*/
first= FALSE;
- DBUG_ASSERT(((i*no_subparts) + m_reorged_parts) <= m_file_tot_parts);
- memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
+ DBUG_ASSERT(((i*num_subparts) + m_reorged_parts) <= m_file_tot_parts);
+ memcpy((void*)m_reorged_file, &m_file[i*num_subparts],
sizeof(handler*)*m_reorged_parts);
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
}
/*
@@ -1469,11 +1526,11 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
partition_element *part_elem= part_it++;
if (part_elem->part_state == PART_NORMAL)
{
- DBUG_ASSERT(orig_count + no_subparts <= m_file_tot_parts);
+ DBUG_ASSERT(orig_count + num_subparts <= m_file_tot_parts);
memcpy((void*)&new_file_array[part_count], (void*)&m_file[orig_count],
- sizeof(handler*)*no_subparts);
- part_count+= no_subparts;
- orig_count+= no_subparts;
+ sizeof(handler*)*num_subparts);
+ part_count+= num_subparts;
+ orig_count+= num_subparts;
}
else if (part_elem->part_state == PART_CHANGED ||
part_elem->part_state == PART_TO_BE_ADDED)
@@ -1489,16 +1546,16 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
mem_alloc_error(sizeof(handler));
DBUG_RETURN(ER_OUTOFMEMORY);
}
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
if (part_elem->part_state == PART_CHANGED)
- orig_count+= no_subparts;
+ orig_count+= num_subparts;
else if (temp_partitions && first)
{
- orig_count+= (no_subparts * temp_partitions);
+ orig_count+= (num_subparts * temp_partitions);
first= FALSE;
}
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
first= FALSE;
/*
Step 5:
@@ -1535,7 +1592,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->partition_name,
sub_elem->partition_name,
name_variant);
- part= i * no_subparts + j;
+ part= i * num_subparts + j;
DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
if ((error= prepare_new_partition(table, create_info,
new_file_array[part],
@@ -1546,7 +1603,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
DBUG_RETURN(error);
}
m_added_file[part_count++]= new_file_array[part];
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -1565,7 +1622,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
m_added_file[part_count++]= new_file_array[i];
}
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
/*
Step 6:
@@ -1582,7 +1639,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->part_state= PART_IS_CHANGED;
else if (part_elem->part_state == PART_REORGED_DROPPED)
part_elem->part_state= PART_TO_BE_DROPPED;
- } while (++i < no_parts);
+ } while (++i < num_parts);
for (i= 0; i < temp_partitions; i++)
{
partition_element *part_elem= t_it++;
@@ -1623,9 +1680,9 @@ int ha_partition::copy_partitions(ulonglong * const copied,
if (m_part_info->linear_hash_ind)
{
if (m_part_info->part_type == HASH_PARTITION)
- set_linear_hash_mask(m_part_info, m_part_info->no_parts);
+ set_linear_hash_mask(m_part_info, m_part_info->num_parts);
else
- set_linear_hash_mask(m_part_info, m_part_info->no_subparts);
+ set_linear_hash_mask(m_part_info, m_part_info->num_subparts);
}
while (reorg_part < m_reorged_parts)
@@ -1904,7 +1961,7 @@ partition_element *ha_partition::find_partition_element(uint part_id)
uint curr_part_id= 0;
List_iterator_fast <partition_element> part_it(m_part_info->partitions);
- for (i= 0; i < m_part_info->no_parts; i++)
+ for (i= 0; i < m_part_info->num_parts; i++)
{
partition_element *part_elem;
part_elem= part_it++;
@@ -1912,7 +1969,7 @@ partition_element *ha_partition::find_partition_element(uint part_id)
{
uint j;
List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
- for (j= 0; j < m_part_info->no_subparts; j++)
+ for (j= 0; j < m_part_info->num_subparts; j++)
{
part_elem= sub_it++;
if (part_id == curr_part_id++)
@@ -2033,7 +2090,7 @@ bool ha_partition::create_handler_file(const char *name)
{
partition_element *part_elem, *subpart_elem;
uint i, j, part_name_len, subpart_name_len;
- uint tot_partition_words, tot_name_len, no_parts;
+ uint tot_partition_words, tot_name_len, num_parts;
uint tot_parts= 0;
uint tot_len_words, tot_len_byte, chksum, tot_name_words;
char *name_buffer_ptr;
@@ -2046,11 +2103,11 @@ bool ha_partition::create_handler_file(const char *name)
List_iterator_fast <partition_element> part_it(m_part_info->partitions);
DBUG_ENTER("create_handler_file");
- no_parts= m_part_info->partitions.elements;
- DBUG_PRINT("info", ("table name = %s, no_parts = %u", name,
- no_parts));
+ num_parts= m_part_info->partitions.elements;
+ DBUG_PRINT("info", ("table name = %s, num_parts = %u", name,
+ num_parts));
tot_name_len= 0;
- for (i= 0; i < no_parts; i++)
+ for (i= 0; i < num_parts; i++)
{
part_elem= part_it++;
if (part_elem->part_state != PART_NORMAL &&
@@ -2068,7 +2125,7 @@ bool ha_partition::create_handler_file(const char *name)
else
{
List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
- for (j= 0; j < m_part_info->no_subparts; j++)
+ for (j= 0; j < m_part_info->num_subparts; j++)
{
subpart_elem= sub_it++;
tablename_to_filename(subpart_elem->partition_name,
@@ -2102,7 +2159,7 @@ bool ha_partition::create_handler_file(const char *name)
engine_array= (file_buffer + 12);
name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
part_it.rewind();
- for (i= 0; i < no_parts; i++)
+ for (i= 0; i < num_parts; i++)
{
part_elem= part_it++;
if (part_elem->part_state != PART_NORMAL &&
@@ -2120,7 +2177,7 @@ bool ha_partition::create_handler_file(const char *name)
else
{
List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
- for (j= 0; j < m_part_info->no_subparts; j++)
+ for (j= 0; j < m_part_info->num_subparts; j++)
{
subpart_elem= sub_it++;
tablename_to_filename(part_elem->partition_name, part_name,
@@ -2256,7 +2313,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
}
m_file_tot_parts= m_tot_parts;
bzero((char*) m_file, alloc_len);
- DBUG_ASSERT(m_part_info->no_parts > 0);
+ DBUG_ASSERT(m_part_info->num_parts > 0);
i= 0;
part_count= 0;
@@ -2269,7 +2326,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
part_elem= part_it++;
if (m_is_sub_partitioned)
{
- for (j= 0; j < m_part_info->no_subparts; j++)
+ for (j= 0; j < m_part_info->num_subparts; j++)
{
if (!(m_file[part_count++]= get_new_handler(table_share, mem_root,
part_elem->engine_type)))
@@ -2286,7 +2343,7 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
DBUG_PRINT("info", ("engine_type: %u",
(uint) ha_legacy_type(part_elem->engine_type)));
}
- } while (++i < m_part_info->no_parts);
+ } while (++i < m_part_info->num_parts);
if (part_elem->engine_type == myisam_hton)
{
DBUG_PRINT("info", ("MyISAM"));
@@ -2395,6 +2452,21 @@ err1:
/****************************************************************************
MODULE open/close object
****************************************************************************/
+
+
+/**
+ A destructor for partition-specific TABLE_SHARE data.
+*/
+
+void ha_data_partition_destroy(void *ha_data)
+{
+ if (ha_data)
+ {
+ HA_DATA_PARTITION *ha_data_partition= (HA_DATA_PARTITION*) ha_data;
+ pthread_mutex_destroy(&ha_data_partition->mutex);
+ }
+}
+
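The destroy callback above pairs with the pthread_mutex_init() added in ha_partition::open() and the mutex member added to HA_DATA_PARTITION in ha_partition.h: share data that owns a mutex now gets its pthread_mutex_destroy() when the share goes away. A self-contained sketch of that init/destroy pairing, using plain pthreads and invented names rather than the server's TABLE_SHARE:

  #include <pthread.h>
  #include <cstdlib>

  struct PartitionShareData
  {
    unsigned long long next_auto_inc_val;
    bool               auto_inc_initialized;
    pthread_mutex_t    mutex;
  };

  typedef void (*destroy_cb)(void *);

  static void partition_share_destroy(void *data)
  {
    if (data)
      pthread_mutex_destroy(&static_cast<PartitionShareData*>(data)->mutex);
  }

  // Allocate and zero the share data, init its mutex, and hand back the
  // destroy callback the owner must invoke before freeing the memory.
  PartitionShareData *partition_share_create(destroy_cb *out_destroy)
  {
    PartitionShareData *d=
      static_cast<PartitionShareData*>(std::calloc(1, sizeof(PartitionShareData)));
    if (!d)
      return NULL;
    pthread_mutex_init(&d->mutex, NULL);
    *out_destroy= partition_share_destroy;
    return d;
  }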
/*
Open handler object
@@ -2489,7 +2561,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
test_if_locked)))
goto err_handler;
- m_no_locks+= (*file)->lock_count();
+ m_num_locks+= (*file)->lock_count();
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
set_if_bigger(ref_length, ((*file)->ref_length));
/*
@@ -2551,6 +2623,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
}
DBUG_PRINT("info", ("table_share->ha_data 0x%p", ha_data));
bzero(ha_data, sizeof(HA_DATA_PARTITION));
+ table_share->ha_data_destroy= ha_data_partition_destroy;
+ pthread_mutex_init(&ha_data->mutex, MY_MUTEX_INIT_FAST);
}
if (is_not_tmp_table)
pthread_mutex_unlock(&table_share->mutex);
@@ -2836,8 +2910,8 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
uint ha_partition::lock_count() const
{
DBUG_ENTER("ha_partition::lock_count");
- DBUG_PRINT("info", ("m_no_locks %d", m_no_locks));
- DBUG_RETURN(m_no_locks);
+ DBUG_PRINT("info", ("m_num_locks %d", m_num_locks));
+ DBUG_RETURN(m_num_locks);
}
@@ -3221,6 +3295,9 @@ int ha_partition::delete_row(const uchar *buf)
Called from sql_delete.cc by mysql_delete().
Called from sql_select.cc by JOIN::reinit().
Called from sql_union.cc by st_select_lex_unit::exec().
+
+ Also used to handle ALTER TABLE t TRUNCATE PARTITION ...
+ NOTE: the auto_increment value will be reset in those partitions as well!
*/
int ha_partition::delete_all_rows()
@@ -3233,11 +3310,84 @@ int ha_partition::delete_all_rows()
if (thd->lex->sql_command == SQLCOM_TRUNCATE)
{
+ Alter_info *alter_info= &thd->lex->alter_info;
HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+ /* TRUNCATE also means resetting auto_increment */
lock_auto_increment();
ha_data->next_auto_inc_val= 0;
ha_data->auto_inc_initialized= FALSE;
unlock_auto_increment();
+ if (alter_info->flags & ALTER_ADMIN_PARTITION)
+ {
+ /* ALTER TABLE t TRUNCATE PARTITION ... */
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ int saved_error= 0;
+ uint num_parts= m_part_info->num_parts;
+ uint num_subparts= m_part_info->num_subparts;
+ uint i= 0;
+ uint num_parts_set= alter_info->partition_names.elements;
+ uint num_parts_found= set_part_state(alter_info, m_part_info,
+ PART_ADMIN);
+ if (num_parts_set != num_parts_found &&
+ (!(alter_info->flags & ALTER_ALL_PARTITION)))
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ /*
+ Cannot return HA_ERR_WRONG_COMMAND here without correct pruning
+ since that would delete the whole table row by row in sql_delete.cc
+ */
+ bitmap_clear_all(&m_part_info->used_partitions);
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_ADMIN)
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element>
+ subpart_it(part_elem->subpartitions);
+ partition_element *sub_elem;
+ uint j= 0, part;
+ do
+ {
+ sub_elem= subpart_it++;
+ part= i * num_subparts + j;
+ bitmap_set_bit(&m_part_info->used_partitions, part);
+ if (!saved_error)
+ {
+ DBUG_PRINT("info", ("truncate subpartition %u (%s)",
+ part, sub_elem->partition_name));
+ if ((error= m_file[part]->ha_delete_all_rows()))
+ saved_error= error;
+ /* If reset_auto_increment is not supported, just accept it */
+ if (!saved_error &&
+ (error= m_file[part]->ha_reset_auto_increment(0)) &&
+ error != HA_ERR_WRONG_COMMAND)
+ saved_error= error;
+ }
+ } while (++j < num_subparts);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("truncate partition %u (%s)", i,
+ part_elem->partition_name));
+ bitmap_set_bit(&m_part_info->used_partitions, i);
+ if (!saved_error)
+ {
+ if ((error= m_file[i]->ha_delete_all_rows()) && !saved_error)
+ saved_error= error;
+ /* If reset_auto_increment is not supported, just accept it */
+ if (!saved_error &&
+ (error= m_file[i]->ha_reset_auto_increment(0)) &&
+ error != HA_ERR_WRONG_COMMAND)
+ saved_error= error;
+ }
+ }
+ part_elem->part_state= PART_NORMAL;
+ }
+ } while (++i < num_parts);
+ DBUG_RETURN(saved_error);
+ }
truncate= TRUE;
}
file= m_file;
@@ -5978,12 +6128,14 @@ enum row_type ha_partition::get_row_type() const
void ha_partition::print_error(int error, myf errflag)
{
+ THD *thd= ha_thd();
DBUG_ENTER("ha_partition::print_error");
/* Should probably look for my own errors first */
DBUG_PRINT("enter", ("error: %d", error));
- if (error == HA_ERR_NO_PARTITION_FOUND)
+ if (error == HA_ERR_NO_PARTITION_FOUND &&
+ thd->lex->sql_command != SQLCOM_TRUNCATE)
m_part_info->print_no_partition_found(table);
else
m_file[m_last_part]->print_error(error, errflag);
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index c08b1f77eca..d4579d013fd 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1,4 +1,7 @@
-/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+#ifndef HA_PARTITION_INCLUDED
+#define HA_PARTITION_INCLUDED
+
+/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,7 +22,8 @@
enum partition_keywords
{
- PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR
+ PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR,
+ PKW_COLUMNS
};
/*
@@ -45,6 +49,7 @@ typedef struct st_ha_data_partition
{
ulonglong next_auto_inc_val; /**< first non reserved value */
bool auto_inc_initialized;
+ pthread_mutex_t mutex;
} HA_DATA_PARTITION;
#define PARTITION_BYTES_IN_POS 2
@@ -111,7 +116,7 @@ private:
uint m_reorged_parts; // Number of reorganised parts
uint m_tot_parts; // Total number of partitions;
- uint m_no_locks; // For engines like ha_blackhole, which needs no locks
+ uint m_num_locks; // For engines like ha_blackhole, which needs no locks
uint m_last_part; // Last file that we update,write,read
int m_lock_type; // Remembers type of last
// external_lock
@@ -243,10 +248,10 @@ public:
size_t pack_frm_len);
virtual int drop_partitions(const char *path);
virtual int rename_partitions(const char *path);
- bool get_no_parts(const char *name, uint *no_parts)
+ bool get_no_parts(const char *name, uint *num_parts)
{
DBUG_ENTER("ha_partition::get_no_parts");
- *no_parts= m_tot_parts;
+ *num_parts= m_tot_parts;
DBUG_RETURN(0);
}
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
@@ -1072,12 +1077,13 @@ public:
virtual int backup(TD* thd, HA_CHECK_OPT *check_opt);
virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
virtual int dump(THD* thd, int fd = -1);
virtual int net_read_dump(NET* net);
virtual uint checksum() const;
*/
+ /* Enabled keycache for performance reasons, WL#4571 */
+ virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
/*
-------------------------------------------------------------------------
@@ -1101,3 +1107,5 @@ public:
virtual void append_create_info(String *packet)
*/
};
+
+#endif /* HA_PARTITION_INCLUDED */
diff --git a/sql/handler.cc b/sql/handler.cc
index 397c4ce7335..ad7e1ecfa80 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -24,6 +24,7 @@
#endif
#include "mysql_priv.h"
+#include "rpl_handler.h"
#include "rpl_filter.h"
#include <myisampack.h>
#include <errno.h>
@@ -222,6 +223,8 @@ handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
return NULL;
}
+ RUN_HOOK(transaction, after_rollback, (thd, FALSE));
+
switch (database_type) {
#ifndef NO_HASH
case DB_TYPE_HASH:
@@ -283,6 +286,15 @@ handler *get_ha_partition(partition_info *part_info)
#endif
+const char **handler_errmsgs;
+
+
+const char **get_handler_errmsgs()
+{
+ return handler_errmsgs;
+}
+
+
/**
Register handler error messages for use with my_error().
@@ -294,61 +306,61 @@ handler *get_ha_partition(partition_info *part_info)
int ha_init_errors(void)
{
-#define SETMSG(nr, msg) errmsgs[(nr) - HA_ERR_FIRST]= (msg)
+#define SETMSG(nr, msg) handler_errmsgs[(nr) - HA_ERR_FIRST]= (msg)
const char **errmsgs;
/* Allocate a pointer array for the error message strings. */
/* Zerofill it to avoid uninitialized gaps. */
- if (! (errmsgs= (const char**) my_malloc(HA_ERR_ERRORS * sizeof(char*),
- MYF(MY_WME | MY_ZEROFILL))))
+ if (! (handler_errmsgs= (const char**) my_malloc(HA_ERR_ERRORS * sizeof(char*),
+ MYF(MY_WME | MY_ZEROFILL))))
return 1;
/* Set the dedicated error messages. */
- SETMSG(HA_ERR_KEY_NOT_FOUND, ER(ER_KEY_NOT_FOUND));
- SETMSG(HA_ERR_FOUND_DUPP_KEY, ER(ER_DUP_KEY));
+ SETMSG(HA_ERR_KEY_NOT_FOUND, ER_DEFAULT(ER_KEY_NOT_FOUND));
+ SETMSG(HA_ERR_FOUND_DUPP_KEY, ER_DEFAULT(ER_DUP_KEY));
SETMSG(HA_ERR_RECORD_CHANGED, "Update wich is recoverable");
SETMSG(HA_ERR_WRONG_INDEX, "Wrong index given to function");
- SETMSG(HA_ERR_CRASHED, ER(ER_NOT_KEYFILE));
- SETMSG(HA_ERR_WRONG_IN_RECORD, ER(ER_CRASHED_ON_USAGE));
+ SETMSG(HA_ERR_CRASHED, ER_DEFAULT(ER_NOT_KEYFILE));
+ SETMSG(HA_ERR_WRONG_IN_RECORD, ER_DEFAULT(ER_CRASHED_ON_USAGE));
SETMSG(HA_ERR_OUT_OF_MEM, "Table handler out of memory");
SETMSG(HA_ERR_NOT_A_TABLE, "Incorrect file format '%.64s'");
SETMSG(HA_ERR_WRONG_COMMAND, "Command not supported");
- SETMSG(HA_ERR_OLD_FILE, ER(ER_OLD_KEYFILE));
+ SETMSG(HA_ERR_OLD_FILE, ER_DEFAULT(ER_OLD_KEYFILE));
SETMSG(HA_ERR_NO_ACTIVE_RECORD, "No record read in update");
SETMSG(HA_ERR_RECORD_DELETED, "Intern record deleted");
- SETMSG(HA_ERR_RECORD_FILE_FULL, ER(ER_RECORD_FILE_FULL));
+ SETMSG(HA_ERR_RECORD_FILE_FULL, ER_DEFAULT(ER_RECORD_FILE_FULL));
SETMSG(HA_ERR_INDEX_FILE_FULL, "No more room in index file '%.64s'");
SETMSG(HA_ERR_END_OF_FILE, "End in next/prev/first/last");
- SETMSG(HA_ERR_UNSUPPORTED, ER(ER_ILLEGAL_HA));
+ SETMSG(HA_ERR_UNSUPPORTED, ER_DEFAULT(ER_ILLEGAL_HA));
SETMSG(HA_ERR_TO_BIG_ROW, "Too big row");
SETMSG(HA_WRONG_CREATE_OPTION, "Wrong create option");
- SETMSG(HA_ERR_FOUND_DUPP_UNIQUE, ER(ER_DUP_UNIQUE));
+ SETMSG(HA_ERR_FOUND_DUPP_UNIQUE, ER_DEFAULT(ER_DUP_UNIQUE));
SETMSG(HA_ERR_UNKNOWN_CHARSET, "Can't open charset");
- SETMSG(HA_ERR_WRONG_MRG_TABLE_DEF, ER(ER_WRONG_MRG_TABLE));
- SETMSG(HA_ERR_CRASHED_ON_REPAIR, ER(ER_CRASHED_ON_REPAIR));
- SETMSG(HA_ERR_CRASHED_ON_USAGE, ER(ER_CRASHED_ON_USAGE));
- SETMSG(HA_ERR_LOCK_WAIT_TIMEOUT, ER(ER_LOCK_WAIT_TIMEOUT));
- SETMSG(HA_ERR_LOCK_TABLE_FULL, ER(ER_LOCK_TABLE_FULL));
- SETMSG(HA_ERR_READ_ONLY_TRANSACTION, ER(ER_READ_ONLY_TRANSACTION));
- SETMSG(HA_ERR_LOCK_DEADLOCK, ER(ER_LOCK_DEADLOCK));
- SETMSG(HA_ERR_CANNOT_ADD_FOREIGN, ER(ER_CANNOT_ADD_FOREIGN));
- SETMSG(HA_ERR_NO_REFERENCED_ROW, ER(ER_NO_REFERENCED_ROW_2));
- SETMSG(HA_ERR_ROW_IS_REFERENCED, ER(ER_ROW_IS_REFERENCED_2));
+ SETMSG(HA_ERR_WRONG_MRG_TABLE_DEF, ER_DEFAULT(ER_WRONG_MRG_TABLE));
+ SETMSG(HA_ERR_CRASHED_ON_REPAIR, ER_DEFAULT(ER_CRASHED_ON_REPAIR));
+ SETMSG(HA_ERR_CRASHED_ON_USAGE, ER_DEFAULT(ER_CRASHED_ON_USAGE));
+ SETMSG(HA_ERR_LOCK_WAIT_TIMEOUT, ER_DEFAULT(ER_LOCK_WAIT_TIMEOUT));
+ SETMSG(HA_ERR_LOCK_TABLE_FULL, ER_DEFAULT(ER_LOCK_TABLE_FULL));
+ SETMSG(HA_ERR_READ_ONLY_TRANSACTION, ER_DEFAULT(ER_READ_ONLY_TRANSACTION));
+ SETMSG(HA_ERR_LOCK_DEADLOCK, ER_DEFAULT(ER_LOCK_DEADLOCK));
+ SETMSG(HA_ERR_CANNOT_ADD_FOREIGN, ER_DEFAULT(ER_CANNOT_ADD_FOREIGN));
+ SETMSG(HA_ERR_NO_REFERENCED_ROW, ER_DEFAULT(ER_NO_REFERENCED_ROW_2));
+ SETMSG(HA_ERR_ROW_IS_REFERENCED, ER_DEFAULT(ER_ROW_IS_REFERENCED_2));
SETMSG(HA_ERR_NO_SAVEPOINT, "No savepoint with that name");
SETMSG(HA_ERR_NON_UNIQUE_BLOCK_SIZE, "Non unique key block size");
SETMSG(HA_ERR_NO_SUCH_TABLE, "No such table: '%.64s'");
- SETMSG(HA_ERR_TABLE_EXIST, ER(ER_TABLE_EXISTS_ERROR));
+ SETMSG(HA_ERR_TABLE_EXIST, ER_DEFAULT(ER_TABLE_EXISTS_ERROR));
SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine");
- SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER(ER_TABLE_DEF_CHANGED));
+ SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER_DEFAULT(ER_TABLE_DEF_CHANGED));
SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key");
- SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER(ER_TABLE_NEEDS_UPGRADE));
- SETMSG(HA_ERR_TABLE_READONLY, ER(ER_OPEN_AS_READONLY));
- SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER(ER_AUTOINC_READ_FAILED));
- SETMSG(HA_ERR_AUTOINC_ERANGE, ER(ER_WARN_DATA_OUT_OF_RANGE));
- SETMSG(HA_ERR_TOO_MANY_CONCURRENT_TRXS, ER(ER_TOO_MANY_CONCURRENT_TRXS));
+ SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER_DEFAULT(ER_TABLE_NEEDS_UPGRADE));
+ SETMSG(HA_ERR_TABLE_READONLY, ER_DEFAULT(ER_OPEN_AS_READONLY));
+ SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER_DEFAULT(ER_AUTOINC_READ_FAILED));
+ SETMSG(HA_ERR_AUTOINC_ERANGE, ER_DEFAULT(ER_WARN_DATA_OUT_OF_RANGE));
+ SETMSG(HA_ERR_TOO_MANY_CONCURRENT_TRXS, ER_DEFAULT(ER_TOO_MANY_CONCURRENT_TRXS));
/* Register the error messages for use with my_error(). */
- return my_error_register(errmsgs, HA_ERR_FIRST, HA_ERR_LAST);
+ return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST);
}
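my_error_register() is now handed a function returning the message array instead of the array itself, so the current table is fetched at lookup time rather than captured once at registration. A tiny sketch of that indirection with invented names (not the mysys API):

  #include <cstdio>

  static const char *messages[]= { "key not found", "duplicate key" };

  typedef const char **(*msgs_getter)();

  static const char **get_messages() { return messages; }

  static msgs_getter registered_getter= NULL;

  // Register a getter instead of a raw array pointer.
  void register_messages(msgs_getter getter) { registered_getter= getter; }

  const char *lookup_message(int idx)
  {
    // The array is re-fetched through the getter on every lookup.
    return registered_getter ? registered_getter()[idx] : "unknown";
  }

  int main()
  {
    register_messages(get_messages);
    std::puts(lookup_message(1));      // prints "duplicate key"
    return 0;
  }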
@@ -414,7 +426,13 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
reuse an array slot. Otherwise the number of uninstall/install
cycles would be limited.
*/
- hton2plugin[hton->slot]= NULL;
+ if (hton->slot != HA_SLOT_UNDEF)
+ {
+ /* Make sure we are not unplugging another plugin */
+ DBUG_ASSERT(hton2plugin[hton->slot] == plugin);
+ DBUG_ASSERT(hton->slot < MAX_HA);
+ hton2plugin[hton->slot]= NULL;
+ }
my_free((uchar*)hton, MYF(0));
@@ -431,6 +449,15 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
hton= (handlerton *)my_malloc(sizeof(handlerton),
MYF(MY_WME | MY_ZEROFILL));
+
+ if (hton == NULL)
+ {
+ sql_print_error("Unable to allocate memory for plugin '%s' handlerton.",
+ plugin->name.str);
+ goto err_no_hton_memory;
+ }
+
+ hton->slot= HA_SLOT_UNDEF;
/* Historical Requirement */
plugin->data= hton; // shortcut for the future
if (plugin->plugin->init && plugin->plugin->init(hton))
@@ -541,6 +568,7 @@ err_deinit:
err:
my_free((uchar*) hton, MYF(0));
+err_no_hton_memory:
plugin->data= NULL;
DBUG_RETURN(1);
}
@@ -1191,6 +1219,7 @@ int ha_commit_trans(THD *thd, bool all)
if (cookie)
tc_log->unlog(cookie, xid);
DBUG_EXECUTE_IF("crash_commit_after", abort(););
+ RUN_HOOK(transaction, after_commit, (thd, FALSE));
end:
if (rw_trans)
start_waiting_global_read_lock(thd);
@@ -1314,7 +1343,7 @@ int ha_rollback_trans(THD *thd, bool all)
trans->no_2pc=0;
if (is_real_trans && thd->transaction_rollback_request &&
thd->transaction.xid_state.xa_state != XA_NOTR)
- thd->transaction.xid_state.rm_error= thd->main_da.sql_errno();
+ thd->transaction.xid_state.rm_error= thd->stmt_da->sql_errno();
if (all)
thd->variables.tx_isolation=thd->session_tx_isolation;
}
@@ -1339,6 +1368,7 @@ int ha_rollback_trans(THD *thd, bool all)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));
+ RUN_HOOK(transaction, after_rollback, (thd, FALSE));
DBUG_RETURN(error);
}
@@ -1373,7 +1403,14 @@ int ha_autocommit_or_rollback(THD *thd, int error)
thd->variables.tx_isolation=thd->session_tx_isolation;
}
+ else
#endif
+ {
+ if (!error)
+ RUN_HOOK(transaction, after_commit, (thd, FALSE));
+ else
+ RUN_HOOK(transaction, after_rollback, (thd, FALSE));
+ }
DBUG_RETURN(error);
}
@@ -1534,7 +1571,7 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
}
// recovery mode
if (info->commit_list ?
- hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
+ my_hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT)
{
#ifndef DBUG_OFF
@@ -1645,12 +1682,12 @@ bool mysql_xa_recover(THD *thd)
field_list.push_back(new Item_int("bqual_length", 0, MY_INT32_NUM_DECIMAL_DIGITS));
field_list.push_back(new Item_empty_string("data",XIDDATASIZE));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(1);
pthread_mutex_lock(&LOCK_xid_cache);
- while ((xs= (XID_STATE*)hash_element(&xid_cache, i++)))
+ while ((xs= (XID_STATE*) my_hash_element(&xid_cache, i++)))
{
if (xs->xa_state==XA_PREPARED)
{
@@ -1945,23 +1982,28 @@ const char *get_canonical_filename(handler *file, const char *path,
struct Ha_delete_table_error_handler: public Internal_error_handler
{
public:
- virtual bool handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
char buff[MYSQL_ERRMSG_SIZE];
};
bool
Ha_delete_table_error_handler::
-handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd)
-{
+handle_condition(THD *,
+ uint,
+ const char*,
+ MYSQL_ERROR::enum_warning_level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
+{
+ *cond_hdl= NULL;
/* Grab the error message */
- strmake(buff, message, sizeof(buff)-1);
+ strmake(buff, msg, sizeof(buff)-1);
return TRUE;
}
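handle_error() is replaced by the richer handle_condition() hook, which also receives the SQLSTATE and lets the handler return (or suppress) a condition object. A stripped-down sketch of a handler that swallows a condition and keeps only its message text, with invented types standing in for THD and MYSQL_ERROR:

  #include <cstring>

  struct Condition;                    // opaque stand-in for MYSQL_ERROR

  class Error_handler
  {
  public:
    virtual ~Error_handler() {}
    // Return true when the condition is fully handled (swallowed).
    virtual bool handle_condition(unsigned sql_errno,
                                  const char *sqlstate,
                                  const char *msg,
                                  Condition **cond_hdl)= 0;
  };

  class Capture_message_handler : public Error_handler
  {
  public:
    char buff[512];

    virtual bool handle_condition(unsigned, const char *, const char *msg,
                                  Condition **cond_hdl)
    {
      *cond_hdl= NULL;                 // we do not produce a condition object
      std::strncpy(buff, msg, sizeof(buff) - 1);
      buff[sizeof(buff) - 1]= '\0';    // grab the message, nothing else
      return true;                     // condition absorbed
    }
  };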
@@ -2020,7 +2062,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
XXX: should we convert *all* errors to warnings here?
What if the error is fatal?
*/
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error,
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, error,
ha_delete_table_error_handler.buff);
}
delete file;
@@ -2773,6 +2815,9 @@ void handler::print_error(int error, myf errflag)
case HA_ERR_TABLE_NEEDS_UPGRADE:
textno=ER_TABLE_NEEDS_UPGRADE;
break;
+ case HA_ERR_NO_PARTITION_FOUND:
+ textno=ER_WRONG_PARTITION_NAME;
+ break;
case HA_ERR_TABLE_READONLY:
textno= ER_OPEN_AS_READONLY;
break;
@@ -2965,9 +3010,9 @@ static bool update_frm_version(TABLE *table)
if ((result= my_pwrite(file,(uchar*) version,4,51L,MYF_RW)))
goto err;
- for (entry=(TABLE*) hash_first(&open_cache,(uchar*) key,key_length, &state);
+ for (entry=(TABLE*) my_hash_first(&open_cache,(uchar*) key,key_length, &state);
entry;
- entry= (TABLE*) hash_next(&open_cache,(uchar*) key,key_length, &state))
+ entry= (TABLE*) my_hash_next(&open_cache,(uchar*) key,key_length, &state))
entry->s->mysql_version= MYSQL_VERSION_ID;
}
err:
@@ -4423,7 +4468,7 @@ bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
field_list.push_back(new Item_empty_string("Name",FN_REFLEN));
field_list.push_back(new Item_empty_string("Status",10));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
return TRUE;
diff --git a/sql/handler.h b/sql/handler.h
index 7fc2bf2fece..05a9e13653c 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1,3 +1,6 @@
+#ifndef HANDLER_INCLUDED
+#define HANDLER_INCLUDED
+
/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
@@ -214,6 +217,13 @@
#define MAX_HA 15
/*
+ Use this instead of 0 as the initial value for a handlerton's slot number,
+ so that we can distinguish an uninitialized slot number from slot 0.
+*/
+#define HA_SLOT_UNDEF ((uint)-1)
+
+/*
Parameters for open() (in register form->filestat)
HA_GET_INFO does an implicit HA_ABORT_IF_LOCKED
*/
@@ -504,9 +514,8 @@ class st_alter_tablespace : public Sql_alloc
/* The handler for a table type. Will be included in the TABLE structure */
-struct st_table;
-typedef struct st_table TABLE;
-typedef struct st_table_share TABLE_SHARE;
+struct TABLE;
+struct TABLE_SHARE;
struct st_foreign_key_info;
typedef struct st_foreign_key_info FOREIGN_KEY_INFO;
typedef bool (stat_print_fn)(THD *thd, const char *type, uint type_len,
@@ -581,6 +590,7 @@ struct handler_iterator {
void *buffer;
};
+class handler;
/*
handlerton is a singleton structure - one instance per storage engine -
to provide access to storage engine functionality that works on the
@@ -1084,8 +1094,8 @@ class handler :public Sql_alloc
public:
typedef ulonglong Table_flags;
protected:
- struct st_table_share *table_share; /* The table definition */
- struct st_table *table; /* The current open table */
+ TABLE_SHARE *table_share; /* The table definition */
+ TABLE *table; /* The current open table */
Table_flags cached_table_flags; /* Set on init() and open() */
ha_rows estimation_rows_to_insert;
@@ -1161,7 +1171,7 @@ public:
virtual ~handler(void)
{
DBUG_ASSERT(locked == FALSE);
- /* TODO: DBUG_ASSERT(inited == NONE); */
+ DBUG_ASSERT(inited == NONE);
}
virtual handler *clone(MEM_ROOT *mem_root);
/** This is called after create to allow us to set up cached variables */
@@ -2072,3 +2082,4 @@ int ha_binlog_end(THD *thd);
#define ha_binlog_wait(a) do {} while (0)
#define ha_binlog_end(a) do {} while (0)
#endif
+#endif /* HANDLER_INCLUDED */
diff --git a/sql/hash_filo.h b/sql/hash_filo.h
index ab13d338695..5d17b880b4d 100644
--- a/sql/hash_filo.h
+++ b/sql/hash_filo.h
@@ -38,8 +38,8 @@ class hash_filo_element
class hash_filo
{
const uint size, key_offset, key_length;
- const hash_get_key get_key;
- hash_free_key free_element;
+ const my_hash_get_key get_key;
+ my_hash_free_key free_element;
bool init;
CHARSET_INFO *hash_charset;
@@ -49,7 +49,7 @@ public:
HASH cache;
hash_filo(uint size_arg, uint key_offset_arg , uint key_length_arg,
- hash_get_key get_key_arg, hash_free_key free_element_arg,
+ my_hash_get_key get_key_arg, my_hash_free_key free_element_arg,
CHARSET_INFO *hash_charset_arg)
:size(size_arg), key_offset(key_offset_arg), key_length(key_length_arg),
get_key(get_key_arg), free_element(free_element_arg),init(0),
@@ -63,7 +63,7 @@ public:
if (init)
{
if (cache.array.buffer) /* Avoid problems with thread library */
- (void) hash_free(&cache);
+ (void) my_hash_free(&cache);
pthread_mutex_destroy(&lock);
}
}
@@ -76,8 +76,8 @@ public:
}
if (!locked)
(void) pthread_mutex_lock(&lock);
- (void) hash_free(&cache);
- (void) hash_init(&cache,hash_charset,size,key_offset,
+ (void) my_hash_free(&cache);
+ (void) my_hash_init(&cache,hash_charset,size,key_offset,
key_length, get_key, free_element,0);
if (!locked)
(void) pthread_mutex_unlock(&lock);
@@ -87,7 +87,7 @@ public:
hash_filo_element *search(uchar* key, size_t length)
{
hash_filo_element *entry=(hash_filo_element*)
- hash_search(&cache,(uchar*) key,length);
+ my_hash_search(&cache,(uchar*) key,length);
if (entry)
{ // Found; link it first
if (entry != first_link)
@@ -113,7 +113,7 @@ public:
{
hash_filo_element *tmp=last_link;
last_link=last_link->prev_used;
- hash_delete(&cache,(uchar*) tmp);
+ my_hash_delete(&cache,(uchar*) tmp);
}
if (my_hash_insert(&cache,(uchar*) entry))
{
diff --git a/sql/hostname.cc b/sql/hostname.cc
index c8cf46383a9..45b10d16ce2 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -64,7 +64,7 @@ bool hostname_cache_init()
uint offset= (uint) ((char*) (&tmp.ip) - (char*) &tmp);
if (!(hostname_cache=new hash_filo(HOST_CACHE_SIZE, offset,
sizeof(struct in_addr),NULL,
- (hash_free_key) free,
+ (my_hash_free_key) free,
&my_charset_bin)))
return 1;
hostname_cache->clear();
diff --git a/sql/item.cc b/sql/item.cc
index b35a6ae3d6e..ba920497ec1 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -264,10 +264,11 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value)
res->ptr(), res->length(), res->charset(),
decimal_value) & E_DEC_BAD_NUM)
{
+ ErrConvString err(res);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "DECIMAL",
- str_value.c_ptr());
+ err.ptr());
}
return decimal_value;
}
@@ -2447,6 +2448,7 @@ double_from_string_with_check (CHARSET_INFO *cs, const char *cptr, char *end)
tmp= my_strntod(cs, (char*) cptr, end - cptr, &end, &error);
if (error || (end != org_end && !check_if_only_end_space(cs, end, org_end)))
{
+ ErrConvString err(cptr, cs);
/*
We can use str_value.ptr() here as Item_string is gurantee to put an
end \0 here.
@@ -2454,7 +2456,7 @@ double_from_string_with_check (CHARSET_INFO *cs, const char *cptr, char *end)
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE",
- cptr);
+ err.ptr());
}
return tmp;
}
@@ -2484,10 +2486,11 @@ longlong_from_string_with_check (CHARSET_INFO *cs, const char *cptr, char *end)
(err > 0 ||
(end != org_end && !check_if_only_end_space(cs, end, org_end))))
{
+ ErrConvString err(cptr, cs);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
- cptr);
+ err.ptr());
}
return tmp;
}
@@ -2574,7 +2577,8 @@ Item_param::Item_param(uint pos_in_query_arg) :
param_type(MYSQL_TYPE_VARCHAR),
pos_in_query(pos_in_query_arg),
set_param_func(default_set_param_func),
- limit_clause_param(FALSE)
+ limit_clause_param(FALSE),
+ m_out_param_info(NULL)
{
name= (char*) "?";
/*
@@ -2656,6 +2660,17 @@ void Item_param::set_decimal(const char *str, ulong length)
DBUG_VOID_RETURN;
}
+void Item_param::set_decimal(const my_decimal *dv)
+{
+ state= DECIMAL_VALUE;
+
+ my_decimal2decimal(dv, &decimal_value);
+
+ decimals= (uint8) decimal_value.frac;
+ unsigned_flag= !decimal_value.sign();
+ max_length= my_decimal_precision_to_length(decimal_value.intg + decimals,
+ decimals, unsigned_flag);
+}
/**
Set parameter value from MYSQL_TIME value.
@@ -3278,6 +3293,158 @@ Item_param::set_param_type_and_swap_value(Item_param *src)
str_value_ptr.swap(src->str_value_ptr);
}
+
+/**
+ This operation is intended to store some item value in Item_param to be
+ used later.
+
+ @param thd thread context
+ @param ctx stored procedure runtime context
+ @param it a pointer to an item in the tree
+
+ @return Error status
+ @retval TRUE on error
+ @retval FALSE on success
+*/
+
+bool
+Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it)
+{
+ Item *value= *it;
+
+ if (value->is_null())
+ {
+ set_null();
+ return FALSE;
+ }
+
+ null_value= FALSE;
+
+ switch (value->result_type()) {
+ case STRING_RESULT:
+ {
+ char str_buffer[STRING_BUFFER_USUAL_SIZE];
+ String sv_buffer(str_buffer, sizeof(str_buffer), &my_charset_bin);
+ String *sv= value->val_str(&sv_buffer);
+
+ if (!sv)
+ return TRUE;
+
+ set_str(sv->c_ptr_safe(), sv->length());
+ str_value_ptr.set(str_value.ptr(),
+ str_value.length(),
+ str_value.charset());
+ collation.set(str_value.charset(), DERIVATION_COERCIBLE);
+ decimals= 0;
+ param_type= MYSQL_TYPE_STRING;
+
+ break;
+ }
+
+ case REAL_RESULT:
+ set_double(value->val_real());
+ param_type= MYSQL_TYPE_DOUBLE;
+ break;
+
+ case INT_RESULT:
+ set_int(value->val_int(), value->max_length);
+ param_type= MYSQL_TYPE_LONG;
+ break;
+
+ case DECIMAL_RESULT:
+ {
+ my_decimal dv_buf;
+ my_decimal *dv= value->val_decimal(&dv_buf);
+
+ if (!dv)
+ return TRUE;
+
+ set_decimal(dv);
+ param_type= MYSQL_TYPE_NEWDECIMAL;
+
+ break;
+ }
+
+ default:
+ /* This cannot happen. */
+
+ DBUG_ASSERT(FALSE); // Abort in debug mode.
+
+ set_null(); // Set to NULL in release mode.
+ return FALSE;
+ }
+
+ item_result_type= value->result_type();
+ item_type= value->type();
+ return FALSE;
+}
+
+
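Item_param::set_value() above captures another item's value by switching on its result type, routing each case to the matching setter and degrading unexpected types to NULL so the value can be handed back later as an OUT parameter. Below is a small self-contained sketch of that dispatch shape, using std::variant in place of the Item hierarchy; every type and name here is a stand-in, not the server's API.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <variant>

// Illustrative stand-ins for the value kinds a routine can hand back:
// NULL, string, double, integer, or a decimal kept as text for simplicity.
struct Decimal { std::string digits; };
using Value = std::variant<std::monostate, std::string, double, int64_t, Decimal>;

// A placeholder parameter: remembers the value and the wire type it will be
// sent back as, the way Item_param::set_value() records param_type.
struct OutParam {
  bool is_null = true;
  std::string text;       // rendered value, kept for later use
  std::string wire_type;  // stand-in for MYSQL_TYPE_*

  bool set_value(const Value &src) {
    is_null = false;
    switch (src.index()) {
    case 0:  is_null = true;                                 break; // NULL
    case 1:  text = std::get<std::string>(src);
             wire_type = "STRING";                           break;
    case 2:  { std::ostringstream os; os << std::get<double>(src);
               text = os.str(); wire_type = "DOUBLE"; }      break;
    case 3:  text = std::to_string(std::get<int64_t>(src));
             wire_type = "LONG";                             break;
    case 4:  text = std::get<Decimal>(src).digits;
             wire_type = "NEWDECIMAL";                       break;
    default: is_null = true;                                 break; // degrade to NULL
    }
    return false;  // FALSE == success, matching the server convention
  }
};

int main() {
  OutParam p;
  p.set_value(Value{int64_t{42}});
  std::cout << p.wire_type << " " << p.text << "\n";         // LONG 42
  p.set_value(Value{Decimal{"3.14"}});
  std::cout << p.wire_type << " " << p.text << "\n";         // NEWDECIMAL 3.14
}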
+/**
+ Setter of Item_param::m_out_param_info.
+
+ m_out_param_info is used to store information about stored routine
+ OUT-parameters, such as stored routine name, database, stored routine
+ variable name. It is supposed to be set in sp_head::execute() after
+ Item_param::set_value() is called.
+*/
+
+void
+Item_param::set_out_param_info(Send_field *info)
+{
+ m_out_param_info= info;
+}
+
+
+/**
+ Getter of Item_param::m_out_param_info.
+
+ m_out_param_info is used to store information about stored routine
+ OUT-parameters, such as stored routine name, database, stored routine
+ variable name. It is supposed to be retrieved in
+ Protocol_binary::send_out_parameters() during creation of OUT-parameter
+ result set.
+*/
+
+const Send_field *
+Item_param::get_out_param_info() const
+{
+ return m_out_param_info;
+}
+
+
+/**
+ Fill meta-data information for the corresponding column in a result set.
+ If this is an OUT-parameter of a stored procedure, preserve meta-data of
+ stored-routine variable.
+
+ @param field container for meta-data to be filled
+*/
+
+void Item_param::make_field(Send_field *field)
+{
+ Item::make_field(field);
+
+ if (!m_out_param_info)
+ return;
+
+ /*
+ This is an OUT-parameter of a stored procedure. We should use
+ OUT-parameter info to fill out the names.
+ */
+
+ field->db_name= m_out_param_info->db_name;
+ field->table_name= m_out_param_info->table_name;
+ field->org_table_name= m_out_param_info->org_table_name;
+ field->col_name= m_out_param_info->col_name;
+ field->org_col_name= m_out_param_info->org_col_name;
+ field->length= m_out_param_info->length;
+ field->charsetnr= m_out_param_info->charsetnr;
+ field->flags= m_out_param_info->flags;
+ field->decimals= m_out_param_info->decimals;
+ field->type= m_out_param_info->type;
+}
+
/****************************************************************************
Item_copy
****************************************************************************/
@@ -3509,7 +3676,7 @@ void Item_copy_decimal::copy()
/*
- Functions to convert item to field (for send_fields)
+ Functions to convert item to field (for send_result_set_metadata)
*/
/* ARGSUSED */
@@ -4773,7 +4940,6 @@ String *Item::check_well_formed_result(String *str, bool send_error)
{
THD *thd= current_thd;
char hexbuf[7];
- enum MYSQL_ERROR::enum_warning_level level;
uint diff= str->length() - wlen;
set_if_smaller(diff, 3);
octet2hex(hexbuf, str->ptr() + wlen, diff);
@@ -4786,16 +4952,14 @@ String *Item::check_well_formed_result(String *str, bool send_error)
if ((thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)))
{
- level= MYSQL_ERROR::WARN_LEVEL_ERROR;
null_value= 1;
str= 0;
}
else
{
- level= MYSQL_ERROR::WARN_LEVEL_WARN;
str->length(wlen);
}
- push_warning_printf(thd, level, ER_INVALID_CHARACTER_STRING,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING,
ER(ER_INVALID_CHARACTER_STRING), cs->csname, hexbuf);
}
return str;
diff --git a/sql/item.h b/sql/item.h
index 2d429ca6cf3..24d800300e7 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_INCLUDED
+#define ITEM_INCLUDED
+
/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
@@ -450,6 +453,11 @@ public:
TRUE if an error has occurred.
*/
virtual bool set_value(THD *thd, sp_rcontext *ctx, Item **it)= 0;
+
+ virtual void set_out_param_info(Send_field *info) {}
+
+ virtual const Send_field *get_out_param_info() const
+ { return NULL; }
};
@@ -1563,7 +1571,8 @@ public:
/* Item represents one placeholder ('?') of prepared statement */
-class Item_param :public Item
+class Item_param :public Item,
+ private Settable_routine_parameter
{
char cnvbuf[MAX_FIELD_WIDTH];
String cnvstr;
@@ -1651,6 +1660,7 @@ public:
void set_int(longlong i, uint32 max_length_arg);
void set_double(double i);
void set_decimal(const char *str, ulong length);
+ void set_decimal(const my_decimal *dv);
bool set_str(const char *str, ulong length);
bool set_longdata(const char *str, ulong length);
void set_time(MYSQL_TIME *tm, timestamp_type type, uint32 max_length_arg);
@@ -1700,6 +1710,25 @@ public:
/** Item is an argument to a limit clause. */
bool limit_clause_param;
void set_param_type_and_swap_value(Item_param *from);
+
+private:
+ virtual inline Settable_routine_parameter *
+ get_settable_routine_parameter()
+ {
+ return this;
+ }
+
+ virtual bool set_value(THD *thd, sp_rcontext *ctx, Item **it);
+
+ virtual void set_out_param_info(Send_field *info);
+
+public:
+ virtual const Send_field *get_out_param_info() const;
+
+ virtual void make_field(Send_field *field);
+
+private:
+ Send_field *m_out_param_info;
};
@@ -2057,7 +2086,7 @@ public:
/**
Item_empty_string -- is a utility class to put an item into List<Item>
- which is then used in protocol.send_fields() when sending SHOW output to
+ which is then used in protocol.send_result_set_metadata() when sending SHOW output to
the client.
*/
@@ -3162,3 +3191,5 @@ extern Cached_item *new_Cached_item(THD *thd, Item *item);
extern Item_result item_cmp_type(Item_result a,Item_result b);
extern void resolve_const_item(THD *thd, Item **ref, Item *cmp_item);
extern int stored_field_cmp_to_item(THD *thd, Field *field, Item *item);
+
+#endif /* ITEM_INCLUDED */
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 52af6a31c0c..33f1e9b6950 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_CMPFUNC_INCLUDED
+#define ITEM_CMPFUNC_INCLUDED
+
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -1741,5 +1744,7 @@ inline Item *and_conds(Item *a, Item *b)
Item *and_expressions(Item *a, Item *b, Item **org_item);
-bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type,
+bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type,
const char *warn_name, MYSQL_TIME *l_time);
+
+#endif /* ITEM_CMPFUNC_INCLUDED */
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 53aa8081da1..c00b5ec1701 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -927,10 +927,10 @@ protected:
};
-class Create_func_format : public Create_func_arg2
+class Create_func_format : public Create_native_func
{
public:
- virtual Item *create(THD *thd, Item *arg1, Item *arg2);
+ virtual Item *create_native(THD *thd, LEX_STRING name, List<Item> *item_list);
static Create_func_format s_singleton;
@@ -2052,6 +2052,18 @@ protected:
virtual ~Create_func_to_days() {}
};
+class Create_func_to_seconds : public Create_func_arg1
+{
+public:
+ virtual Item* create(THD *thd, Item *arg1);
+
+ static Create_func_to_seconds s_singleton;
+
+protected:
+ Create_func_to_seconds() {}
+ virtual ~Create_func_to_seconds() {}
+};
+
#ifdef HAVE_SPATIAL
class Create_func_touches : public Create_func_arg2
@@ -3352,9 +3364,34 @@ Create_func_floor::create(THD *thd, Item *arg1)
Create_func_format Create_func_format::s_singleton;
Item*
-Create_func_format::create(THD *thd, Item *arg1, Item *arg2)
+Create_func_format::create_native(THD *thd, LEX_STRING name,
+ List<Item> *item_list)
{
- return new (thd->mem_root) Item_func_format(arg1, arg2);
+ Item *func= NULL;
+ int arg_count= item_list ? item_list->elements : 0;
+
+ switch (arg_count) {
+ case 2:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_format(param_1, param_2);
+ break;
+ }
+ case 3:
+ {
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
+ Item *param_3= item_list->pop();
+ func= new (thd->mem_root) Item_func_format(param_1, param_2, param_3);
+ break;
+ }
+ default:
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ break;
+ }
+
+ return func;
}
@@ -4487,6 +4524,15 @@ Create_func_to_days::create(THD *thd, Item *arg1)
}
+Create_func_to_seconds Create_func_to_seconds::s_singleton;
+
+Item*
+Create_func_to_seconds::create(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_to_seconds(arg1);
+}
+
+
#ifdef HAVE_SPATIAL
Create_func_touches Create_func_touches::s_singleton;
@@ -4925,6 +4971,7 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("TIME_TO_SEC") }, BUILDER(Create_func_time_to_sec)},
{ { C_STRING_WITH_LEN("TOUCHES") }, GEOM_BUILDER(Create_func_touches)},
{ { C_STRING_WITH_LEN("TO_DAYS") }, BUILDER(Create_func_to_days)},
+ { { C_STRING_WITH_LEN("TO_SECONDS") }, BUILDER(Create_func_to_seconds)},
{ { C_STRING_WITH_LEN("UCASE") }, BUILDER(Create_func_ucase)},
{ { C_STRING_WITH_LEN("UNCOMPRESS") }, BUILDER(Create_func_uncompress)},
{ { C_STRING_WITH_LEN("UNCOMPRESSED_LENGTH") }, BUILDER(Create_func_uncompressed_length)},
@@ -4968,14 +5015,14 @@ int item_create_init()
DBUG_ENTER("item_create_init");
- if (hash_init(& native_functions_hash,
- system_charset_info,
- array_elements(func_array),
- 0,
- 0,
- (hash_get_key) get_native_fct_hash_key,
- NULL, /* Nothing to free */
- MYF(0)))
+ if (my_hash_init(& native_functions_hash,
+ system_charset_info,
+ array_elements(func_array),
+ 0,
+ 0,
+ (my_hash_get_key) get_native_fct_hash_key,
+ NULL, /* Nothing to free */
+ MYF(0)))
DBUG_RETURN(1);
for (func= func_array; func->builder != NULL; func++)
@@ -4987,7 +5034,7 @@ int item_create_init()
#ifndef DBUG_OFF
for (uint i=0 ; i < native_functions_hash.records ; i++)
{
- func= (Native_func_registry*) hash_element(& native_functions_hash, i);
+ func= (Native_func_registry*) my_hash_element(& native_functions_hash, i);
DBUG_PRINT("info", ("native function: %s length: %u",
func->name.str, (uint) func->name.length));
}
@@ -5005,7 +5052,7 @@ int item_create_init()
void item_create_cleanup()
{
DBUG_ENTER("item_create_cleanup");
- hash_free(& native_functions_hash);
+ my_hash_free(& native_functions_hash);
DBUG_VOID_RETURN;
}
@@ -5016,9 +5063,9 @@ find_native_function_builder(THD *thd, LEX_STRING name)
Create_func *builder= NULL;
/* Thread safe */
- func= (Native_func_registry*) hash_search(& native_functions_hash,
- (uchar*) name.str,
- name.length);
+ func= (Native_func_registry*) my_hash_search(& native_functions_hash,
+ (uchar*) name.str,
+ name.length);
if (func)
{
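The item_create.cc changes register builders in a name-keyed hash (now via my_hash_*), move FORMAT() to a native builder so it can accept either two or three arguments (the optional locale), and add a builder for the new TO_SECONDS() function. A minimal sketch of that registry-plus-arity-check pattern with standard containers follows; the builder signatures here are invented for illustration and are not the Create_func interface.

#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

using Args = std::vector<std::string>;
using Builder = std::function<std::string(const Args &)>;

// Name -> builder registry, looked up the way find_native_function_builder()
// consults native_functions_hash.
static std::unordered_map<std::string, Builder> registry = {
  // FORMAT accepts 2 or 3 arguments; the builder checks the count itself.
  {"FORMAT", [](const Args &a) -> std::string {
     if (a.size() != 2 && a.size() != 3)
       throw std::runtime_error("wrong parameter count to FORMAT");
     return "Item_func_format(" + a[0] + "," + a[1] +
            (a.size() == 3 ? "," + a[2] : "") + ")";
   }},
  // TO_SECONDS is a plain one-argument builder.
  {"TO_SECONDS", [](const Args &a) -> std::string {
     if (a.size() != 1)
       throw std::runtime_error("wrong parameter count to TO_SECONDS");
     return "Item_func_to_seconds(" + a[0] + ")";
   }},
};

static std::string build(const std::string &name, const Args &args) {
  auto it = registry.find(name);
  if (it == registry.end())
    throw std::runtime_error("unknown function " + name);
  return it->second(args);
}

int main() {
  std::cout << build("FORMAT", {"1234567.891", "2", "'de_DE'"}) << "\n";
  std::cout << build("TO_SECONDS", {"'2009-11-29'"}) << "\n";
}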
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 977a0de39af..49b770c4af0 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -598,7 +598,7 @@ void Item_func::signal_divide_by_null()
{
THD *thd= current_thd;
if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DIVISION_BY_ZERO,
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO,
ER(ER_DIVISION_BY_ZERO));
null_value= 1;
}
@@ -1053,7 +1053,7 @@ my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec)
return dec;
err:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
ER(ER_WARN_DATA_OUT_OF_RANGE),
name, 1);
@@ -3283,7 +3283,7 @@ public:
{
if (key)
{
- hash_delete(&hash_user_locks,(uchar*) this);
+ my_hash_delete(&hash_user_locks,(uchar*) this);
my_free(key, MYF(0));
}
pthread_cond_destroy(&cond);
@@ -3307,8 +3307,8 @@ static bool item_user_lock_inited= 0;
void item_user_lock_init(void)
{
pthread_mutex_init(&LOCK_user_locks,MY_MUTEX_INIT_SLOW);
- hash_init(&hash_user_locks,system_charset_info,
- 16,0,0,(hash_get_key) ull_get_key,NULL,0);
+ my_hash_init(&hash_user_locks,system_charset_info,
+ 16,0,0,(my_hash_get_key) ull_get_key,NULL,0);
item_user_lock_inited= 1;
}
@@ -3317,7 +3317,7 @@ void item_user_lock_free(void)
if (item_user_lock_inited)
{
item_user_lock_inited= 0;
- hash_free(&hash_user_locks);
+ my_hash_free(&hash_user_locks);
pthread_mutex_destroy(&LOCK_user_locks);
}
}
@@ -3384,9 +3384,9 @@ void debug_sync_point(const char* lock_name, uint lock_timeout)
this case, we will not be waiting, but rather, just waste CPU and
memory on the whole deal
*/
- if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks,
- (uchar*) lock_name,
- lock_name_len))))
+ if (!(ull= ((User_level_lock*) my_hash_search(&hash_user_locks,
+ (uchar*) lock_name,
+ lock_name_len))))
{
pthread_mutex_unlock(&LOCK_user_locks);
return;
@@ -3487,9 +3487,9 @@ longlong Item_func_get_lock::val_int()
thd->ull=0;
}
- if (!(ull= ((User_level_lock *) hash_search(&hash_user_locks,
- (uchar*) res->ptr(),
- (size_t) res->length()))))
+ if (!(ull= ((User_level_lock *) my_hash_search(&hash_user_locks,
+ (uchar*) res->ptr(),
+ (size_t) res->length()))))
{
ull= new User_level_lock((uchar*) res->ptr(), (size_t) res->length(),
thd->thread_id);
@@ -3591,9 +3591,9 @@ longlong Item_func_release_lock::val_int()
result=0;
pthread_mutex_lock(&LOCK_user_locks);
- if (!(ull= ((User_level_lock*) hash_search(&hash_user_locks,
- (const uchar*) res->ptr(),
- (size_t) res->length()))))
+ if (!(ull= ((User_level_lock*) my_hash_search(&hash_user_locks,
+ (const uchar*) res->ptr(),
+ (size_t) res->length()))))
{
null_value=1;
}
@@ -3666,7 +3666,7 @@ longlong Item_func_benchmark::val_int()
{
char buff[22];
llstr(((longlong) loop_count), buff);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE),
"count", buff, "benchmark");
}
@@ -3773,12 +3773,12 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
{
user_var_entry *entry;
- if (!(entry = (user_var_entry*) hash_search(hash, (uchar*) name.str,
- name.length)) &&
+ if (!(entry = (user_var_entry*) my_hash_search(hash, (uchar*) name.str,
+ name.length)) &&
create_if_not_exists)
{
uint size=ALIGN_SIZE(sizeof(user_var_entry))+name.length+1+extra_size;
- if (!hash_inited(hash))
+ if (!my_hash_inited(hash))
return 0;
if (!(entry = (user_var_entry*) my_malloc(size,MYF(MY_WME))))
return 0;
@@ -5694,8 +5694,8 @@ longlong Item_func_is_free_lock::val_int()
}
pthread_mutex_lock(&LOCK_user_locks);
- ull= (User_level_lock *) hash_search(&hash_user_locks, (uchar*) res->ptr(),
- (size_t) res->length());
+ ull= (User_level_lock *) my_hash_search(&hash_user_locks, (uchar*) res->ptr(),
+ (size_t) res->length());
pthread_mutex_unlock(&LOCK_user_locks);
if (!ull || !ull->locked)
return 1;
@@ -5713,8 +5713,8 @@ longlong Item_func_is_used_lock::val_int()
return 0;
pthread_mutex_lock(&LOCK_user_locks);
- ull= (User_level_lock *) hash_search(&hash_user_locks, (uchar*) res->ptr(),
- (size_t) res->length());
+ ull= (User_level_lock *) my_hash_search(&hash_user_locks, (uchar*) res->ptr(),
+ (size_t) res->length());
pthread_mutex_unlock(&LOCK_user_locks);
if (!ull || !ull->locked)
return 0;
@@ -5795,12 +5795,12 @@ Item_func_sp::func_name() const
}
-int my_missing_function_error(const LEX_STRING &token, const char *func_name)
+void my_missing_function_error(const LEX_STRING &token, const char *func_name)
{
if (token.length && is_lex_native_function (&token))
- return my_error(ER_FUNC_INEXISTENT_NAME_COLLISION, MYF(0), func_name);
+ my_error(ER_FUNC_INEXISTENT_NAME_COLLISION, MYF(0), func_name);
else
- return my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", func_name);
+ my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", func_name);
}
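Most of the item_func.cc hunks are the hash_*/my_hash_* rename plus a downgrade of several pushed warnings from WARN_LEVEL_ERROR to WARN_LEVEL_WARN, but the code they pass through is the user-level lock machinery behind GET_LOCK(), RELEASE_LOCK() and IS_FREE_LOCK(): a global mutex-protected hash from lock name to a lock object with a condition variable. The sketch below reproduces that shape with standard threading primitives; names and signatures are hypothetical, not the server's.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <string>
#include <unordered_map>

// One named lock: who owns it, and a condition to wait on for release.
struct UserLock {
  bool locked = false;
  unsigned long owner = 0;
  std::condition_variable released;
};

static std::mutex registry_mutex;                        // LOCK_user_locks analogue
static std::unordered_map<std::string, UserLock> locks;  // hash_user_locks analogue

// GET_LOCK(name, timeout) analogue: returns true when the lock is acquired.
bool get_lock(const std::string &name, unsigned long thread_id,
              std::chrono::seconds timeout) {
  std::unique_lock<std::mutex> guard(registry_mutex);
  UserLock &ul = locks.try_emplace(name).first->second;  // create on first use
  if (!ul.released.wait_for(guard, timeout, [&] { return !ul.locked; }))
    return false;                                        // timed out
  ul.locked = true;
  ul.owner = thread_id;
  return true;
}

// RELEASE_LOCK(name) analogue: only the owner may release.
bool release_lock(const std::string &name, unsigned long thread_id) {
  std::lock_guard<std::mutex> guard(registry_mutex);
  auto it = locks.find(name);
  if (it == locks.end() || !it->second.locked || it->second.owner != thread_id)
    return false;
  it->second.locked = false;
  it->second.released.notify_one();
  return true;
}

// IS_FREE_LOCK(name) analogue.
bool is_free_lock(const std::string &name) {
  std::lock_guard<std::mutex> guard(registry_mutex);
  auto it = locks.find(name);
  return it == locks.end() || !it->second.locked;
}

int main() {
  std::cout << is_free_lock("backup") << "\n";                          // 1
  std::cout << get_lock("backup", 1, std::chrono::seconds(1)) << "\n";  // 1
  std::cout << is_free_lock("backup") << "\n";                          // 0
  std::cout << release_lock("backup", 1) << "\n";                       // 1
}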
diff --git a/sql/item_func.h b/sql/item_func.h
index 1aae0a5abb5..f22bc0c301c 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_FUNC_INCLUDED
+#define ITEM_FUNC_INCLUDED
+
/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
@@ -1706,3 +1709,4 @@ public:
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
};
+#endif /* ITEM_FUNC_INCLUDED */
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index edbe104e307..9a55ea7d5b1 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_GEOFUNC_INCLUDED
+#define ITEM_GEOFUNC_INCLUDED
+
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -386,3 +389,4 @@ public:
#endif
+#endif /* ITEM_GEOFUNC_INCLUDED */
diff --git a/sql/item_row.h b/sql/item_row.h
index 67441f49603..7c08c5888e0 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_ROW_INCLUDED
+#define ITEM_ROW_INCLUDED
+
/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -77,3 +80,5 @@ public:
bool null_inside() { return with_null; };
void bring_value();
};
+
+#endif /* ITEM_ROW_INCLUDED */
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 183a628f8e4..86a892a9dcc 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -528,11 +528,11 @@ String *Item_func_des_encrypt::val_str(String *str)
return &tmp_value;
error:
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
code, ER(code),
"des_encrypt");
#else
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED),
"des_encrypt","--with-openssl");
#endif /* HAVE_OPENSSL */
@@ -605,12 +605,12 @@ String *Item_func_des_decrypt::val_str(String *str)
return &tmp_value;
error:
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
code, ER(code),
"des_decrypt");
wrong_key:
#else
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED),
"des_decrypt","--with-openssl");
#endif /* HAVE_OPENSSL */
@@ -2041,9 +2041,22 @@ String *Item_func_soundex::val_str(String *str)
const int FORMAT_MAX_DECIMALS= 30;
-Item_func_format::Item_func_format(Item *org, Item *dec)
-: Item_str_func(org, dec)
+
+MY_LOCALE *Item_func_format::get_locale(Item *item)
{
+ DBUG_ASSERT(arg_count == 3);
+ String tmp, *locale_name= args[2]->val_str(&tmp);
+ MY_LOCALE *lc;
+ if (!locale_name ||
+ !(lc= my_locale_by_name(locale_name->c_ptr_safe())))
+ {
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_UNKNOWN_LOCALE,
+ ER(ER_UNKNOWN_LOCALE),
+ locale_name ? locale_name->c_ptr_safe() : "NULL");
+ lc= &my_locale_en_US;
+ }
+ return lc;
}
void Item_func_format::fix_length_and_dec()
@@ -2053,6 +2066,10 @@ void Item_func_format::fix_length_and_dec()
collation.set(default_charset());
max_length= (char_length + max_sep_count + decimals) *
collation.collation->mbmaxlen;
+ if (arg_count == 3)
+ locale= args[2]->basic_const_item() ? get_locale(args[2]) : NULL;
+ else
+ locale= &my_locale_en_US; /* Two arguments */
}
@@ -2064,13 +2081,12 @@ void Item_func_format::fix_length_and_dec()
String *Item_func_format::val_str(String *str)
{
- uint32 length;
uint32 str_length;
/* Number of decimal digits */
int dec;
/* Number of characters used to represent the decimals, including '.' */
uint32 dec_length;
- int diff;
+ MY_LOCALE *lc;
DBUG_ASSERT(fixed == 1);
dec= (int) args[1]->val_int();
@@ -2080,6 +2096,8 @@ String *Item_func_format::val_str(String *str)
return NULL;
}
+ lc= locale ? locale : get_locale(args[2]);
+
dec= set_zone(dec, 0, FORMAT_MAX_DECIMALS);
dec_length= dec ? dec+1 : 0;
null_value=0;
@@ -2094,8 +2112,6 @@ String *Item_func_format::val_str(String *str)
my_decimal_round(E_DEC_FATAL_ERROR, res, dec, false, &rnd_dec);
my_decimal2string(E_DEC_FATAL_ERROR, &rnd_dec, 0, 0, 0, str);
str_length= str->length();
- if (rnd_dec.sign())
- str_length--;
}
else
{
@@ -2108,31 +2124,51 @@ String *Item_func_format::val_str(String *str)
if (isnan(nr))
return str;
str_length=str->length();
- if (nr < 0)
- str_length--; // Don't count sign
- }
- /* We need this test to handle 'nan' values */
- if (str_length >= dec_length+4)
- {
- char *tmp,*pos;
- length= str->length()+(diff=((int)(str_length- dec_length-1))/3);
- str= copy_if_not_alloced(&tmp_str,str,length);
- str->length(length);
- tmp= (char*) str->ptr()+length - dec_length-1;
- for (pos= (char*) str->ptr()+length-1; pos != tmp; pos--)
- pos[0]= pos[-diff];
- while (diff)
+ }
+ /* We need this test to handle 'nan' and short values */
+ if (lc->grouping[0] > 0 &&
+ str_length >= dec_length + 1 + lc->grouping[0])
+ {
+ char buf[DECIMAL_MAX_STR_LENGTH * 2]; /* 2 - in the worst case when grouping=1 */
+ int count;
+ const char *grouping= lc->grouping;
+ char sign_length= *str->ptr() == '-' ? 1 : 0;
+ const char *src= str->ptr() + str_length - dec_length - 1;
+ const char *src_begin= str->ptr() + sign_length;
+ char *dst= buf + sizeof(buf);
+
+ /* Put the fractional part */
+ if (dec)
+ {
+ dst-= (dec + 1);
+ *dst= lc->decimal_point;
+ memcpy(dst + 1, src + 2, dec);
+ }
+
+ /* Put the integer part with grouping */
+ for (count= *grouping; src >= src_begin; count--)
{
- *pos= *(pos - diff);
- pos--;
- *pos= *(pos - diff);
- pos--;
- *pos= *(pos - diff);
- pos--;
- pos[0]=',';
- pos--;
- diff--;
+ /*
+ When *grouping==0x80 (which means "end of grouping")
+ count will be initialized to -1 and
+ we'll never get into this "if" anymore.
+ */
+ if (!count)
+ {
+ *--dst= lc->thousand_sep;
+ if (grouping[1])
+ grouping++;
+ count= *grouping;
+ }
+ DBUG_ASSERT(dst > buf);
+ *--dst= *src--;
}
+
+ if (sign_length) /* Put '-' */
+ *--dst= *str->ptr();
+
+ /* Put the rest of the integer part without grouping */
+ str->copy(dst, buf + sizeof(buf) - dst, &my_charset_latin1);
}
return str;
}
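The rewritten Item_func_format::val_str() above stops hard-coding groups of three digits separated by ',': it walks the locale's grouping string from the least significant digit, inserts the locale's thousands separator after each group, and keeps reusing the last group size (0x80 marks the end of the grouping list). The standalone function below reimplements that right-to-left pass with standard strings; it is an illustration of the algorithm, not the server code.

#include <iostream>
#include <string>
#include <vector>

// Insert `thousand_sep` into the integer part of `number` according to
// `grouping`, read right to left; the last group size repeats, and a
// trailing 0 means "stop grouping", mirroring the 0x80 sentinel above.
std::string format_grouped(const std::string &number,
                           const std::vector<int> &grouping,
                           char thousand_sep, char decimal_point) {
  std::string::size_type dot = number.find('.');
  std::string fraction = dot == std::string::npos ? "" : number.substr(dot + 1);
  std::string digits   = dot == std::string::npos ? number : number.substr(0, dot);

  bool negative = !digits.empty() && digits[0] == '-';
  if (negative)
    digits.erase(0, 1);

  std::string out;                     // built least significant digit first
  std::size_t gi = 0;
  int count = grouping.empty() ? 0 : grouping[gi];
  for (auto it = digits.rbegin(); it != digits.rend(); ++it) {
    if (count == 0 && !grouping.empty() && grouping[gi] != 0 && !out.empty()) {
      out.push_back(thousand_sep);
      if (gi + 1 < grouping.size())
        ++gi;                          // advance; the last group size then repeats
      count = grouping[gi];
    }
    out.push_back(*it);
    if (count > 0)
      --count;
  }
  if (negative)
    out.push_back('-');
  std::string result(out.rbegin(), out.rend());
  if (!fraction.empty())
    result += decimal_point + fraction;
  return result;
}

int main() {
  // en_US style: groups of three, '.' as decimal point.
  std::cout << format_grouped("-1234567.89", {3}, ',', '.') << "\n";  // -1,234,567.89
  // A locale that groups 3 then 2 (Indian-style lakh/crore grouping).
  std::cout << format_grouped("1234567", {3, 2}, ',', '.') << "\n";   // 12,34,567
}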
@@ -3238,7 +3274,7 @@ longlong Item_func_uncompressed_length::val_int()
*/
if (res->length() <= 4)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ZLIB_Z_DATA_ERROR,
ER(ER_ZLIB_Z_DATA_ERROR));
null_value= 1;
@@ -3315,7 +3351,7 @@ String *Item_func_compress::val_str(String *str)
(const Bytef*)res->ptr(), res->length())) != Z_OK)
{
code= err==Z_MEM_ERROR ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_BUF_ERROR;
- push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,code,ER(code));
+ push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,code,ER(code));
null_value= 1;
return 0;
}
@@ -3353,7 +3389,7 @@ String *Item_func_uncompress::val_str(String *str)
/* If length is less than 4 bytes, data is corrupt */
if (res->length() <= 4)
{
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ZLIB_Z_DATA_ERROR,
ER(ER_ZLIB_Z_DATA_ERROR));
goto err;
@@ -3363,7 +3399,7 @@ String *Item_func_uncompress::val_str(String *str)
new_size= uint4korr(res->ptr()) & 0x3FFFFFFF;
if (new_size > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TOO_BIG_FOR_UNCOMPRESS,
ER(ER_TOO_BIG_FOR_UNCOMPRESS),
current_thd->variables.max_allowed_packet);
@@ -3381,7 +3417,7 @@ String *Item_func_uncompress::val_str(String *str)
code= ((err == Z_BUF_ERROR) ? ER_ZLIB_Z_BUF_ERROR :
((err == Z_MEM_ERROR) ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_DATA_ERROR));
- push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR,code,ER(code));
+ push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,code,ER(code));
err:
null_value= 1;
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 2cdb45100ae..2233679e40c 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_STRFUNC_INCLUDED
+#define ITEM_STRFUNC_INCLUDED
+
/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -498,8 +501,13 @@ public:
class Item_func_format :public Item_str_func
{
String tmp_str;
+ MY_LOCALE *locale;
public:
- Item_func_format(Item *org, Item *dec);
+ Item_func_format(Item *org, Item *dec): Item_str_func(org, dec) {}
+ Item_func_format(Item *org, Item *dec, Item *lang):
+ Item_str_func(org, dec, lang) {}
+
+ MY_LOCALE *get_locale(Item *item);
String *val_str(String *);
void fix_length_and_dec();
const char *func_name() const { return "format"; }
@@ -842,3 +850,4 @@ public:
String *val_str(String *);
};
+#endif /* ITEM_STRFUNC_INCLUDED */
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 26d3833f72c..a6499b9584c 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1870,7 +1870,8 @@ void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row)
DBUG_ASSERT(0);
}
-int init_read_record_seq(JOIN_TAB *tab);
+int read_first_record_seq(JOIN_TAB *tab);
+int rr_sequential(READ_RECORD *info);
int join_read_always_key_or_null(JOIN_TAB *tab);
int join_read_next_same_or_null(READ_RECORD *info);
@@ -1952,7 +1953,8 @@ int subselect_single_select_engine::exec()
/* Change the access method to full table scan */
tab->save_read_first_record= tab->read_first_record;
tab->save_read_record= tab->read_record.read_record;
- tab->read_first_record= init_read_record_seq;
+ tab->read_record.read_record= rr_sequential;
+ tab->read_first_record= read_first_record_seq;
tab->read_record.record= tab->table->record[0];
tab->read_record.thd= join->thd;
tab->read_record.ref_length= tab->table->file->ref_length;
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 467e9b22637..3503d42edc0 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_SUBSELECT_INCLUDED
+#define ITEM_SUBSELECT_INCLUDED
+
/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -580,4 +583,4 @@ inline bool Item_subselect::is_uncacheable() const
return engine->uncacheable();
}
-
+#endif /* ITEM_SUBSELECT_INCLUDED */
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 4ab8e75ddf5..82b68a87dfd 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -374,6 +374,7 @@ Item_sum::Item_sum(List<Item> &list) :arg_count(list.elements),
args= NULL;
}
mark_as_sum_func();
+ init_aggregator();
list.empty(); // Fields are used
}
@@ -405,6 +406,10 @@ Item_sum::Item_sum(THD *thd, Item_sum *item):
}
memcpy(args, item->args, sizeof(Item*)*arg_count);
memcpy(orig_args, item->orig_args, sizeof(Item*)*arg_count);
+ init_aggregator();
+ with_distinct= item->with_distinct;
+ if (item->aggr)
+ set_aggregator(item->aggr->Aggrtype());
}
@@ -550,13 +555,518 @@ void Item_sum::update_used_tables ()
}
-Item *Item_sum::set_arg(int i, THD *thd, Item *new_val)
+Item *Item_sum::set_arg(uint i, THD *thd, Item *new_val)
{
thd->change_item_tree(args + i, new_val);
return new_val;
}
+int Item_sum::set_aggregator(Aggregator::Aggregator_type aggregator)
+{
+ if (aggr)
+ {
+ DBUG_ASSERT(aggregator == aggr->Aggrtype());
+ return FALSE;
+ }
+ switch (aggregator)
+ {
+ case Aggregator::DISTINCT_AGGREGATOR:
+ aggr= new Aggregator_distinct(this);
+ break;
+
+ case Aggregator::SIMPLE_AGGREGATOR:
+ aggr= new Aggregator_simple(this);
+ break;
+ };
+ return aggr ? FALSE : TRUE;
+}
+
+
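Item_sum::set_aggregator() above is a small factory: each set function owns an Aggregator, either a pass-through simple one or a distinct-filtering one, and delegates row handling to it. The sketch below shows that strategy split in miniature; the classes are illustrative stand-ins, not the server's Aggregator hierarchy.

#include <iostream>
#include <memory>
#include <set>
#include <vector>

// The aggregate itself only knows how to fold one value into its state.
struct SumState {
  double total = 0.0;
  void add_value(double v) { total += v; }
  void clear() { total = 0.0; }
};

// Strategy interface: how rows reach the aggregate.
struct Aggregator {
  virtual ~Aggregator() = default;
  virtual void add(SumState &state, double v) = 0;
  virtual void endup(SumState &state) = 0;   // finish before reading the result
};

// SIMPLE_AGGREGATOR analogue: feed every row straight through.
struct SimpleAggregator : Aggregator {
  void add(SumState &state, double v) override { state.add_value(v); }
  void endup(SumState &) override {}
};

// DISTINCT_AGGREGATOR analogue: collect rows first, feed unique ones at endup().
struct DistinctAggregator : Aggregator {
  std::set<double> seen;
  void add(SumState &, double v) override { seen.insert(v); }
  void endup(SumState &state) override {
    state.clear();
    for (double v : seen)
      state.add_value(v);
  }
};

int main() {
  std::vector<double> rows = {1, 2, 2, 3, 3, 3};

  SumState plain, distinct;
  std::unique_ptr<Aggregator> simple = std::make_unique<SimpleAggregator>();
  std::unique_ptr<Aggregator> dist   = std::make_unique<DistinctAggregator>();

  for (double v : rows) {
    simple->add(plain, v);
    dist->add(distinct, v);
  }
  simple->endup(plain);
  dist->endup(distinct);

  std::cout << "SUM(x) = " << plain.total << "\n";             // 14
  std::cout << "SUM(DISTINCT x) = " << distinct.total << "\n"; // 6
}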
+void Item_sum::cleanup()
+{
+ if (aggr)
+ {
+ delete aggr;
+ aggr= NULL;
+ }
+ Item_result_field::cleanup();
+ forced_const= FALSE;
+}
+
+
+/**
+ Compare keys consisting of single field that cannot be compared as binary.
+
+ Used by the Unique class to compare keys. Will do correct comparisons
+ for all field types.
+
+ @param arg Pointer to the relevant Field class instance
+ @param key1 left key image
+ @param key2 right key image
+ @return comparison result
+ @retval < 0 if key1 < key2
+ @retval = 0 if key1 = key2
+ @retval > 0 if key1 > key2
+*/
+
+static int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2)
+{
+ Field *f= (Field*) arg;
+ return f->cmp(key1, key2);
+}
+
+
+/**
+ Correctly compare composite keys.
+
+ Used by the Unique class to compare keys. Will do correct comparisons
+ for composite keys with various field types.
+
+ @param arg Pointer to the relevant Aggregator_distinct instance
+ @param key1 left key image
+ @param key2 right key image
+ @return comparison result
+ @retval <0 if key1 < key2
+ @retval =0 if key1 = key2
+ @retval >0 if key1 > key2
+*/
+
+int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2)
+{
+ Aggregator_distinct *aggr= (Aggregator_distinct *) arg;
+ Field **field = aggr->table->field;
+ Field **field_end= field + aggr->table->s->fields;
+ uint32 *lengths=aggr->field_lengths;
+ for (; field < field_end; ++field)
+ {
+ Field* f = *field;
+ int len = *lengths++;
+ int res = f->cmp(key1, key2);
+ if (res)
+ return res;
+ key1 += len;
+ key2 += len;
+ }
+ return 0;
+}
+
+
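composite_key_cmp() above compares two packed key images field by field, letting the first differing field decide the order and advancing both pointers by each field's pack length. A stripped-down version of that walk follows, with memcmp standing in for the type-aware Field::cmp():

#include <cstring>
#include <iostream>
#include <vector>

// Compare two packed key images laid out as consecutive fixed-length fields.
// The first field that differs decides the result, exactly like the loop in
// composite_key_cmp(); memcmp is a stand-in for the per-type Field::cmp().
int composite_key_cmp(const unsigned char *key1, const unsigned char *key2,
                      const std::vector<size_t> &field_lengths) {
  for (size_t len : field_lengths) {
    int res = std::memcmp(key1, key2, len);
    if (res)
      return res;
    key1 += len;
    key2 += len;
  }
  return 0;
}

int main() {
  // Two keys with the layout (4-byte field, 2-byte field).
  const unsigned char a[] = {0, 0, 0, 1,  0, 7};
  const unsigned char b[] = {0, 0, 0, 1,  0, 9};
  std::cout << (composite_key_cmp(a, b, {4, 2}) < 0) << "\n";  // 1: a < b on field 2
}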
+static enum enum_field_types
+calc_tmp_field_type(enum enum_field_types table_field_type,
+ Item_result result_type)
+{
+ /* Adjust tmp table type according to the chosen aggregation type */
+ switch (result_type) {
+ case STRING_RESULT:
+ case REAL_RESULT:
+ if (table_field_type != MYSQL_TYPE_FLOAT)
+ table_field_type= MYSQL_TYPE_DOUBLE;
+ break;
+ case INT_RESULT:
+ table_field_type= MYSQL_TYPE_LONGLONG;
+ /* fallthrough */
+ case DECIMAL_RESULT:
+ if (table_field_type != MYSQL_TYPE_LONGLONG)
+ table_field_type= MYSQL_TYPE_NEWDECIMAL;
+ break;
+ case ROW_RESULT:
+ default:
+ DBUG_ASSERT(0);
+ }
+ return table_field_type;
+}
+
+
+/***************************************************************************/
+
+C_MODE_START
+
+/* Declarations for auxiliary C-callbacks */
+
+static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
+{
+ return memcmp(key1, key2, *(uint *) arg);
+}
+
+
+static int item_sum_distinct_walk(void *element, element_count num_of_dups,
+ void *item)
+{
+ return ((Aggregator_distinct*) (item))->unique_walk_function(element);
+}
+
+C_MODE_END
+
+/***************************************************************************/
+/**
+ Called before feeding the first row. Used to allocate/setup
+ the internal structures used for aggregation.
+
+ @param thd Thread descriptor
+ @return status
+ @retval FALSE success
+ @retval TRUE failure
+
+ Prepares Aggregator_distinct to process the incoming stream.
+ Creates the temporary table and the Unique class if needed.
+ Called by Item_sum::aggregator_setup()
+*/
+
+bool Aggregator_distinct::setup(THD *thd)
+{
+ endup_done= FALSE;
+ /*
+ Setup can be called twice for ROLLUP items. This is a bug.
+ Please add DBUG_ASSERT(tree == 0) here when it's fixed.
+ */
+ if (tree || table || tmp_table_param)
+ return FALSE;
+
+ if (item_sum->setup(thd))
+ return TRUE;
+ if (item_sum->sum_func() == Item_sum::COUNT_FUNC ||
+ item_sum->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
+ {
+ List<Item> list;
+ SELECT_LEX *select_lex= thd->lex->current_select;
+
+ if (!(tmp_table_param= new TMP_TABLE_PARAM))
+ return TRUE;
+
+ /* Create a table with an unique key over all parameters */
+ for (uint i=0; i < item_sum->get_arg_count() ; i++)
+ {
+ Item *item=item_sum->get_arg(i);
+ if (list.push_back(item))
+ return TRUE; // End of memory
+ if (item->const_item() && item->is_null())
+ always_null= true;
+ }
+ if (always_null)
+ return FALSE;
+ count_field_types(select_lex, tmp_table_param, list, 0);
+ tmp_table_param->force_copy_fields= item_sum->has_force_copy_fields();
+ DBUG_ASSERT(table == 0);
+ /*
+ Make create_tmp_table() convert BIT columns to BIGINT.
+ This is needed because BIT fields store parts of their data in table's
+ null bits, and we don't have methods to compare two table records, which
+ is needed by Unique which is used when HEAP table is used.
+ */
+ {
+ List_iterator_fast<Item> li(list);
+ Item *item;
+ while ((item= li++))
+ {
+ if (item->type() == Item::FIELD_ITEM &&
+ ((Item_field*)item)->field->type() == FIELD_TYPE_BIT)
+ item->marker=4;
+ }
+ }
+ if (!(table= create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1,
+ 0,
+ (select_lex->options | thd->options),
+ HA_POS_ERROR, (char*)"")))
+ return TRUE;
+ table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows
+ table->no_rows=1;
+
+ if (table->s->db_type() == heap_hton)
+ {
+ /*
+ No blobs, otherwise it would have been MyISAM: set up a compare
+ function and its arguments to use with Unique.
+ */
+ qsort_cmp2 compare_key;
+ void* cmp_arg;
+ Field **field= table->field;
+ Field **field_end= field + table->s->fields;
+ bool all_binary= TRUE;
+
+ for (tree_key_length= 0; field < field_end; ++field)
+ {
+ Field *f= *field;
+ enum enum_field_types type= f->type();
+ tree_key_length+= f->pack_length();
+ if ((type == MYSQL_TYPE_VARCHAR) ||
+ (!f->binary() && (type == MYSQL_TYPE_STRING ||
+ type == MYSQL_TYPE_VAR_STRING)))
+ {
+ all_binary= FALSE;
+ break;
+ }
+ }
+ if (all_binary)
+ {
+ cmp_arg= (void*) &tree_key_length;
+ compare_key= (qsort_cmp2) simple_raw_key_cmp;
+ }
+ else
+ {
+ if (table->s->fields == 1)
+ {
+ /*
+ If we have only one field, which is the most common use of
+ count(distinct), it is much faster to use a simpler key
+ compare method that can take advantage of not having to worry
+ about other fields.
+ */
+ compare_key= (qsort_cmp2) simple_str_key_cmp;
+ cmp_arg= (void*) table->field[0];
+ /* tree_key_length has been set already */
+ }
+ else
+ {
+ uint32 *length;
+ compare_key= (qsort_cmp2) composite_key_cmp;
+ cmp_arg= (void*) this;
+ field_lengths= (uint32*) thd->alloc(table->s->fields * sizeof(uint32));
+ for (tree_key_length= 0, length= field_lengths, field= table->field;
+ field < field_end; ++field, ++length)
+ {
+ *length= (*field)->pack_length();
+ tree_key_length+= *length;
+ }
+ }
+ }
+ DBUG_ASSERT(tree == 0);
+ tree= new Unique(compare_key, cmp_arg, tree_key_length,
+ thd->variables.max_heap_table_size);
+ /*
+ The only time tree_key_length could be 0 is if someone does
+ count(distinct) on a char(0) field - stupid thing to do,
+ but this has to be handled - otherwise someone can crash
+ the server with a DoS attack
+ */
+ if (! tree)
+ return TRUE;
+ }
+ return FALSE;
+ }
+ else
+ {
+ List<Create_field> field_list;
+ Create_field field_def; /* field definition */
+ Item *arg;
+ DBUG_ENTER("Aggregator_distinct::setup");
+ /* It's legal to call setup() more than once when in a subquery */
+ if (tree)
+ DBUG_RETURN(FALSE);
+
+ /*
+ Virtual table and the tree are created anew on each re-execution of
+ PS/SP. Hence all further allocations are performed in the runtime
+ mem_root.
+ */
+ if (field_list.push_back(&field_def))
+ DBUG_RETURN(TRUE);
+
+ item_sum->null_value= item_sum->maybe_null= 1;
+ item_sum->quick_group= 0;
+
+ DBUG_ASSERT(item_sum->get_arg(0)->fixed);
+
+ arg= item_sum->get_arg(0);
+ if (arg->const_item())
+ {
+ (void) arg->val_int();
+ if (arg->null_value)
+ always_null= true;
+ }
+
+ if (always_null)
+ DBUG_RETURN(FALSE);
+
+ enum enum_field_types field_type;
+
+ field_type= calc_tmp_field_type(arg->field_type(),
+ arg->result_type());
+ field_def.init_for_tmp_table(field_type,
+ arg->max_length,
+ arg->decimals,
+ arg->maybe_null,
+ arg->unsigned_flag);
+
+ if (! (table= create_virtual_tmp_table(thd, field_list)))
+ DBUG_RETURN(TRUE);
+
+ /* XXX: check that the case of CHAR(0) works OK */
+ tree_key_length= table->s->reclength - table->s->null_bytes;
+
+ /*
+ Unique handles all unique elements in a tree until they can't fit
+ in. Then the tree is dumped to the temporary file. We can use
+ simple_raw_key_cmp because the table contains numbers only; decimals
+ are converted to binary representation as well.
+ */
+ tree= new Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length,
+ thd->variables.max_heap_table_size);
+
+ DBUG_RETURN(tree == 0);
+ }
+}
+
+
+/**
+ Invalidate calculated value and clear the distinct rows.
+
+ Frees space used by the internal data structures.
+ Removes the accumulated distinct rows. Invalidates the calculated result.
+*/
+
+void Aggregator_distinct::clear()
+{
+ endup_done= FALSE;
+ item_sum->clear();
+ if (tree)
+ tree->reset();
+ /* tree and table can be both null only if always_null */
+ if (item_sum->sum_func() == Item_sum::COUNT_FUNC ||
+ item_sum->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
+ {
+ if (!tree && table)
+ {
+ table->file->extra(HA_EXTRA_NO_CACHE);
+ table->file->ha_delete_all_rows();
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
+ }
+ }
+ else
+ {
+ item_sum->null_value= 1;
+ }
+}
+
+
+/**
+ Process incoming row.
+
+ Add it to Unique/temp hash table if it's unique. Skip the row if
+ not unique.
+ Prepare Aggregator_distinct to process the incoming stream.
+ Create the temporary table and the Unique class if needed.
+ Called by Item_sum::aggregator_add().
+ To actually get the result value in item_sum's buffers
+ Aggregator_distinct::endup() must be called.
+
+ @return status
+ @retval FALSE success
+ @retval TRUE failure
+*/
+
+bool Aggregator_distinct::add()
+{
+ if (always_null)
+ return 0;
+
+ if (item_sum->sum_func() == Item_sum::COUNT_FUNC ||
+ item_sum->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
+ {
+ int error;
+ copy_fields(tmp_table_param);
+ copy_funcs(tmp_table_param->items_to_copy);
+
+ for (Field **field=table->field ; *field ; field++)
+ if ((*field)->is_real_null(0))
+ return 0; // Don't count NULL
+
+ if (tree)
+ {
+ /*
+ The first few bytes of record (at least one) are just markers
+ for deleted and NULLs. We want to skip them since they will
+ bloat the tree without providing any valuable info. Besides,
+ key_length used to initialize the tree didn't include space for them.
+ */
+ return tree->unique_add(table->record[0] + table->s->null_bytes);
+ }
+ if ((error= table->file->ha_write_row(table->record[0])) &&
+ table->file->is_fatal_error(error, HA_CHECK_DUP))
+ return TRUE;
+ return FALSE;
+ }
+ else
+ {
+ item_sum->get_arg(0)->save_in_field(table->field[0], FALSE);
+ if (table->field[0]->is_null())
+ return 0;
+ DBUG_ASSERT(tree);
+ item_sum->null_value= 0;
+ /*
+ '0' values are also stored in the tree. This doesn't matter
+ for SUM(DISTINCT), but is important for AVG(DISTINCT)
+ */
+ return tree->unique_add(table->field[0]->ptr);
+ }
+}
+
+
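Aggregator_distinct::add() above skips any row containing a NULL column and then inserts the packed row image (minus the null-marker bytes) into the Unique tree, so duplicates never reach the aggregate. The sketch below applies the same idea to COUNT(DISTINCT a, b), packing each row into a byte string and letting std::set play the role of Unique; it ignores the spill-to-disk behaviour the real Unique class has.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <optional>
#include <set>
#include <string>
#include <vector>

// One input row for COUNT(DISTINCT a, b); std::nullopt models SQL NULL.
struct Row {
  std::optional<int32_t> a;
  std::optional<int32_t> b;
};

// Pack the two columns into a fixed-length key image, like the record bytes
// handed to Unique::unique_add() (null bytes already stripped).
static std::string pack_key(int32_t a, int32_t b) {
  std::string key(sizeof(int32_t) * 2, '\0');
  std::memcpy(&key[0], &a, sizeof a);
  std::memcpy(&key[sizeof a], &b, sizeof b);
  return key;
}

int main() {
  std::vector<Row> rows = {
    {1, 10}, {1, 10}, {1, 20}, {std::nullopt, 20}, {2, 10},
  };

  std::set<std::string> unique_keys;   // stand-in for the Unique tree
  for (const Row &r : rows) {
    if (!r.a || !r.b)
      continue;                        // any NULL column: don't count the row
    unique_keys.insert(pack_key(*r.a, *r.b));
  }

  // endup(): the aggregate value is simply the number of distinct keys.
  std::cout << "COUNT(DISTINCT a, b) = " << unique_keys.size() << "\n";  // 3
}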
+/**
+ Calculate the aggregate function value.
+
+ Since Aggregator_distinct::add() just collects the distinct rows,
+ we must go over the distinct rows and feed them to the aggregation
+ function before returning its value.
+ This is what endup() does. It also sets the result validity flag
+ endup_done to TRUE so it will not recalculate the aggregate value
+ again if the Item_sum hasn't been reset.
+*/
+
+void Aggregator_distinct::endup()
+{
+ /* prevent consecutive recalculations */
+ if (endup_done)
+ return;
+
+ /* we are going to calculate the aggregate value afresh */
+ item_sum->clear();
+
+ /* The result will definitely be null : no more calculations needed */
+ if (always_null)
+ return;
+
+ if (item_sum->sum_func() == Item_sum::COUNT_FUNC ||
+ item_sum->sum_func() == Item_sum::COUNT_DISTINCT_FUNC)
+ {
+ DBUG_ASSERT(item_sum->fixed == 1);
+ Item_sum_count *sum= (Item_sum_count *)item_sum;
+ if (tree && tree->elements == 0)
+ {
+ /* everything fits in memory */
+ sum->count= (longlong) tree->elements_in_tree();
+ endup_done= TRUE;
+ }
+ if (!tree)
+ {
+ /* there were blobs */
+ table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+ sum->count= table->file->stats.records;
+ endup_done= TRUE;
+ }
+ }
+ else
+ {
+ /*
+ We don't have a tree only if 'setup()' hasn't been called;
+ this is the case of sql_select.cc:return_zero_rows.
+ */
+ if (tree)
+ table->field[0]->set_notnull();
+ }
+
+ if (tree && !endup_done)
+ {
+ /* go over the tree of distinct keys and calculate the aggregate value */
+ use_distinct_values= TRUE;
+ tree->walk(item_sum_distinct_walk, (void*) this);
+ use_distinct_values= FALSE;
+ }
+ /* prevent consecutive recalculations */
+ endup_done= TRUE;
+}
+
+
String *
Item_sum_num::val_str(String *str)
{
@@ -815,10 +1325,27 @@ void Item_sum_sum::fix_length_and_dec()
bool Item_sum_sum::add()
{
DBUG_ENTER("Item_sum_sum::add");
+ bool arg_is_null;
if (hybrid_type == DECIMAL_RESULT)
{
- my_decimal value, *val= args[0]->val_decimal(&value);
- if (!args[0]->null_value)
+ my_decimal value, *val;
+ if (aggr->use_distinct_values)
+ {
+ /*
+ We are aggregating distinct rows. Get the value from the distinct
+ table pointer
+ */
+ Aggregator_distinct *daggr= (Aggregator_distinct *)aggr;
+ val= daggr->table->field[0]->val_decimal (&value);
+ arg_is_null= daggr->table->field[0]->is_null();
+ }
+ else
+ {
+ /* non-distinct aggregation */
+ val= args[0]->val_decimal(&value);
+ arg_is_null= args[0]->null_value;
+ }
+ if (!arg_is_null)
{
my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff^1),
val, dec_buffs + curr_dec_buff);
@@ -828,8 +1355,25 @@ bool Item_sum_sum::add()
}
else
{
- sum+= args[0]->val_real();
- if (!args[0]->null_value)
+ double val;
+ if (aggr->use_distinct_values)
+ {
+ /*
+ We are aggregating distinct rows. Get the value from the distinct
+ table pointer
+ */
+ Aggregator_distinct *daggr= (Aggregator_distinct *)aggr;
+ val= daggr->table->field[0]->val_real ();
+ arg_is_null= daggr->table->field[0]->is_null();
+ }
+ else
+ {
+ /* non-distinct aggregation */
+ val= args[0]->val_real();
+ arg_is_null= args[0]->null_value;
+ }
+ sum+= val;
+ if (!arg_is_null)
null_value= 0;
}
DBUG_RETURN(0);
@@ -839,6 +1383,8 @@ bool Item_sum_sum::add()
longlong Item_sum_sum::val_int()
{
DBUG_ASSERT(fixed == 1);
+ if (aggr)
+ aggr->endup();
if (hybrid_type == DECIMAL_RESULT)
{
longlong result;
@@ -853,6 +1399,8 @@ longlong Item_sum_sum::val_int()
double Item_sum_sum::val_real()
{
DBUG_ASSERT(fixed == 1);
+ if (aggr)
+ aggr->endup();
if (hybrid_type == DECIMAL_RESULT)
my_decimal2double(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, &sum);
return sum;
@@ -861,6 +1409,8 @@ double Item_sum_sum::val_real()
String *Item_sum_sum::val_str(String *str)
{
+ if (aggr)
+ aggr->endup();
if (hybrid_type == DECIMAL_RESULT)
return val_string_from_decimal(str);
return val_string_from_real(str);
@@ -869,311 +1419,54 @@ String *Item_sum_sum::val_str(String *str)
my_decimal *Item_sum_sum::val_decimal(my_decimal *val)
{
+ if (aggr)
+ aggr->endup();
if (hybrid_type == DECIMAL_RESULT)
return (dec_buffs + curr_dec_buff);
return val_decimal_from_real(val);
}
-/***************************************************************************/
-
-C_MODE_START
-
-/* Declarations for auxilary C-callbacks */
-
-static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
-{
- return memcmp(key1, key2, *(uint *) arg);
-}
-
-
-static int item_sum_distinct_walk(void *element, element_count num_of_dups,
- void *item)
-{
- return ((Item_sum_distinct*) (item))->unique_walk_function(element);
-}
-
-C_MODE_END
-
-/* Item_sum_distinct */
-
-Item_sum_distinct::Item_sum_distinct(Item *item_arg)
- :Item_sum_num(item_arg), tree(0)
-{
- /*
- quick_group is an optimizer hint, which means that GROUP BY can be
- handled with help of index on grouped columns.
- By setting quick_group to zero we force creation of temporary table
- to perform GROUP BY.
- */
- quick_group= 0;
-}
-
-
-Item_sum_distinct::Item_sum_distinct(THD *thd, Item_sum_distinct *original)
- :Item_sum_num(thd, original), val(original->val), tree(0),
- table_field_type(original->table_field_type)
-{
- quick_group= 0;
-}
-
-
/**
- Behaves like an Integer except to fix_length_and_dec().
- Additionally div() converts val with this traits to a val with true
- decimal traits along with conversion of integer value to decimal value.
- This is to speedup SUM/AVG(DISTINCT) evaluation for 8-32 bit integer
- values.
+ Aggregate a distinct row from the distinct hash table.
+
+ Called for each row in the hash table 'Aggregator_distinct::table'.
+ Includes the current distinct row into the calculation of the
+ aggregate value. Uses the Field classes to get the value from the row.
+ This function is used for AVG/SUM(DISTINCT). For COUNT(DISTINCT)
+ it's called only when there are no blob arguments and the data don't
+ fit into memory (so Unique makes persisted trees on disk).
+
+ @param element pointer to the row data.
+
+ @return status
+ @retval FALSE success
+ @retval TRUE failure
*/
-struct Hybrid_type_traits_fast_decimal: public
- Hybrid_type_traits_integer
-{
- virtual Item_result type() const { return DECIMAL_RESULT; }
- virtual void fix_length_and_dec(Item *item, Item *arg) const
- { Hybrid_type_traits_decimal::instance()->fix_length_and_dec(item, arg); }
-
- virtual void div(Hybrid_type *val, ulonglong u) const
- {
- int2my_decimal(E_DEC_FATAL_ERROR, val->integer, 0, val->dec_buf);
- val->used_dec_buf_no= 0;
- val->traits= Hybrid_type_traits_decimal::instance();
- val->traits->div(val, u);
- }
- static const Hybrid_type_traits_fast_decimal *instance();
- Hybrid_type_traits_fast_decimal() {};
-};
-
-static const Hybrid_type_traits_fast_decimal fast_decimal_traits_instance;
-
-const Hybrid_type_traits_fast_decimal
- *Hybrid_type_traits_fast_decimal::instance()
-{
- return &fast_decimal_traits_instance;
-}
-
-void Item_sum_distinct::fix_length_and_dec()
+
+bool Aggregator_distinct::unique_walk_function(void *element)
{
- DBUG_ASSERT(args[0]->fixed);
-
- table_field_type= args[0]->field_type();
-
- /* Adjust tmp table type according to the chosen aggregation type */
- switch (args[0]->result_type()) {
- case STRING_RESULT:
- case REAL_RESULT:
- val.traits= Hybrid_type_traits::instance();
- if (table_field_type != MYSQL_TYPE_FLOAT)
- table_field_type= MYSQL_TYPE_DOUBLE;
- break;
- case INT_RESULT:
- /*
- Preserving int8, int16, int32 field types gives ~10% performance boost
- as the size of result tree becomes significantly smaller.
- Another speed up we gain by using longlong for intermediate
- calculations. The range of int64 is enough to hold sum 2^32 distinct
- integers each <= 2^32.
- */
- if (table_field_type == MYSQL_TYPE_INT24 ||
- (table_field_type >= MYSQL_TYPE_TINY &&
- table_field_type <= MYSQL_TYPE_LONG))
- {
- val.traits= Hybrid_type_traits_fast_decimal::instance();
- break;
- }
- table_field_type= MYSQL_TYPE_LONGLONG;
- /* fallthrough */
- case DECIMAL_RESULT:
- val.traits= Hybrid_type_traits_decimal::instance();
- if (table_field_type != MYSQL_TYPE_LONGLONG)
- table_field_type= MYSQL_TYPE_NEWDECIMAL;
- break;
- case ROW_RESULT:
- default:
- DBUG_ASSERT(0);
- }
- val.traits->fix_length_and_dec(this, args[0]);
+ memcpy(table->field[0]->ptr, element, tree_key_length);
+ item_sum->add();
+ return 0;
}
-/**
- @todo
- check that the case of CHAR(0) works OK
-*/
-bool Item_sum_distinct::setup(THD *thd)
+Aggregator_distinct::~Aggregator_distinct()
{
- List<Create_field> field_list;
- Create_field field_def; /* field definition */
- DBUG_ENTER("Item_sum_distinct::setup");
- /* It's legal to call setup() more than once when in a subquery */
if (tree)
- DBUG_RETURN(FALSE);
-
- /*
- Virtual table and the tree are created anew on each re-execution of
- PS/SP. Hence all further allocations are performed in the runtime
- mem_root.
- */
- if (field_list.push_back(&field_def))
- DBUG_RETURN(TRUE);
-
- null_value= maybe_null= 1;
- quick_group= 0;
-
- DBUG_ASSERT(args[0]->fixed);
-
- field_def.init_for_tmp_table(table_field_type, args[0]->max_length,
- args[0]->decimals, args[0]->maybe_null,
- args[0]->unsigned_flag);
-
- if (! (table= create_virtual_tmp_table(thd, field_list)))
- DBUG_RETURN(TRUE);
-
- /* XXX: check that the case of CHAR(0) works OK */
- tree_key_length= table->s->reclength - table->s->null_bytes;
-
- /*
- Unique handles all unique elements in a tree until they can't fit
- in. Then the tree is dumped to the temporary file. We can use
- simple_raw_key_cmp because the table contains numbers only; decimals
- are converted to binary representation as well.
- */
- tree= new Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length,
- thd->variables.max_heap_table_size);
-
- is_evaluated= FALSE;
- DBUG_RETURN(tree == 0);
-}
-
-
-bool Item_sum_distinct::add()
-{
- args[0]->save_in_field(table->field[0], FALSE);
- is_evaluated= FALSE;
- if (!table->field[0]->is_null())
{
- DBUG_ASSERT(tree);
- null_value= 0;
- /*
- '0' values are also stored in the tree. This doesn't matter
- for SUM(DISTINCT), but is important for AVG(DISTINCT)
- */
- return tree->unique_add(table->field[0]->ptr);
+ delete tree;
+ tree= NULL;
}
- return 0;
-}
-
-
-bool Item_sum_distinct::unique_walk_function(void *element)
-{
- memcpy(table->field[0]->ptr, element, tree_key_length);
- ++count;
- val.traits->add(&val, table->field[0]);
- return 0;
-}
-
-
-void Item_sum_distinct::clear()
-{
- DBUG_ENTER("Item_sum_distinct::clear");
- DBUG_ASSERT(tree != 0); /* we always have a tree */
- null_value= 1;
- tree->reset();
- is_evaluated= FALSE;
- DBUG_VOID_RETURN;
-}
-
-void Item_sum_distinct::cleanup()
-{
- Item_sum_num::cleanup();
- delete tree;
- tree= 0;
- table= 0;
- is_evaluated= FALSE;
-}
-
-Item_sum_distinct::~Item_sum_distinct()
-{
- delete tree;
- /* no need to free the table */
-}
-
-
-void Item_sum_distinct::calculate_val_and_count()
-{
- if (!is_evaluated)
+ if (table)
{
- count= 0;
- val.traits->set_zero(&val);
- /*
- We don't have a tree only if 'setup()' hasn't been called;
- this is the case of sql_select.cc:return_zero_rows.
- */
- if (tree)
- {
- table->field[0]->set_notnull();
- tree->walk(item_sum_distinct_walk, (void*) this);
- }
- is_evaluated= TRUE;
+ free_tmp_table(table->in_use, table);
+ table=NULL;
}
-}
-
-
-double Item_sum_distinct::val_real()
-{
- calculate_val_and_count();
- return val.traits->val_real(&val);
-}
-
-
-my_decimal *Item_sum_distinct::val_decimal(my_decimal *to)
-{
- calculate_val_and_count();
- if (null_value)
- return 0;
- return val.traits->val_decimal(&val, to);
-}
-
-
-longlong Item_sum_distinct::val_int()
-{
- calculate_val_and_count();
- return val.traits->val_int(&val, unsigned_flag);
-}
-
-
-String *Item_sum_distinct::val_str(String *str)
-{
- calculate_val_and_count();
- if (null_value)
- return 0;
- return val.traits->val_str(&val, str, decimals);
-}
-
-/* end of Item_sum_distinct */
-
-/* Item_sum_avg_distinct */
-
-void
-Item_sum_avg_distinct::fix_length_and_dec()
-{
- Item_sum_distinct::fix_length_and_dec();
- prec_increment= current_thd->variables.div_precincrement;
- /*
- AVG() will divide val by count. We need to reserve digits
- after decimal point as the result can be fractional.
- */
- decimals= min(decimals + prec_increment, NOT_FIXED_DEC);
-}
-
-
-void
-Item_sum_avg_distinct::calculate_val_and_count()
-{
- if (!is_evaluated)
+ if (tmp_table_param)
{
- Item_sum_distinct::calculate_val_and_count();
- if (count)
- val.traits->div(&val, count);
- is_evaluated= TRUE;
+ delete tmp_table_param;
+ tmp_table_param= NULL;
}
}
@@ -1200,6 +1493,8 @@ bool Item_sum_count::add()
longlong Item_sum_count::val_int()
{
DBUG_ASSERT(fixed == 1);
+ if (aggr)
+ aggr->endup();
return (longlong) count;
}
@@ -1289,6 +1584,8 @@ bool Item_sum_avg::add()
double Item_sum_avg::val_real()
{
DBUG_ASSERT(fixed == 1);
+ if (aggr)
+ aggr->endup();
if (!count)
{
null_value=1;
@@ -1303,6 +1600,8 @@ my_decimal *Item_sum_avg::val_decimal(my_decimal *val)
my_decimal sum_buff, cnt;
const my_decimal *sum_dec;
DBUG_ASSERT(fixed == 1);
+ if (aggr)
+ aggr->endup();
if (!count)
{
null_value=1;
@@ -1325,6 +1624,8 @@ my_decimal *Item_sum_avg::val_decimal(my_decimal *val)
String *Item_sum_avg::val_str(String *str)
{
+ if (aggr)
+ aggr->endup();
if (hybrid_type == DECIMAL_RESULT)
return val_string_from_decimal(str);
return val_string_from_real(str);
@@ -1857,6 +2158,7 @@ void Item_sum_hybrid::reset_field()
void Item_sum_sum::reset_field()
{
+ DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
if (hybrid_type == DECIMAL_RESULT)
{
my_decimal value, *arg_val= args[0]->val_decimal(&value);
@@ -1881,6 +2183,7 @@ void Item_sum_count::reset_field()
{
uchar *res=result_field->ptr;
longlong nr=0;
+ DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
if (!args[0]->maybe_null || !args[0]->is_null())
nr=1;
@@ -1891,6 +2194,7 @@ void Item_sum_count::reset_field()
void Item_sum_avg::reset_field()
{
uchar *res=result_field->ptr;
+ DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
if (hybrid_type == DECIMAL_RESULT)
{
longlong tmp;
@@ -1944,6 +2248,7 @@ void Item_sum_bit::update_field()
void Item_sum_sum::update_field()
{
+ DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
if (hybrid_type == DECIMAL_RESULT)
{
my_decimal value, *arg_val= args[0]->val_decimal(&value);
@@ -1996,6 +2301,9 @@ void Item_sum_avg::update_field()
{
longlong field_count;
uchar *res=result_field->ptr;
+
+ DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
+
if (hybrid_type == DECIMAL_RESULT)
{
my_decimal value, *arg_val= args[0]->val_decimal(&value);
@@ -2299,318 +2607,6 @@ double Item_variance_field::val_real()
/****************************************************************************
-** COUNT(DISTINCT ...)
-****************************************************************************/
-
-int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2)
-{
- Field *f= (Field*) arg;
- return f->cmp(key1, key2);
-}
-
-/**
- Did not make this one static - at least gcc gets confused when
- I try to declare a static function as a friend. If you can figure
- out the syntax to make a static function a friend, make this one
- static
-*/
-
-int composite_key_cmp(void* arg, uchar* key1, uchar* key2)
-{
- Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg;
- Field **field = item->table->field;
- Field **field_end= field + item->table->s->fields;
- uint32 *lengths=item->field_lengths;
- for (; field < field_end; ++field)
- {
- Field* f = *field;
- int len = *lengths++;
- int res = f->cmp(key1, key2);
- if (res)
- return res;
- key1 += len;
- key2 += len;
- }
- return 0;
-}
-
-
-C_MODE_START
-
-static int count_distinct_walk(void *elem, element_count count, void *arg)
-{
- (*((ulonglong*)arg))++;
- return 0;
-}
-
-C_MODE_END
-
-
-void Item_sum_count_distinct::cleanup()
-{
- DBUG_ENTER("Item_sum_count_distinct::cleanup");
- Item_sum_int::cleanup();
-
- /* Free objects only if we own them. */
- if (!original)
- {
- /*
- We need to delete the table and the tree in cleanup() as
- they were allocated in the runtime memroot. Using the runtime
- memroot reduces memory footprint for PS/SP and simplifies setup().
- */
- delete tree;
- tree= 0;
- is_evaluated= FALSE;
- if (table)
- {
- free_tmp_table(table->in_use, table);
- table= 0;
- }
- delete tmp_table_param;
- tmp_table_param= 0;
- }
- always_null= FALSE;
- DBUG_VOID_RETURN;
-}
-
-
-/**
- This is used by rollup to create a separate usable copy of
- the function.
-*/
-
-void Item_sum_count_distinct::make_unique()
-{
- table=0;
- original= 0;
- force_copy_fields= 1;
- tree= 0;
- is_evaluated= FALSE;
- tmp_table_param= 0;
- always_null= FALSE;
-}
-
-
-Item_sum_count_distinct::~Item_sum_count_distinct()
-{
- cleanup();
-}
-
-
-bool Item_sum_count_distinct::setup(THD *thd)
-{
- List<Item> list;
- SELECT_LEX *select_lex= thd->lex->current_select;
-
- /*
- Setup can be called twice for ROLLUP items. This is a bug.
- Please add DBUG_ASSERT(tree == 0) here when it's fixed.
- It's legal to call setup() more than once when in a subquery
- */
- if (tree || table || tmp_table_param)
- return FALSE;
-
- if (!(tmp_table_param= new TMP_TABLE_PARAM))
- return TRUE;
-
- /* Create a table with an unique key over all parameters */
- for (uint i=0; i < arg_count ; i++)
- {
- Item *item=args[i];
- if (list.push_back(item))
- return TRUE; // End of memory
- if (item->const_item() && item->is_null())
- always_null= 1;
- }
- if (always_null)
- return FALSE;
- count_field_types(select_lex, tmp_table_param, list, 0);
- tmp_table_param->force_copy_fields= force_copy_fields;
- DBUG_ASSERT(table == 0);
- /*
- Make create_tmp_table() convert BIT columns to BIGINT.
- This is needed because BIT fields store parts of their data in table's
- null bits, and we don't have methods to compare two table records, which
- is needed by Unique which is used when HEAP table is used.
- */
- {
- List_iterator_fast<Item> li(list);
- Item *item;
- while ((item= li++))
- {
- if (item->type() == Item::FIELD_ITEM &&
- ((Item_field*)item)->field->type() == FIELD_TYPE_BIT)
- item->marker=4;
- }
- }
-
- if (!(table= create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1,
- 0,
- (select_lex->options | thd->options),
- HA_POS_ERROR, (char*)"")))
- return TRUE;
- table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows
- table->no_rows=1;
-
- if (table->s->db_type() == heap_hton)
- {
- /*
- No blobs, otherwise it would have been MyISAM: set up a compare
- function and its arguments to use with Unique.
- */
- qsort_cmp2 compare_key;
- void* cmp_arg;
- Field **field= table->field;
- Field **field_end= field + table->s->fields;
- bool all_binary= TRUE;
-
- for (tree_key_length= 0; field < field_end; ++field)
- {
- Field *f= *field;
- enum enum_field_types f_type= f->type();
- tree_key_length+= f->pack_length();
- if ((f_type == MYSQL_TYPE_VARCHAR) ||
- (!f->binary() && (f_type == MYSQL_TYPE_STRING ||
- f_type == MYSQL_TYPE_VAR_STRING)))
- {
- all_binary= FALSE;
- break;
- }
- }
- if (all_binary)
- {
- cmp_arg= (void*) &tree_key_length;
- compare_key= (qsort_cmp2) simple_raw_key_cmp;
- }
- else
- {
- if (table->s->fields == 1)
- {
- /*
- If we have only one field, which is the most common use of
- count(distinct), it is much faster to use a simpler key
- compare method that can take advantage of not having to worry
- about other fields.
- */
- compare_key= (qsort_cmp2) simple_str_key_cmp;
- cmp_arg= (void*) table->field[0];
- /* tree_key_length has been set already */
- }
- else
- {
- uint32 *length;
- compare_key= (qsort_cmp2) composite_key_cmp;
- cmp_arg= (void*) this;
- field_lengths= (uint32*) thd->alloc(table->s->fields * sizeof(uint32));
- for (tree_key_length= 0, length= field_lengths, field= table->field;
- field < field_end; ++field, ++length)
- {
- *length= (*field)->pack_length();
- tree_key_length+= *length;
- }
- }
- }
- DBUG_ASSERT(tree == 0);
- tree= new Unique(compare_key, cmp_arg, tree_key_length,
- thd->variables.max_heap_table_size);
- /*
- The only time tree_key_length could be 0 is if someone does
- count(distinct) on a char(0) field - stupid thing to do,
- but this has to be handled - otherwise someone can crash
- the server with a DoS attack
- */
- is_evaluated= FALSE;
- if (! tree)
- return TRUE;
- }
- return FALSE;
-}
-
-
-Item *Item_sum_count_distinct::copy_or_same(THD* thd)
-{
- return new (thd->mem_root) Item_sum_count_distinct(thd, this);
-}
-
-
-void Item_sum_count_distinct::clear()
-{
- /* tree and table can be both null only if always_null */
- is_evaluated= FALSE;
- if (tree)
- {
- tree->reset();
- }
- else if (table)
- {
- table->file->extra(HA_EXTRA_NO_CACHE);
- table->file->ha_delete_all_rows();
- table->file->extra(HA_EXTRA_WRITE_CACHE);
- }
-}
-
-bool Item_sum_count_distinct::add()
-{
- int error;
- if (always_null)
- return 0;
- copy_fields(tmp_table_param);
- copy_funcs(tmp_table_param->items_to_copy);
-
- for (Field **field=table->field ; *field ; field++)
- if ((*field)->is_real_null(0))
- return 0; // Don't count NULL
-
- is_evaluated= FALSE;
- if (tree)
- {
- /*
- The first few bytes of record (at least one) are just markers
- for deleted and NULLs. We want to skip them since they will
- bloat the tree without providing any valuable info. Besides,
- key_length used to initialize the tree didn't include space for them.
- */
- return tree->unique_add(table->record[0] + table->s->null_bytes);
- }
- if ((error= table->file->ha_write_row(table->record[0])) &&
- table->file->is_fatal_error(error, HA_CHECK_DUP))
- return TRUE;
- return FALSE;
-}
-
-
-longlong Item_sum_count_distinct::val_int()
-{
- int error;
- DBUG_ASSERT(fixed == 1);
- if (!table) // Empty query
- return LL(0);
- if (tree)
- {
- if (is_evaluated)
- return count;
-
- if (tree->elements == 0)
- return (longlong) tree->elements_in_tree(); // everything fits in memory
- count= 0;
- tree->walk(count_distinct_walk, (void*) &count);
- is_evaluated= TRUE;
- return (longlong) count;
- }
-
- error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
-
- if(error)
- {
- table->file->print_error(error, MYF(0));
- }
-
- return table->file->stats.records;
-}
-
-
-/****************************************************************************
** Functions to handle dynamic loadable aggregates
** Original source by: Alexis Mikhailov <root@medinf.chuvashia.su>
** Adapted for UDAs by: Andreas F. Bobak <bobak@relog.ch>.
@@ -2931,6 +2927,8 @@ int dump_leaf_key(uchar* key, element_count count __attribute__((unused)),
result->append(*res);
}
+ item->row_count++;
+
/* stop if length of result more than max_length */
if (result->length() > item->max_length)
{
@@ -2949,8 +2947,11 @@ int dump_leaf_key(uchar* key, element_count count __attribute__((unused)),
result->length(),
&well_formed_error);
result->length(old_length + add_length);
- item->count_cut_values++;
item->warning_for_row= TRUE;
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_CUT_VALUE_GROUP_CONCAT, ER(ER_CUT_VALUE_GROUP_CONCAT),
+ item->row_count);
+
return 1;
}
return 0;
@@ -2970,12 +2971,12 @@ Item_func_group_concat::
Item_func_group_concat(Name_resolution_context *context_arg,
bool distinct_arg, List<Item> *select_list,
SQL_LIST *order_list, String *separator_arg)
- :tmp_table_param(0), warning(0),
- separator(separator_arg), tree(0), unique_filter(NULL), table(0),
+ :tmp_table_param(0), separator(separator_arg), tree(0),
+ unique_filter(NULL), table(0),
order(0), context(context_arg),
arg_count_order(order_list ? order_list->elements : 0),
arg_count_field(select_list->elements),
- count_cut_values(0),
+ row_count(0),
distinct(distinct_arg),
warning_for_row(FALSE),
force_copy_fields(0), original(0)
@@ -3029,7 +3030,6 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
Item_func_group_concat *item)
:Item_sum(thd, item),
tmp_table_param(item->tmp_table_param),
- warning(item->warning),
separator(item->separator),
tree(item->tree),
unique_filter(item->unique_filter),
@@ -3038,7 +3038,7 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
context(item->context),
arg_count_order(item->arg_count_order),
arg_count_field(item->arg_count_field),
- count_cut_values(item->count_cut_values),
+ row_count(item->row_count),
distinct(item->distinct),
warning_for_row(item->warning_for_row),
always_null(item->always_null),
@@ -3056,15 +3056,6 @@ void Item_func_group_concat::cleanup()
DBUG_ENTER("Item_func_group_concat::cleanup");
Item_sum::cleanup();
- /* Adjust warning message to include total number of cut values */
- if (warning)
- {
- char warn_buff[MYSQL_ERRMSG_SIZE];
- sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values);
- warning->set_msg(current_thd, warn_buff);
- warning= 0;
- }
-
/*
Free table and tree if they belong to this item (if item have not pointer
to original item from which was made copy => it own its objects )
@@ -3088,15 +3079,8 @@ void Item_func_group_concat::cleanup()
delete unique_filter;
unique_filter= NULL;
}
- if (warning)
- {
- char warn_buff[MYSQL_ERRMSG_SIZE];
- sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values);
- warning->set_msg(thd, warn_buff);
- warning= 0;
- }
}
- DBUG_ASSERT(tree == 0 && warning == 0);
+ DBUG_ASSERT(tree == 0);
}
DBUG_VOID_RETURN;
}
@@ -3385,17 +3369,6 @@ String* Item_func_group_concat::val_str(String* str)
/* Tree is used for sorting as in ORDER BY */
tree_walk(tree, (tree_walk_action)&dump_leaf_key, (void*)this,
left_root_right);
- if (count_cut_values && !warning)
- {
- /*
- ER_CUT_VALUE_GROUP_CONCAT needs an argument, but this gets set in
- Item_func_group_concat::cleanup().
- */
- DBUG_ASSERT(table);
- warning= push_warning(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_CUT_VALUE_GROUP_CONCAT,
- ER(ER_CUT_VALUE_GROUP_CONCAT));
- }
return &result;
}
diff --git a/sql/item_sum.h b/sql/item_sum.h
index f70da52bcd1..88be83137cb 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_SUM_INCLUDED
+#define ITEM_SUM_INCLUDED
+
/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -22,7 +25,87 @@
#include <my_tree.h>
-/*
+class Item_sum;
+class Aggregator_distinct;
+class Aggregator_simple;
+
+/**
+ The abstract base class for the Aggregator_* classes.
+  It implements the data collection functions (setup/add/clear)
+  either as pass-throughs to the real functionality or
+  as collectors into a Unique (for DISTINCT) structure.
+
+  Note that update_field/reset_field are not in this
+  class, because they are simply not called when
+  GROUP BY/DISTINCT can be handled with the help of an index on the grouped
+  fields (quick_group = 0).
+*/
+
+class Aggregator : public Sql_alloc
+{
+ friend class Item_sum;
+ friend class Item_sum_sum;
+ friend class Item_sum_count;
+ friend class Item_sum_avg;
+
+ /*
+ All members are protected as this class is not usable outside of an
+ Item_sum descendant.
+ */
+protected:
+ /* the aggregate function class to act on */
+ Item_sum *item_sum;
+
+ /**
+    When feeding the data back in endup() from the Unique/temp table to the
+    Item_sum::add() methods, we must read the data from the Unique (and not
+    recalculate the functions that are given as arguments to the aggregate
+    function).
+    This flag tells the add() methods to take the data from the Unique
+    instead, by calling the relevant val_..() method.
+ */
+
+ bool use_distinct_values;
+
+public:
+ Aggregator (Item_sum *arg): item_sum(arg), use_distinct_values(FALSE) {}
+ virtual ~Aggregator () {} /* Keep gcc happy */
+
+ enum Aggregator_type { SIMPLE_AGGREGATOR, DISTINCT_AGGREGATOR };
+ virtual Aggregator_type Aggrtype() = 0;
+
+ /**
+ Called before adding the first row.
+ Allocates and sets up the internal aggregation structures used,
+ e.g. the Unique instance used to calculate distinct.
+ */
+ virtual bool setup(THD *) = 0;
+
+ /**
+    Called when we need to wipe out all the data from the aggregator:
+    all the values accumulated and all the state.
+ Cleans up the internal structures and resets them to their initial state.
+ */
+ virtual void clear() = 0;
+
+ /**
+ Called when there's a new value to be aggregated.
+ Updates the internal state of the aggregator to reflect the new value.
+ */
+ virtual bool add() = 0;
+
+ /**
+    Called when there is no more data and the final value is to be retrieved.
+    Finalises the state of the aggregator so that the final result can be retrieved.
+ */
+ virtual void endup() = 0;
+
+};
+
+
+class st_select_lex;
+
+/**
Class Item_sum is the base class used for special expressions that SQL calls
'set functions'. These expressions are formed with the help of aggregate
functions such as SUM, MAX, GROUP_CONCAT etc.
@@ -215,13 +298,38 @@
TODO: to catch queries where the limit is exceeded to make the
code clean here.
-*/
-
-class st_select_lex;
+*/
class Item_sum :public Item_result_field
{
+protected:
+ /**
+ Aggregator class instance. Not set initially. Allocated only after
+ it is determined if the incoming data are already distinct.
+ */
+ Aggregator *aggr;
+
+private:
+ /**
+ Used in making ROLLUP. Set for the ROLLUP copies of the original
+ Item_sum and passed to create_tmp_field() to cause it to work
+ over the temp table buffer that is referenced by
+ Item_result_field::result_field.
+ */
+ bool force_copy_fields;
+
+ /**
+    Indicates how the aggregate function was specified by the parser:
+    1 if it was written as AGGREGATE(DISTINCT),
+    0 if it was written as AGGREGATE().
+ */
+ bool with_distinct;
+
public:
+
+ bool has_force_copy_fields() const { return force_copy_fields; }
+ bool has_with_distinct() const { return with_distinct; }
+
enum Sumfunctype
{ COUNT_FUNC, COUNT_DISTINCT_FUNC, SUM_FUNC, SUM_DISTINCT_FUNC, AVG_FUNC,
AVG_DISTINCT_FUNC, MIN_FUNC, MAX_FUNC, STD_FUNC,
@@ -263,47 +371,28 @@ public:
Item_sum() :quick_group(1), arg_count(0), forced_const(FALSE)
{
mark_as_sum_func();
+ init_aggregator();
}
Item_sum(Item *a) :quick_group(1), arg_count(1), args(tmp_args),
orig_args(tmp_orig_args), forced_const(FALSE)
{
args[0]=a;
mark_as_sum_func();
+ init_aggregator();
}
Item_sum( Item *a, Item *b ) :quick_group(1), arg_count(2), args(tmp_args),
orig_args(tmp_orig_args), forced_const(FALSE)
{
args[0]=a; args[1]=b;
mark_as_sum_func();
+ init_aggregator();
}
Item_sum(List<Item> &list);
//Copy constructor, need to perform subselects with temporary tables
Item_sum(THD *thd, Item_sum *item);
enum Type type() const { return SUM_FUNC_ITEM; }
virtual enum Sumfunctype sum_func () const=0;
-
- /*
- This method is similar to add(), but it is called when the current
- aggregation group changes. Thus it performs a combination of
- clear() and add().
- */
- inline bool reset() { clear(); return add(); };
-
- /*
- Prepare this item for evaluation of an aggregate value. This is
- called by reset() when a group changes, or, for correlated
- subqueries, between subquery executions. E.g. for COUNT(), this
- method should set count= 0;
- */
- virtual void clear()= 0;
-
- /*
- This method is called for the next row in the same group. Its
- purpose is to aggregate the new value to the previous values in
- the group (i.e. since clear() was called last time). For example,
- for COUNT(), do count++.
- */
- virtual bool add()=0;
+ inline bool reset() { aggregator_clear(); return aggregator_add(); };
/*
Called when new group is started and results are being saved in
@@ -327,11 +416,6 @@ public:
{ return new Item_field(field); }
table_map used_tables() const { return used_tables_cache; }
void update_used_tables ();
- void cleanup()
- {
- Item::cleanup();
- forced_const= FALSE;
- }
bool is_null() { return null_value; }
void make_const ()
{
@@ -343,7 +427,9 @@ public:
virtual void print(String *str, enum_query_type query_type);
void fix_num_length_and_dec();
- /*
+ /**
+ Mark an aggregate as having no rows.
+
This function is called by the execution engine to assign 'NO ROWS
FOUND' value to an aggregate item, when the underlying result set
has no rows. Such value, in a general case, may be different from
@@ -351,10 +437,15 @@ public:
may be initialized to 0 by clear() and to NULL by
no_rows_in_result().
*/
- void no_rows_in_result() { clear(); }
-
- virtual bool setup(THD *thd) {return 0;}
- virtual void make_unique() {}
+ virtual void no_rows_in_result()
+ {
+ if (!aggr)
+ set_aggregator(with_distinct ?
+ Aggregator::DISTINCT_AGGREGATOR :
+ Aggregator::SIMPLE_AGGREGATOR);
+ reset();
+ }
+ virtual void make_unique() { force_copy_fields= TRUE; }
Item *get_tmp_table_item(THD *thd);
virtual Field *create_tmp_field(bool group, TABLE *table,
uint convert_blob_length);
@@ -365,9 +456,170 @@ public:
st_select_lex *depended_from()
{ return (nest_level == aggr_level ? 0 : aggr_sel); }
- Item *get_arg(int i) { return args[i]; }
- Item *set_arg(int i, THD *thd, Item *new_val);
+ Item *get_arg(uint i) { return args[i]; }
+ Item *set_arg(uint i, THD *thd, Item *new_val);
uint get_arg_count() { return arg_count; }
+
+ /* Initialization of distinct related members */
+ void init_aggregator()
+ {
+ aggr= NULL;
+ with_distinct= FALSE;
+ force_copy_fields= FALSE;
+ }
+
+ /**
+ Called to initialize the aggregator.
+ */
+
+ inline bool aggregator_setup(THD *thd) { return aggr->setup(thd); };
+
+ /**
+ Called to cleanup the aggregator.
+ */
+
+ inline void aggregator_clear() { aggr->clear(); }
+
+ /**
+ Called to add value to the aggregator.
+ */
+
+ inline bool aggregator_add() { return aggr->add(); };
+
+ /* stores the declared DISTINCT flag (from the parser) */
+ void set_distinct(bool distinct)
+ {
+ with_distinct= distinct;
+ quick_group= with_distinct ? 0 : 1;
+ }
+
+ /**
+    Set the type of aggregation: DISTINCT or not.
+
+    Called when the final determination about the aggregation
+    type has been made and the object is about to be used.
+ */
+
+ int set_aggregator(Aggregator::Aggregator_type aggregator);
+
+ virtual void clear()= 0;
+ virtual bool add()= 0;
+ virtual bool setup(THD *thd) { return false; }
+
+ virtual void cleanup();
+};
+
+
+class Unique;
+
+
+/**
+ The distinct aggregator.
+  Implements AGGFN(DISTINCT ..).
+  Collects all the data into a Unique (similarly to what Item_sum_distinct
+  does currently) and then (if applicable) iterates over the list of
+  unique values and pumps them back into its object.
+*/
+
+class Aggregator_distinct : public Aggregator
+{
+ friend class Item_sum_sum;
+
+ /*
+ flag to prevent consecutive runs of endup(). Normally in endup there are
+ expensive calculations (like walking the distinct tree for example)
+ which we must do only once if there are no data changes.
+ We can re-use the data for the second and subsequent val_xxx() calls.
+ endup_done set to TRUE also means that the calculated values for
+ the aggregate functions are correct and don't need recalculation.
+ */
+ bool endup_done;
+
+ /*
+ Used depending on the type of the aggregate function and the presence of
+ blob columns in it:
+ - For COUNT(DISTINCT) and no blob fields this points to a real temporary
+ table. It's used as a hash table.
+ - For AVG/SUM(DISTINCT) or COUNT(DISTINCT) with blob fields only the
+ in-memory data structure of a temporary table is constructed.
+ It's used by the Field classes to transform data into row format.
+ */
+ TABLE *table;
+
+ /*
+    An array of the field lengths in a row, allocated and used only for
+    COUNT(DISTINCT) with multiple columns and no blobs. Used in
+    Aggregator_distinct::composite_key_cmp() (called from Unique to compare
+    nodes).
+ */
+ uint32 *field_lengths;
+
+ /*
+ Used in conjunction with 'table' to support the access to Field classes
+ for COUNT(DISTINCT). Needed by copy_fields()/copy_funcs().
+ */
+ TMP_TABLE_PARAM *tmp_table_param;
+
+ /*
+ If there are no blobs in the COUNT(DISTINCT) arguments, we can use a tree,
+    which is faster than a heap table. In that case, we still use the table
+ to help get things set up, but we insert nothing in it.
+ For AVG/SUM(DISTINCT) we always use this tree (as it takes a single
+ argument) to get the distinct rows.
+ */
+ Unique *tree;
+
+ /*
+ The length of the temp table row. Must be a member of the class as it
+ gets passed down to simple_raw_key_cmp () as a compare function argument
+ to Unique. simple_raw_key_cmp () is used as a fast comparison function
+ when the entire row can be binary compared.
+ */
+ uint tree_key_length;
+
+ /*
+ Set to true if the result is known to be always NULL.
+    If set, it deactivates the creation and usage of the temporary table (in the
+ 'table' member) and the Unique instance (in the 'tree' member) as well as
+ the calculation of the final value on the first call to
+ Item_[sum|avg|count]::val_xxx().
+ */
+ bool always_null;
+
+public:
+ Aggregator_distinct (Item_sum *sum) :
+ Aggregator(sum), table(NULL), tmp_table_param(NULL), tree(NULL),
+ always_null(FALSE) {}
+ virtual ~Aggregator_distinct ();
+ Aggregator_type Aggrtype() { return DISTINCT_AGGREGATOR; }
+
+ bool setup(THD *);
+ void clear();
+ bool add();
+ void endup();
+
+ bool unique_walk_function(void *element);
+ static int composite_key_cmp(void* arg, uchar* key1, uchar* key2);
+};
+
+
+/**
+ The pass-through aggregator.
+  Implements AGGFN(DISTINCT ..) by relying on the fact that its input is
+  already distinct, so it simply passes the rows on to the Item_sum descendant class.
+*/
+class Aggregator_simple : public Aggregator
+{
+public:
+
+ Aggregator_simple (Item_sum *sum) :
+ Aggregator(sum) {}
+ Aggregator_type Aggrtype() { return Aggregator::SIMPLE_AGGREGATOR; }
+
+ bool setup(THD * thd) { return item_sum->setup(thd); }
+ void clear() { item_sum->clear(); }
+ bool add() { return item_sum->add(); }
+ void endup() {};
};
@@ -427,9 +679,15 @@ protected:
void fix_length_and_dec();
public:
- Item_sum_sum(Item *item_par) :Item_sum_num(item_par) {}
+ Item_sum_sum(Item *item_par, bool distinct) :Item_sum_num(item_par)
+ {
+ set_distinct(distinct);
+ }
Item_sum_sum(THD *thd, Item_sum_sum *item);
- enum Sumfunctype sum_func () const {return SUM_FUNC;}
+ enum Sumfunctype sum_func () const
+ {
+ return has_with_distinct() ? SUM_DISTINCT_FUNC : SUM_FUNC;
+ }
void clear();
bool add();
double val_real();
@@ -440,109 +698,50 @@ public:
void reset_field();
void update_field();
void no_rows_in_result() {}
- const char *func_name() const { return "sum("; }
+ const char *func_name() const
+ {
+ return has_with_distinct() ? "sum(distinct " : "sum(";
+ }
Item *copy_or_same(THD* thd);
};
-
-/* Common class for SUM(DISTINCT), AVG(DISTINCT) */
-
-class Unique;
-
-class Item_sum_distinct :public Item_sum_num
+class Item_sum_count :public Item_sum_int
{
-protected:
- /* storage for the summation result */
- ulonglong count;
- Hybrid_type val;
- /* storage for unique elements */
- Unique *tree;
- TABLE *table;
- enum enum_field_types table_field_type;
- uint tree_key_length;
-protected:
- Item_sum_distinct(THD *thd, Item_sum_distinct *item);
-public:
- Item_sum_distinct(Item *item_par);
- ~Item_sum_distinct();
+ longlong count;
+
+ friend class Aggregator_distinct;
- bool setup(THD *thd);
void clear();
- void cleanup();
bool add();
- double val_real();
- my_decimal *val_decimal(my_decimal *);
- longlong val_int();
- String *val_str(String *str);
-
- /* XXX: does it need make_unique? */
-
- enum Sumfunctype sum_func () const { return SUM_DISTINCT_FUNC; }
- void reset_field() {} // not used
- void update_field() {} // not used
- virtual void no_rows_in_result() {}
- void fix_length_and_dec();
- enum Item_result result_type () const { return val.traits->type(); }
- virtual void calculate_val_and_count();
- virtual bool unique_walk_function(void *elem);
-};
-
-
-/*
- Item_sum_sum_distinct - implementation of SUM(DISTINCT expr).
- See also: MySQL manual, chapter 'Adding New Functions To MySQL'
- and comments in item_sum.cc.
-*/
-
-class Item_sum_sum_distinct :public Item_sum_distinct
-{
-private:
- Item_sum_sum_distinct(THD *thd, Item_sum_sum_distinct *item)
- :Item_sum_distinct(thd, item) {}
-public:
- Item_sum_sum_distinct(Item *item_arg) :Item_sum_distinct(item_arg) {}
-
- enum Sumfunctype sum_func () const { return SUM_DISTINCT_FUNC; }
- const char *func_name() const { return "sum(distinct "; }
- Item *copy_or_same(THD* thd) { return new Item_sum_sum_distinct(thd, this); }
-};
-
-
-/* Item_sum_avg_distinct - SELECT AVG(DISTINCT expr) FROM ... */
-
-class Item_sum_avg_distinct: public Item_sum_distinct
-{
-private:
- Item_sum_avg_distinct(THD *thd, Item_sum_avg_distinct *original)
- :Item_sum_distinct(thd, original) {}
-public:
- uint prec_increment;
- Item_sum_avg_distinct(Item *item_arg) : Item_sum_distinct(item_arg) {}
-
- void fix_length_and_dec();
- virtual void calculate_val_and_count();
- enum Sumfunctype sum_func () const { return AVG_DISTINCT_FUNC; }
- const char *func_name() const { return "avg(distinct "; }
- Item *copy_or_same(THD* thd) { return new Item_sum_avg_distinct(thd, this); }
-};
-
-
-class Item_sum_count :public Item_sum_int
-{
- longlong count;
+ void cleanup();
public:
Item_sum_count(Item *item_par)
:Item_sum_int(item_par),count(0)
{}
+
+ /**
+    Constructs an instance for COUNT(DISTINCT).
+
+    @param list  a list of the arguments to the aggregate function
+
+    This constructor is called by the parser only for COUNT(DISTINCT).
+ */
+
+ Item_sum_count(List<Item> &list)
+ :Item_sum_int(list),count(0)
+ {
+ set_distinct(TRUE);
+ }
Item_sum_count(THD *thd, Item_sum_count *item)
:Item_sum_int(thd, item), count(item->count)
{}
- enum Sumfunctype sum_func () const { return COUNT_FUNC; }
- void clear();
+ enum Sumfunctype sum_func () const
+ {
+ return has_with_distinct() ? COUNT_DISTINCT_FUNC : COUNT_FUNC;
+ }
void no_rows_in_result() { count=0; }
- bool add();
void make_const(longlong count_arg)
{
count=count_arg;
@@ -550,76 +749,12 @@ class Item_sum_count :public Item_sum_int
}
longlong val_int();
void reset_field();
- void cleanup();
void update_field();
- const char *func_name() const { return "count("; }
- Item *copy_or_same(THD* thd);
-};
-
-
-class TMP_TABLE_PARAM;
-
-class Item_sum_count_distinct :public Item_sum_int
-{
- TABLE *table;
- uint32 *field_lengths;
- TMP_TABLE_PARAM *tmp_table_param;
- bool force_copy_fields;
- /*
- If there are no blobs, we can use a tree, which
- is faster than heap table. In that case, we still use the table
- to help get things set up, but we insert nothing in it
- */
- Unique *tree;
- /*
- Storage for the value of count between calls to val_int() so val_int()
- will not recalculate on each call. Validitiy of the value is stored in
- is_evaluated.
- */
- longlong count;
- /*
- Following is 0 normal object and pointer to original one for copy
- (to correctly free resources)
- */
- Item_sum_count_distinct *original;
- uint tree_key_length;
-
-
- bool always_null; // Set to 1 if the result is always NULL
-
-
- friend int composite_key_cmp(void* arg, uchar* key1, uchar* key2);
- friend int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2);
-
-public:
- Item_sum_count_distinct(List<Item> &list)
- :Item_sum_int(list), table(0), field_lengths(0), tmp_table_param(0),
- force_copy_fields(0), tree(0), count(0),
- original(0), always_null(FALSE)
- { quick_group= 0; }
- Item_sum_count_distinct(THD *thd, Item_sum_count_distinct *item)
- :Item_sum_int(thd, item), table(item->table),
- field_lengths(item->field_lengths),
- tmp_table_param(item->tmp_table_param),
- force_copy_fields(0), tree(item->tree), count(item->count),
- original(item), tree_key_length(item->tree_key_length),
- always_null(item->always_null)
- {}
- ~Item_sum_count_distinct();
-
- void cleanup();
-
- enum Sumfunctype sum_func () const { return COUNT_DISTINCT_FUNC; }
- void clear();
- bool add();
- longlong val_int();
- void reset_field() { return ;} // Never called
- void update_field() { return ; } // Never called
- const char *func_name() const { return "count(distinct "; }
- bool setup(THD *thd);
- void make_unique();
+ const char *func_name() const
+ {
+ return has_with_distinct() ? "count(distinct " : "count(";
+ }
Item *copy_or_same(THD* thd);
- void no_rows_in_result() {}
};
@@ -659,13 +794,18 @@ public:
uint prec_increment;
uint f_precision, f_scale, dec_bin_size;
- Item_sum_avg(Item *item_par) :Item_sum_sum(item_par), count(0) {}
+ Item_sum_avg(Item *item_par, bool distinct)
+ :Item_sum_sum(item_par, distinct), count(0)
+ {}
Item_sum_avg(THD *thd, Item_sum_avg *item)
:Item_sum_sum(thd, item), count(item->count),
prec_increment(item->prec_increment) {}
void fix_length_and_dec();
- enum Sumfunctype sum_func () const {return AVG_FUNC;}
+ enum Sumfunctype sum_func () const
+ {
+ return has_with_distinct() ? AVG_DISTINCT_FUNC : AVG_FUNC;
+ }
void clear();
bool add();
double val_real();
@@ -678,7 +818,10 @@ public:
Item *result_item(Field *field)
{ return new Item_avg_field(hybrid_type, this); }
void no_rows_in_result() {}
- const char *func_name() const { return "avg("; }
+ const char *func_name() const
+ {
+ return has_with_distinct() ? "avg(distinct " : "avg(";
+ }
Item *copy_or_same(THD* thd);
Field *create_tmp_field(bool group, TABLE *table, uint convert_blob_length);
void cleanup()
@@ -1172,12 +1315,9 @@ public:
#endif /* HAVE_DLOPEN */
-class MYSQL_ERROR;
-
class Item_func_group_concat : public Item_sum
{
TMP_TABLE_PARAM *tmp_table_param;
- MYSQL_ERROR *warning;
String result;
String *separator;
TREE tree_base;
@@ -1198,7 +1338,7 @@ class Item_func_group_concat : public Item_sum
uint arg_count_order;
/** The number of selected items, aka the expr list. */
uint arg_count_field;
- uint count_cut_values;
+ uint row_count;
bool distinct;
bool warning_for_row;
bool always_null;
@@ -1270,3 +1410,5 @@ public:
virtual bool change_context_processor(uchar *cntx)
{ context= (Name_resolution_context *)cntx; return FALSE; }
};
+
+#endif /* ITEM_SUM_INCLUDED */
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index b293145cc27..8208b0b930d 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -597,7 +597,7 @@ err:
{
char buff[128];
strmake(buff, val_begin, min(length, sizeof(buff)-1));
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE),
date_time_type, buff, "str_to_date");
}
@@ -936,6 +936,48 @@ longlong Item_func_to_days::val_int()
}
+longlong Item_func_to_seconds::val_int_endpoint(bool left_endp,
+ bool *incl_endp)
+{
+ DBUG_ASSERT(fixed == 1);
+ MYSQL_TIME ltime;
+ longlong seconds;
+ longlong days;
+ int dummy; /* unused */
+ if (get_arg0_date(&ltime, TIME_FUZZY_DATE))
+ {
+ /* got NULL, leave the incl_endp intact */
+ return LONGLONG_MIN;
+ }
+ seconds= ltime.hour * 3600L + ltime.minute * 60 + ltime.second;
+ seconds= ltime.neg ? -seconds : seconds;
+ days= (longlong) calc_daynr(ltime.year, ltime.month, ltime.day);
+ seconds+= days * 24L * 3600L;
+ /* Set to NULL if invalid date, but keep the value */
+ null_value= check_date(&ltime,
+ (ltime.year || ltime.month || ltime.day),
+ (TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE),
+ &dummy);
+ /*
+    Even if the evaluation returns NULL, seconds is still useful for pruning
+ */
+ return seconds;
+}
+
+longlong Item_func_to_seconds::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ MYSQL_TIME ltime;
+ longlong seconds;
+ longlong days;
+ if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE))
+ return 0;
+ seconds= ltime.hour * 3600L + ltime.minute * 60 + ltime.second;
+ seconds=ltime.neg ? -seconds : seconds;
+ days= (longlong) calc_daynr(ltime.year, ltime.month, ltime.day);
+ return seconds + days * 24L * 3600L;
+}
+
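
The TO_SECONDS() arithmetic above is simply the time of day folded into seconds plus the day number scaled by 86400. Below is a tiny standalone sketch of just that formula; the day number (calc_daynr() in the server) is passed in as a plain parameter here rather than recomputed, so the helper is illustrative only.

    // Sketch of the formula used by Item_func_to_seconds::val_int() above.
    // The day number is a parameter; in the server it comes from calc_daynr().
    #include <cstdio>

    long long to_seconds(long long daynr, int hour, int minute, int second)
    {
      long long secs= hour * 3600LL + minute * 60LL + second;
      return secs + daynr * 24LL * 3600LL;
    }

    int main()
    {
      // Day number 1 and time 01:02:03 gives 86400 + 3723 = 90123.
      printf("%lld\n", to_seconds(1, 1, 2, 3));
      return 0;
    }
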
/*
Get information about this Item tree monotonicity
@@ -962,6 +1004,17 @@ enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const
return NON_MONOTONIC;
}
+enum_monotonicity_info Item_func_to_seconds::get_monotonicity_info() const
+{
+ if (args[0]->type() == Item::FIELD_ITEM)
+ {
+ if (args[0]->field_type() == MYSQL_TYPE_DATE ||
+ args[0]->field_type() == MYSQL_TYPE_DATETIME)
+ return MONOTONIC_STRICT_INCREASING_NOT_NULL;
+ }
+ return NON_MONOTONIC;
+}
+
longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp)
{
@@ -2493,10 +2546,11 @@ String *Item_char_typecast::val_str(String *str)
str_value= *res; // Not malloced string
res= &str_value;
}
+ ErrConvString err(res);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), char_type,
- res->c_ptr_safe());
+ err.ptr());
res->length((uint) length);
}
else if (cast_cs == &my_charset_bin && res->length() < (uint) cast_length)
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 9e3c2e8c89f..cdd74c8c601 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_TIMEFUNC_INCLUDED
+#define ITEM_TIMEFUNC_INCLUDED
+
/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -73,6 +76,24 @@ public:
};
+class Item_func_to_seconds :public Item_int_func
+{
+public:
+ Item_func_to_seconds(Item *a) :Item_int_func(a) {}
+ longlong val_int();
+ const char *func_name() const { return "to_seconds"; }
+ void fix_length_and_dec()
+ {
+ decimals=0;
+ max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
+ maybe_null=1;
+ }
+ enum_monotonicity_info get_monotonicity_info() const;
+ longlong val_int_endpoint(bool left_endp, bool *incl_endp);
+ bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
+};
+
+
class Item_func_dayofmonth :public Item_int_func
{
public:
@@ -871,6 +892,7 @@ public:
{
decimals=0;
max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN;
+ maybe_null= 1;
}
longlong val_int();
};
@@ -1025,4 +1047,11 @@ public:
Item_func_last_day(Item *a) :Item_date(a) {}
const char *func_name() const { return "last_day"; }
bool get_date(MYSQL_TIME *res, uint fuzzy_date);
+ void fix_length_and_dec()
+ {
+ Item_date::fix_length_and_dec();
+ maybe_null= 1;
+ }
};
+
+#endif /* ITEM_TIMEFUNC_INCLUDED */
diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h
index dadbb5ccf42..6373bde0aab 100644
--- a/sql/item_xmlfunc.h
+++ b/sql/item_xmlfunc.h
@@ -1,3 +1,6 @@
+#ifndef ITEM_XMLFUNC_INCLUDED
+#define ITEM_XMLFUNC_INCLUDED
+
/* Copyright (C) 2000-2005 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -61,3 +64,4 @@ public:
String *val_str(String *);
};
+#endif /* ITEM_XMLFUNC_INCLUDED */
diff --git a/sql/key.cc b/sql/key.cc
index 5b2ae8029dd..93197ec4ff5 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -349,10 +349,29 @@ void key_unpack(String *to,TABLE *table,uint idx)
}
if ((field=key_part->field))
{
+ CHARSET_INFO *cs= field->charset();
field->val_str(&tmp);
+ if (cs->mbmaxlen > 1 &&
+ table->field[key_part->fieldnr - 1]->field_length !=
+ key_part->length)
+ {
+ /*
+ Prefix key, multi-byte charset.
+ For the columns of type CHAR(N), the above val_str()
+ call will return exactly "key_part->length" bytes,
+          which can break a multi-byte character in the middle.
+          Align by returning no more than "char_length" characters.
+ */
+ uint charpos, char_length= key_part->length / cs->mbmaxlen;
+ if ((charpos= my_charpos(cs, tmp.ptr(),
+ tmp.ptr() + tmp.length(),
+ char_length)) < tmp.length())
+ tmp.length(charpos);
+ }
if (key_part->length < field->pack_length())
tmp.length(min(tmp.length(),key_part->length));
- to->append(tmp);
+ ErrConvString err(&tmp);
+ to->append(err.ptr());
}
else
to->append(STRING_WITH_LEN("???"));
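
The key.cc change above trims a prefix-key value back to a character boundary with my_charpos() so that a multi-byte character is never split. Below is a rough standalone analogue for UTF-8 only, with an invented helper name (utf8_prefix_bytes) rather than the server's charset API, and assuming well-formed input.

    // UTF-8-only sketch of the character-boundary truncation done above with
    // my_charpos(). utf8_prefix_bytes() is an invented helper, not a server call.
    #include <cstdio>
    #include <cstring>

    static size_t utf8_prefix_bytes(const char *s, size_t len, size_t max_chars)
    {
      size_t pos= 0, chars= 0;
      while (pos < len && chars < max_chars)
      {
        unsigned char c= (unsigned char) s[pos];
        size_t seq= (c < 0x80) ? 1 : (c < 0xE0) ? 2 : (c < 0xF0) ? 3 : 4;
        if (pos + seq > len)               // incomplete trailing sequence: stop
          break;
        pos+= seq;
        chars++;
      }
      return pos;                          // byte length of the aligned prefix
    }

    int main()
    {
      const char *val= "ab\xC3\xA9zzz";    // "ab", then e-acute (2 bytes), then "zzz"
      size_t cut= utf8_prefix_bytes(val, strlen(val), 3);
      printf("%.*s\n", (int) cut, val);    // prints the first 3 characters (4 bytes)
      return 0;
    }
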
diff --git a/sql/lex.h b/sql/lex.h
index 0a85824f6f7..cd0c042159f 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -1,3 +1,6 @@
+#ifndef LEX_INCLUDED
+#define LEX_INCLUDED
+
/* Copyright (C) 2000-2002 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -96,6 +99,7 @@ static SYMBOL symbols[] = {
{ "CASCADE", SYM(CASCADE)},
{ "CASCADED", SYM(CASCADED)},
{ "CASE", SYM(CASE_SYM)},
+ { "CATALOG_NAME", SYM(CATALOG_NAME_SYM)},
{ "CHAIN", SYM(CHAIN_SYM)},
{ "CHANGE", SYM(CHANGE)},
{ "CHANGED", SYM(CHANGED)},
@@ -105,6 +109,7 @@ static SYMBOL symbols[] = {
{ "CHECK", SYM(CHECK_SYM)},
{ "CHECKSUM", SYM(CHECKSUM_SYM)},
{ "CIPHER", SYM(CIPHER_SYM)},
+ { "CLASS_ORIGIN", SYM(CLASS_ORIGIN_SYM)},
{ "CLIENT", SYM(CLIENT_SYM)},
{ "CLOSE", SYM(CLOSE_SYM)},
{ "COALESCE", SYM(COALESCE)},
@@ -112,6 +117,7 @@ static SYMBOL symbols[] = {
{ "COLLATE", SYM(COLLATE_SYM)},
{ "COLLATION", SYM(COLLATION_SYM)},
{ "COLUMN", SYM(COLUMN_SYM)},
+ { "COLUMN_NAME", SYM(COLUMN_NAME_SYM)},
{ "COLUMNS", SYM(COLUMNS)},
{ "COMMENT", SYM(COMMENT_SYM)},
{ "COMMIT", SYM(COMMIT_SYM)},
@@ -124,6 +130,9 @@ static SYMBOL symbols[] = {
{ "CONNECTION", SYM(CONNECTION_SYM)},
{ "CONSISTENT", SYM(CONSISTENT_SYM)},
{ "CONSTRAINT", SYM(CONSTRAINT)},
+ { "CONSTRAINT_CATALOG", SYM(CONSTRAINT_CATALOG_SYM)},
+ { "CONSTRAINT_NAME", SYM(CONSTRAINT_NAME_SYM)},
+ { "CONSTRAINT_SCHEMA", SYM(CONSTRAINT_SCHEMA_SYM)},
{ "CONTAINS", SYM(CONTAINS_SYM)},
{ "CONTEXT", SYM(CONTEXT_SYM)},
{ "CONTINUE", SYM(CONTINUE_SYM)},
@@ -138,6 +147,7 @@ static SYMBOL symbols[] = {
{ "CURRENT_TIMESTAMP", SYM(NOW_SYM)},
{ "CURRENT_USER", SYM(CURRENT_USER)},
{ "CURSOR", SYM(CURSOR_SYM)},
+ { "CURSOR_NAME", SYM(CURSOR_NAME_SYM)},
{ "DATA", SYM(DATA_SYM)},
{ "DATABASE", SYM(DATABASE)},
{ "DATABASES", SYM(DATABASES)},
@@ -241,6 +251,7 @@ static SYMBOL symbols[] = {
{ "IDENTIFIED", SYM(IDENTIFIED_SYM)},
{ "IF", SYM(IF)},
{ "IGNORE", SYM(IGNORE_SYM)},
+ { "IGNORE_SERVER_IDS", SYM(IGNORE_SERVER_IDS_SYM)},
{ "IMPORT", SYM(IMPORT)},
{ "IN", SYM(IN_SYM)},
{ "INDEX", SYM(INDEX_SYM)},
@@ -320,6 +331,7 @@ static SYMBOL symbols[] = {
{ "MASTER_SSL_KEY", SYM(MASTER_SSL_KEY_SYM)},
{ "MASTER_SSL_VERIFY_SERVER_CERT", SYM(MASTER_SSL_VERIFY_SERVER_CERT_SYM)},
{ "MASTER_USER", SYM(MASTER_USER_SYM)},
+ { "MASTER_HEARTBEAT_PERIOD", SYM(MASTER_HEARTBEAT_PERIOD_SYM)},
{ "MATCH", SYM(MATCH)},
{ "MAX_CONNECTIONS_PER_HOUR", SYM(MAX_CONNECTIONS_PER_HOUR)},
{ "MAX_QUERIES_PER_HOUR", SYM(MAX_QUERIES_PER_HOUR)},
@@ -334,6 +346,7 @@ static SYMBOL symbols[] = {
{ "MEDIUMTEXT", SYM(MEDIUMTEXT)},
{ "MEMORY", SYM(MEMORY_SYM)},
{ "MERGE", SYM(MERGE_SYM)},
+ { "MESSAGE_TEXT", SYM(MESSAGE_TEXT_SYM)},
{ "MICROSECOND", SYM(MICROSECOND_SYM)},
{ "MIDDLEINT", SYM(MEDIUMINT)}, /* For powerbuilder */
{ "MIGRATE", SYM(MIGRATE_SYM)},
@@ -350,6 +363,7 @@ static SYMBOL symbols[] = {
{ "MULTIPOINT", SYM(MULTIPOINT)},
{ "MULTIPOLYGON", SYM(MULTIPOLYGON)},
{ "MUTEX", SYM(MUTEX_SYM)},
+ { "MYSQL_ERRNO", SYM(MYSQL_ERRNO_SYM)},
{ "NAME", SYM(NAME_SYM)},
{ "NAMES", SYM(NAMES_SYM)},
{ "NATIONAL", SYM(NATIONAL_SYM)},
@@ -426,6 +440,7 @@ static SYMBOL symbols[] = {
{ "REDUNDANT", SYM(REDUNDANT_SYM)},
{ "REFERENCES", SYM(REFERENCES)},
{ "REGEXP", SYM(REGEXP)},
+ { "RELAYLOG", SYM(RELAYLOG_SYM)},
{ "RELAY_LOG_FILE", SYM(RELAY_LOG_FILE_SYM)},
{ "RELAY_LOG_POS", SYM(RELAY_LOG_POS_SYM)},
{ "RELAY_THREAD", SYM(RELAY_THREAD)},
@@ -441,6 +456,7 @@ static SYMBOL symbols[] = {
{ "REPEAT", SYM(REPEAT_SYM)},
{ "REQUIRE", SYM(REQUIRE_SYM)},
{ "RESET", SYM(RESET_SYM)},
+ { "RESIGNAL", SYM(RESIGNAL_SYM)},
{ "RESTORE", SYM(RESTORE_SYM)},
{ "RESTRICT", SYM(RESTRICT)},
{ "RESUME", SYM(RESUME_SYM)},
@@ -459,6 +475,7 @@ static SYMBOL symbols[] = {
{ "SAVEPOINT", SYM(SAVEPOINT_SYM)},
{ "SCHEDULE", SYM(SCHEDULE_SYM)},
{ "SCHEMA", SYM(DATABASE)},
+ { "SCHEMA_NAME", SYM(SCHEMA_NAME_SYM)},
{ "SCHEMAS", SYM(DATABASES)},
{ "SECOND", SYM(SECOND_SYM)},
{ "SECOND_MICROSECOND", SYM(SECOND_MICROSECOND_SYM)},
@@ -474,6 +491,7 @@ static SYMBOL symbols[] = {
{ "SHARE", SYM(SHARE_SYM)},
{ "SHOW", SYM(SHOW)},
{ "SHUTDOWN", SYM(SHUTDOWN)},
+ { "SIGNAL", SYM(SIGNAL_SYM)},
{ "SIGNED", SYM(SIGNED_SYM)},
{ "SIMPLE", SYM(SIMPLE_SYM)},
{ "SLAVE", SYM(SLAVE)},
@@ -515,6 +533,7 @@ static SYMBOL symbols[] = {
{ "STORAGE", SYM(STORAGE_SYM)},
{ "STRAIGHT_JOIN", SYM(STRAIGHT_JOIN)},
{ "STRING", SYM(STRING_SYM)},
+ { "SUBCLASS_ORIGIN", SYM(SUBCLASS_ORIGIN_SYM)},
{ "SUBJECT", SYM(SUBJECT_SYM)},
{ "SUBPARTITION", SYM(SUBPARTITION_SYM)},
{ "SUBPARTITIONS", SYM(SUBPARTITIONS_SYM)},
@@ -523,6 +542,7 @@ static SYMBOL symbols[] = {
{ "SWAPS", SYM(SWAPS_SYM)},
{ "SWITCHES", SYM(SWITCHES_SYM)},
{ "TABLE", SYM(TABLE_SYM)},
+ { "TABLE_NAME", SYM(TABLE_NAME_SYM)},
{ "TABLES", SYM(TABLES)},
{ "TABLESPACE", SYM(TABLESPACE)},
{ "TABLE_CHECKSUM", SYM(TABLE_CHECKSUM_SYM)},
@@ -593,6 +613,7 @@ static SYMBOL symbols[] = {
{ "X509", SYM(X509_SYM)},
{ "XOR", SYM(XOR)},
{ "XA", SYM(XA_SYM)},
+ { "XML", SYM(XML_SYM)}, /* LOAD XML Arnold/Erik */
{ "YEAR", SYM(YEAR_SYM)},
{ "YEAR_MONTH", SYM(YEAR_MONTH_SYM)},
{ "ZEROFILL", SYM(ZEROFILL)},
@@ -634,3 +655,5 @@ static SYMBOL sql_functions[] = {
{ "VAR_POP", SYM(VARIANCE_SYM)},
{ "VAR_SAMP", SYM(VAR_SAMP_SYM)},
};
+
+#endif /* LEX_INCLUDED */
diff --git a/sql/lock.cc b/sql/lock.cc
index 93d8b868688..c0cda1dbf03 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -1029,11 +1029,11 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list, bool check_in_use)
if (check_in_use)
{
/* Only insert the table if we haven't insert it already */
- for (table=(TABLE*) hash_first(&open_cache, (uchar*)key,
- key_length, &state);
+ for (table=(TABLE*) my_hash_first(&open_cache, (uchar*)key,
+ key_length, &state);
table ;
- table = (TABLE*) hash_next(&open_cache,(uchar*) key,
- key_length, &state))
+ table = (TABLE*) my_hash_next(&open_cache,(uchar*) key,
+ key_length, &state))
{
if (table->in_use == thd)
{
@@ -1060,7 +1060,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list)
{
if (table_list->table)
{
- hash_delete(&open_cache, (uchar*) table_list->table);
+ my_hash_delete(&open_cache, (uchar*) table_list->table);
broadcast_refresh();
}
}
@@ -1235,11 +1235,11 @@ is_table_name_exclusively_locked_by_this_thread(THD *thd, uchar *key,
HASH_SEARCH_STATE state;
TABLE *table;
- for (table= (TABLE*) hash_first(&open_cache, key,
- key_length, &state);
+ for (table= (TABLE*) my_hash_first(&open_cache, key,
+ key_length, &state);
table ;
- table= (TABLE*) hash_next(&open_cache, key,
- key_length, &state))
+ table= (TABLE*) my_hash_next(&open_cache, key,
+ key_length, &state))
{
if (table->in_use == thd &&
table->open_placeholder == 1 &&
diff --git a/sql/log.cc b/sql/log.cc
index b4c9e5eb4cc..64a4def103c 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -33,11 +33,12 @@
#include <stdarg.h>
#include <m_ctype.h> // For test_if_number
-#ifdef __NT__
+#ifdef _WIN32
#include "message.h"
#endif
#include <mysql/plugin.h>
+#include "rpl_handler.h"
/* max size of the log message */
#define MAX_LOG_BUFFER_SIZE 1024
@@ -49,8 +50,7 @@
LOGGER logger;
-MYSQL_BIN_LOG mysql_bin_log;
-ulong sync_binlog_counter= 0;
+MYSQL_BIN_LOG mysql_bin_log(&sync_binlog_period);
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
@@ -80,23 +80,28 @@ public:
virtual ~Silence_log_table_errors() {}
- virtual bool handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sql_state,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
const char *message() const { return m_message; }
};
bool
-Silence_log_table_errors::handle_error(uint /* sql_errno */,
- const char *message_arg,
- MYSQL_ERROR::enum_warning_level /* level */,
- THD * /* thd */)
-{
- strmake(m_message, message_arg, sizeof(m_message)-1);
+Silence_log_table_errors::handle_condition(THD *,
+ uint,
+ const char*,
+ MYSQL_ERROR::enum_warning_level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
+{
+ *cond_hdl= NULL;
+ strmake(m_message, msg, sizeof(m_message)-1);
return TRUE;
}
-
sql_print_message_func sql_print_message_handlers[3] =
{
sql_print_information,
@@ -105,9 +110,16 @@ sql_print_message_func sql_print_message_handlers[3] =
};
+/**
+ Create the name of the default general log file
+
+  @param[in] buff     Location for building the new string.
+  @param[in] log_ext  The extension for the file (e.g. .log).
+  @returns Pointer to a new string containing the name.
+*/
char *make_default_log_name(char *buff,const char* log_ext)
{
- strmake(buff, pidfile_name, FN_REFLEN-5);
+ strmake(buff, default_logfile_name, FN_REFLEN-5);
return fn_format(buff, buff, mysql_data_home, log_ext,
MYF(MY_UNPACK_FILENAME|MY_REPLACE_EXT));
}
@@ -964,6 +976,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
uint user_host_len= 0;
ulonglong query_utime, lock_utime;
+ DBUG_ASSERT(thd->enable_slow_log);
/*
Print the message to the buffer if we have slow log enabled
*/
@@ -1670,7 +1683,7 @@ bool MYSQL_BIN_LOG::check_write_error(THD *thd)
if (!thd->is_error())
DBUG_RETURN(checked);
- switch (thd->main_da.sql_errno())
+ switch (thd->stmt_da->sql_errno())
{
case ER_TRANS_CACHE_FULL:
case ER_ERROR_ON_WRITE:
@@ -1798,7 +1811,7 @@ err:
DBUG_RETURN(-1);
}
-#ifdef __NT__
+#ifdef _WIN32
static int eventSource = 0;
static void setup_windows_event_source()
@@ -1833,7 +1846,7 @@ static void setup_windows_event_source()
RegCloseKey(hRegKey);
}
-#endif /* __NT__ */
+#endif /* _WIN32 */
/**
@@ -1964,7 +1977,7 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
#ifdef EMBEDDED_LIBRARY
"embedded library\n",
my_progname, server_version, MYSQL_COMPILATION_COMMENT
-#elif __NT__
+#elif _WIN32
"started with:\nTCP Port: %d, Named Pipe: %s\n",
my_progname, server_version, MYSQL_COMPILATION_COMMENT,
mysqld_port, mysqld_unix_port
@@ -2415,10 +2428,11 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
-MYSQL_BIN_LOG::MYSQL_BIN_LOG()
+MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
:bytes_written(0), prepared_xids(0), file_id(1), open_count(1),
need_start_event(TRUE), m_table_map_version(0),
- is_relay_log(0),
+ sync_period_ptr(sync_period),
+ is_relay_log(0), signal_cnt(0),
description_event_for_exec(0), description_event_for_queue(0)
{
/*
@@ -2926,7 +2940,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
}
else
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
@@ -2957,7 +2971,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
}
else
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
@@ -3288,7 +3302,7 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
*/
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with getting info on being purged %s; "
"consider examining correspondence "
@@ -3334,7 +3348,7 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
{
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
@@ -3433,7 +3447,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
*/
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with getting info on being purged %s; "
"consider examining correspondence "
@@ -3650,6 +3664,8 @@ bool MYSQL_BIN_LOG::append(Log_event* ev)
}
bytes_written+= ev->data_written;
DBUG_PRINT("info",("max_size: %lu",max_size));
+ if (flush_and_sync(0))
+ goto err;
if ((uint) my_b_append_tell(&log_file) > max_size)
new_file_without_locking();
@@ -3680,6 +3696,8 @@ bool MYSQL_BIN_LOG::appendv(const char* buf, uint len,...)
bytes_written += len;
} while ((buf=va_arg(args,const char*)) && (len=va_arg(args,uint)));
DBUG_PRINT("info",("max_size: %lu",max_size));
+ if (flush_and_sync(0))
+ goto err;
if ((uint) my_b_append_tell(&log_file) > max_size)
new_file_without_locking();
@@ -3689,17 +3707,21 @@ err:
DBUG_RETURN(error);
}
-
-bool MYSQL_BIN_LOG::flush_and_sync()
+bool MYSQL_BIN_LOG::flush_and_sync(bool *synced)
{
int err=0, fd=log_file.file;
+ if (synced)
+ *synced= 0;
safe_mutex_assert_owner(&LOCK_log);
if (flush_io_cache(&log_file))
return 1;
- if (++sync_binlog_counter >= sync_binlog_period && sync_binlog_period)
+ uint sync_period= get_sync_period();
+ if (sync_period && ++sync_counter >= sync_period)
{
- sync_binlog_counter= 0;
+ sync_counter= 0;
err=my_sync(fd, MYF(MY_WME));
+ if (synced)
+ *synced= 1;
}
return err;
}
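
The reworked flush_and_sync() above only calls my_sync() on every Nth flush, with N re-read through sync_period_ptr each time so the server variable can change at runtime. The following is a minimal standalone sketch of that counter pattern, using plain stdio plus POSIX fsync() and invented names, purely to illustrate the sync_binlog-style grouping.

    // Sketch of the "sync every Nth flush" pattern; names are invented and the
    // period is read through a pointer, mirroring sync_period_ptr above.
    #include <cstdio>
    #include <unistd.h>                     // fsync(), fileno()

    struct PeriodicSync
    {
      unsigned *period_ptr;                 // points at a sync_binlog-like variable
      unsigned counter;

      // Returns 0 on success; *synced (if given) reports whether fsync() ran.
      int flush_and_sync(FILE *f, bool *synced)
      {
        if (synced)
          *synced= false;
        if (fflush(f))
          return 1;
        unsigned period= *period_ptr;       // re-read: the variable may change
        if (period && ++counter >= period)
        {
          counter= 0;
          if (fsync(fileno(f)))
            return 1;
          if (synced)
            *synced= true;
        }
        return 0;
      }
    };

    int main()
    {
      unsigned sync_period= 3;              // sync on every 3rd flush
      PeriodicSync ps= { &sync_period, 0 };
      FILE *f= fopen("/tmp/periodic_sync_demo.log", "a");
      if (!f)
        return 1;
      for (int i= 0; i < 7; i++)
      {
        bool synced;
        fprintf(f, "event %d\n", i);
        ps.flush_and_sync(f, &synced);
        printf("flush %d synced=%d\n", i, (int) synced);
      }
      fclose(f);
      return 0;
    }
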
@@ -3990,7 +4012,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
if (file == &log_file)
{
- error= flush_and_sync();
+ error= flush_and_sync(0);
if (!error)
{
signal_update();
@@ -4176,8 +4198,16 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
if (file == &log_file) // we are writing to the real log (disk)
{
- if (flush_and_sync())
+ bool synced= 0;
+ if (flush_and_sync(&synced))
goto err;
+
+ if (RUN_HOOK(binlog_storage, after_flush,
+ (thd, log_file_name, file->pos_in_file, synced))) {
+ sql_print_error("Failed to run 'after_flush' hooks");
+ goto err;
+ }
+
signal_update();
rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED);
}
@@ -4432,7 +4462,7 @@ int MYSQL_BIN_LOG::write_cache(IO_CACHE *cache, bool lock_log, bool sync_log)
DBUG_ASSERT(carry == 0);
if (sync_log)
- flush_and_sync();
+ return flush_and_sync(0);
return 0; // All OK
}
@@ -4446,9 +4476,9 @@ int query_error_code(THD *thd, bool not_killed)
if (not_killed)
{
- error= thd->is_error() ? thd->main_da.sql_errno() : 0;
+ error= thd->is_error() ? thd->stmt_da->sql_errno() : 0;
- /* thd->main_da.sql_errno() might be ER_SERVER_SHUTDOWN or
+ /* thd->stmt_da->sql_errno() might be ER_SERVER_SHUTDOWN or
ER_QUERY_INTERRUPTED, So here we need to make sure that error
is not set to these errors when specified not_killed by the
caller.
@@ -4479,7 +4509,7 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd, bool lock)
ev.write(&log_file);
if (lock)
{
- if (!error && !(error= flush_and_sync()))
+ if (!error && !(error= flush_and_sync(0)))
{
signal_update();
rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED);
@@ -4567,7 +4597,8 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event,
if (incident && write_incident(thd, FALSE))
goto err;
- if (flush_and_sync())
+ bool synced= 0;
+ if (flush_and_sync(&synced))
goto err;
DBUG_EXECUTE_IF("half_binlogged_transaction", abort(););
if (cache->error) // Error on read
@@ -4576,6 +4607,15 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event,
write_error=1; // Don't give more errors
goto err;
}
+
+ if (RUN_HOOK(binlog_storage, after_flush,
+ (thd, log_file_name, log_file.pos_in_file, synced)))
+ {
+ sql_print_error("Failed to run 'after_flush' hooks");
+ write_error=1;
+ goto err;
+ }
+
signal_update();
}
@@ -4612,12 +4652,9 @@ err:
/**
- Wait until we get a signal that the binary log has been updated.
+ Wait until we get a signal that the relay log has been updated.
@param thd Thread variable
- @param is_slave If 0, the caller is the Binlog_dump thread from master;
- if 1, the caller is the SQL thread from the slave. This
- influences only thd->proc_info.
@note
One must have a lock on LOCK_log before calling this function.
@@ -4625,22 +4662,53 @@ err:
THD::enter_cond() (see NOTES in sql_class.h).
*/
-void MYSQL_BIN_LOG::wait_for_update(THD* thd, bool is_slave)
+void MYSQL_BIN_LOG::wait_for_update_relay_log(THD* thd)
{
const char *old_msg;
- DBUG_ENTER("wait_for_update");
+ DBUG_ENTER("wait_for_update_relay_log");
old_msg= thd->enter_cond(&update_cond, &LOCK_log,
- is_slave ?
- "Has read all relay log; waiting for the slave I/O "
- "thread to update it" :
- "Has sent all binlog to slave; waiting for binlog "
- "to be updated");
+ "Slave has read all relay log; "
+ "waiting for the slave I/O "
+ "thread to update it" );
pthread_cond_wait(&update_cond, &LOCK_log);
thd->exit_cond(old_msg);
DBUG_VOID_RETURN;
}
+/**
+ Wait until we get a signal that the binary log has been updated.
+ Applies to master only.
+
+  @param[in] thd      a THD struct
+  @param[in] timeout  a pointer to a timespec;
+                      NULL means to wait without a timeout.
+  @retval 0      if we were signalled about an update
+  @retval non-0  if the wait timed out
+ @note
+ LOCK_log must be taken before calling this function.
+ LOCK_log is being released while the thread is waiting.
+ LOCK_log is released by the caller.
+*/
+
+int MYSQL_BIN_LOG::wait_for_update_bin_log(THD* thd,
+ const struct timespec *timeout)
+{
+ int ret= 0;
+ const char* old_msg = thd->proc_info;
+ DBUG_ENTER("wait_for_update_bin_log");
+ old_msg= thd->enter_cond(&update_cond, &LOCK_log,
+ "Master has sent all binlog to slave; "
+ "waiting for binlog to be updated");
+ if (!timeout)
+ pthread_cond_wait(&update_cond, &LOCK_log);
+ else
+ ret= pthread_cond_timedwait(&update_cond, &LOCK_log,
+ const_cast<struct timespec *>(timeout));
+ DBUG_RETURN(ret);
+}
+
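
On the caller side, wait_for_update_bin_log() expects an absolute deadline and waits on the condition while LOCK_log is released. Below is a rough standalone sketch of that pthread_cond_timedwait() pattern, with invented names and a plain bool flag in place of the binlog state; it is not the server's Binlog_dump code.

    // Sketch of a caller using an absolute deadline with pthread_cond_timedwait(),
    // as wait_for_update_bin_log() above does. Names and the bool flag are invented.
    #include <pthread.h>
    #include <cerrno>
    #include <cstdio>
    #include <ctime>

    static pthread_mutex_t lock_log= PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  update_cond= PTHREAD_COND_INITIALIZER;
    static bool updated= false;             // stands in for "new binlog data arrived"

    // Wait up to heartbeat_sec seconds for an update; returns 0 or ETIMEDOUT.
    static int wait_for_update(unsigned heartbeat_sec)
    {
      struct timespec deadline;
      clock_gettime(CLOCK_REALTIME, &deadline);  // timedwait takes an absolute time
      deadline.tv_sec+= heartbeat_sec;

      pthread_mutex_lock(&lock_log);
      int ret= 0;
      while (!updated && ret != ETIMEDOUT)
        ret= pthread_cond_timedwait(&update_cond, &lock_log, &deadline);
      pthread_mutex_unlock(&lock_log);
      return ret == ETIMEDOUT ? ETIMEDOUT : 0;
    }

    int main()
    {
      // Nothing signals update_cond here, so this returns ETIMEDOUT after ~2s,
      // which is the point where a heartbeat event would be sent instead.
      printf("wait returned %d\n", wait_for_update(2));
      return 0;
    }
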
/**
Close the log file.
@@ -4853,11 +4921,12 @@ bool flush_error_log()
void MYSQL_BIN_LOG::signal_update()
{
DBUG_ENTER("MYSQL_BIN_LOG::signal_update");
+ signal_cnt++;
pthread_cond_broadcast(&update_cond);
DBUG_VOID_RETURN;
}
-#ifdef __NT__
+#ifdef _WIN32
static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
size_t length, size_t buffLen)
{
@@ -4890,7 +4959,7 @@ static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
DBUG_VOID_RETURN;
}
-#endif /* __NT__ */
+#endif /* _WIN32 */
/**
@@ -4952,7 +5021,7 @@ int vprint_msg_to_log(enum loglevel level, const char *format, va_list args)
length= my_vsnprintf(buff, sizeof(buff), format, args);
print_buffer_to_file(level, buff);
-#ifdef __NT__
+#ifdef _WIN32
print_buffer_to_nt_eventlog(level, buff, length, sizeof(buff));
#endif
@@ -5432,8 +5501,8 @@ int TC_LOG_MMAP::recover()
goto err1;
}
- if (hash_init(&xids, &my_charset_bin, tc_log_page_size/3, 0,
- sizeof(my_xid), 0, 0, MYF(0)))
+ if (my_hash_init(&xids, &my_charset_bin, tc_log_page_size/3, 0,
+ sizeof(my_xid), 0, 0, MYF(0)))
goto err1;
for ( ; p < end_p ; p++)
@@ -5446,12 +5515,12 @@ int TC_LOG_MMAP::recover()
if (ha_recover(&xids))
goto err2;
- hash_free(&xids);
+ my_hash_free(&xids);
bzero(data, (size_t)file_length);
return 0;
err2:
- hash_free(&xids);
+ my_hash_free(&xids);
err1:
sql_print_error("Crash recovery failed. Either correct the problem "
"(if it's, for example, out of memory error) and restart, "
@@ -5635,8 +5704,8 @@ int TC_LOG_BINLOG::recover(IO_CACHE *log, Format_description_log_event *fdle)
MEM_ROOT mem_root;
if (! fdle->is_valid() ||
- hash_init(&xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0,
- sizeof(my_xid), 0, 0, MYF(0)))
+ my_hash_init(&xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0,
+ sizeof(my_xid), 0, 0, MYF(0)))
goto err1;
init_alloc_root(&mem_root, TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE);
@@ -5660,12 +5729,12 @@ int TC_LOG_BINLOG::recover(IO_CACHE *log, Format_description_log_event *fdle)
goto err2;
free_root(&mem_root, MYF(0));
- hash_free(&xids);
+ my_hash_free(&xids);
return 0;
err2:
free_root(&mem_root, MYF(0));
- hash_free(&xids);
+ my_hash_free(&xids);
err1:
sql_print_error("Crash recovery failed. Either correct the problem "
"(if it's, for example, out of memory error) and restart, "
diff --git a/sql/log.h b/sql/log.h
index d306d6f7182..a31be6dcce6 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -269,6 +269,18 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
ulonglong m_table_map_version;
+  /* Pointer to the sync period variable: for the binary log this is
+     sync_binlog_period, for the relay log it is
+     sync_relay_log_period.
+ */
+ uint *sync_period_ptr;
+ uint sync_counter;
+
+ inline uint get_sync_period()
+ {
+ return *sync_period_ptr;
+ }
+
int write_to_file(IO_CACHE *cache);
/*
This is used to start writing to a new log file. The difference from
@@ -284,7 +296,7 @@ public:
/* This is relay log */
bool is_relay_log;
-
+  ulong signal_cnt;  // updates of this counter are monitored by the heartbeat
/*
These describe the log's format. This is used only for relay logs.
_for_exec is used by the SQL thread, _for_queue by the I/O thread. It's
@@ -296,7 +308,7 @@ public:
Format_description_log_event *description_event_for_exec,
*description_event_for_queue;
- MYSQL_BIN_LOG();
+ MYSQL_BIN_LOG(uint *sync_period);
/*
note that there's no destructor ~MYSQL_BIN_LOG() !
The reason is that we don't want it to be automatically called
@@ -339,7 +351,8 @@ public:
}
void set_max_size(ulong max_size_arg);
void signal_update();
- void wait_for_update(THD* thd, bool master_or_slave);
+ void wait_for_update_relay_log(THD* thd);
+ int wait_for_update_bin_log(THD* thd, const struct timespec * timeout);
void set_need_start_event() { need_start_event = 1; }
void init(bool no_auto_events_arg, ulong max_size);
void init_pthread_objects();
@@ -378,7 +391,20 @@ public:
bool is_active(const char* log_file_name);
int update_log_index(LOG_INFO* linfo, bool need_update_threads);
void rotate_and_purge(uint flags);
- bool flush_and_sync();
+ /**
+ Flush binlog cache and synchronize to disk.
+
+    This function flushes the events in the binlog cache to the binary log
+    file and syncs it to disk according to the setting of the system
+    variable 'sync_binlog'. If the file is synchronized, @c synced will
+    be set to 1, otherwise to 0.
+
+    @param[out] synced  if not NULL, set to 1 if the file was synchronized, otherwise to 0
+
+ @retval 0 Success
+ @retval other Failure
+ */
+ bool flush_and_sync(bool *synced);
int purge_logs(const char *to_log, bool included,
bool need_mutex, bool need_update_threads,
ulonglong *decrease_log_space);
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 5e49f7b3312..3a54717a45f 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -134,7 +134,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
char buff[MAX_SLAVE_ERRMSG], *slider;
const char *buff_end= buff + sizeof(buff);
uint len;
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
MYSQL_ERROR *err;
buff[0]= 0;
@@ -142,10 +142,11 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
slider += len, err= it++)
{
len= my_snprintf(slider, buff_end - slider,
- " %s, Error_code: %d;", err->msg, err->code);
+ " %s, Error_code: %d;", err->get_message_text(),
+ err->get_sql_errno());
}
- rli->report(level, thd->is_error()? thd->main_da.sql_errno() : 0,
+ rli->report(level, thd->is_error()? thd->stmt_da->sql_errno() : 0,
"Could not execute %s event on table %s.%s;"
"%s handler error %s; "
"the event's master log %s, end_log_pos %lu",
@@ -353,13 +354,13 @@ inline int ignored_error_code(int err_code)
*/
int convert_handler_error(int error, THD* thd, TABLE *table)
{
- uint actual_error= (thd->is_error() ? thd->main_da.sql_errno() :
+ uint actual_error= (thd->is_error() ? thd->stmt_da->sql_errno() :
0);
if (actual_error == 0)
{
table->file->print_error(error, MYF(0));
- actual_error= (thd->is_error() ? thd->main_da.sql_errno() :
+ actual_error= (thd->is_error() ? thd->stmt_da->sql_errno() :
ER_UNKNOWN_ERROR);
if (actual_error == ER_UNKNOWN_ERROR)
if (global_system_variables.log_warnings)
@@ -1199,14 +1200,14 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
*/
if (description_event->event_type_permutation)
{
- IF_DBUG({
- int new_event_type=
- description_event->event_type_permutation[event_type];
- DBUG_PRINT("info",
- ("converting event type %d to %d (%s)",
- event_type, new_event_type,
- get_type_str((Log_event_type)new_event_type)));
- });
+#ifndef DBUG_OFF
+ int new_event_type=
+ description_event->event_type_permutation[event_type];
+ DBUG_PRINT("info",
+ ("converting event type %d to %d (%s)",
+ event_type, new_event_type,
+ get_type_str((Log_event_type)new_event_type)));
+#endif
event_type= description_event->event_type_permutation[event_type];
}
@@ -2393,13 +2394,29 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
charset_database_number= thd_arg->variables.collation_database->number;
/*
- If we don't use flags2 for anything else than options contained in
- thd_arg->options, it would be more efficient to flags2=thd_arg->options
- (OPTIONS_WRITTEN_TO_BIN_LOG would be used only at reading time).
- But it's likely that we don't want to use 32 bits for 3 bits; in the future
- we will probably want to reclaim the 29 bits. So we need the &.
+ We only replicate over the bits of flags2 that we need: the rest
+ are masked out by "& OPTIONS_WRITTEN_TO_BINLOG".
+
+ We also force AUTOCOMMIT=1. Rationale (cf. BUG#29288): After
+ fixing BUG#26395, we always write BEGIN and COMMIT around all
+ transactions (even single statements in autocommit mode). This is
+ so that replication from non-transactional to transactional table
+ and error recovery from XA to non-XA table should work as
+ expected. The BEGIN/COMMIT are added in log.cc. However, there is
+ one exception: MyISAM bypasses log.cc and writes directly to the
+ binlog. So if autocommit is off, master has MyISAM, and slave has
+ a transactional engine, then the slave will just see one long
+ never-ending transaction. The only way to bypass explicit
+ BEGIN/COMMIT in the binlog is by using a non-transactional table.
+ So setting AUTOCOMMIT=1 will make this work as expected.
+
+ Note: explicitly replicate AUTOCOMMIT=1 from master. We do not
+ assume AUTOCOMMIT=1 on slave; the slave still reads the state of
+ the autocommit flag as written by the master to the binlog. This
+ behavior may change after WL#4162 has been implemented.
*/
- flags2= (uint32) (thd_arg->options & OPTIONS_WRITTEN_TO_BIN_LOG);
+ flags2= (uint32) (thd_arg->options &
+ (OPTIONS_WRITTEN_TO_BIN_LOG & ~OPTION_NOT_AUTOCOMMIT));
DBUG_ASSERT(thd_arg->variables.character_set_client->number < 256*256);
DBUG_ASSERT(thd_arg->variables.collation_connection->number < 256*256);
DBUG_ASSERT(thd_arg->variables.collation_server->number < 256*256);
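
The masking expression above is easy to misread, so here is a worked example with invented bit values; the real OPTION_* constants are defined elsewhere in the server and differ:

    #include <stdint.h>

    // Invented bits, for illustration only.
    static const uint32_t OPT_AUTO_IS_NULL      = 1u << 0;
    static const uint32_t OPT_NOT_AUTOCOMMIT    = 1u << 1;
    static const uint32_t OPT_NO_FOREIGN_CHECKS = 1u << 2;
    static const uint32_t WRITTEN_TO_BIN_LOG    =
      OPT_AUTO_IS_NULL | OPT_NOT_AUTOCOMMIT | OPT_NO_FOREIGN_CHECKS;

    static uint32_t flags2_example()
    {
      // Session with autocommit off and foreign-key checks disabled:
      uint32_t thd_options= OPT_NOT_AUTOCOMMIT | OPT_NO_FOREIGN_CHECKS;
      // Keep only the replicated bits and clear NOT_AUTOCOMMIT, so the
      // slave always sees AUTOCOMMIT=1 while the other options pass through:
      return thd_options & (WRITTEN_TO_BIN_LOG & ~OPT_NOT_AUTOCOMMIT);
      // result == OPT_NO_FOREIGN_CHECKS
    }
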
@@ -3166,7 +3183,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query());
}
/* If the query was not ignored, it is printed to the general log */
- if (!thd->is_error() || thd->main_da.sql_errno() != ER_SLAVE_IGNORED_TABLE)
+ if (!thd->is_error() || thd->stmt_da->sql_errno() != ER_SLAVE_IGNORED_TABLE)
general_log_write(thd, COM_QUERY, thd->query(), thd->query_length());
compare_errors:
@@ -3175,7 +3192,7 @@ compare_errors:
If we expected a non-zero error code, and we don't get the same error
code, and it should be ignored or is related to a concurrency issue.
*/
- actual_error= thd->is_error() ? thd->main_da.sql_errno() : 0;
+ actual_error= thd->is_error() ? thd->stmt_da->sql_errno() : 0;
DBUG_PRINT("info",("expected_error: %d sql_errno: %d",
expected_error, actual_error));
if ((expected_error && expected_error != actual_error &&
@@ -3190,7 +3207,7 @@ Error on master: '%s' (%d), Error on slave: '%s' (%d). \
Default database: '%s'. Query: '%s'",
ER_SAFE(expected_error),
expected_error,
- actual_error ? thd->main_da.message() : "no error",
+ actual_error ? thd->stmt_da->message() : "no error",
actual_error,
print_slave_db_safe(db), query_arg);
thd->is_slave_error= 1;
@@ -3231,7 +3248,7 @@ Default database: '%s'. Query: '%s'",
{
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on query. Default database: '%s'. Query: '%s'",
- (actual_error ? thd->main_da.message() :
+ (actual_error ? thd->stmt_da->message() :
"unexpected success or fatal error"),
print_slave_db_safe(thd->db), query_arg);
thd->is_slave_error= 1;
@@ -3260,6 +3277,21 @@ Default database: '%s'. Query: '%s'",
*/
} /* End of if (db_ok(... */
+ {/**
+ The following failure injection works in cooperation with tests
+ setting @@global.debug= 'd,stop_slave_middle_group'.
+ The SQL thread receives the killed status and proceeds to shut
+ down, trying to finish the incomplete event group.
+ */
+ DBUG_EXECUTE_IF("stop_slave_middle_group",
+ if (strcmp("COMMIT", query) != 0 &&
+ strcmp("BEGIN", query) != 0)
+ {
+ if (thd->transaction.all.modified_non_trans_table)
+ const_cast<Relay_log_info*>(rli)->abort_slave= 1;
+ };);
+ }
+
end:
/*
Probably we have set thd->query, thd->db, thd->catalog to point to places
@@ -3577,10 +3609,12 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
*/
if (post_header_len)
{
+#ifndef DBUG_OFF
// Allows us to sanity-check that all events initialized their
// events (see the end of this 'if' block).
- IF_DBUG(memset(post_header_len, 255,
- number_of_event_types*sizeof(uint8)););
+ memset(post_header_len, 255,
+ number_of_event_types*sizeof(uint8));
+#endif
/* Note: all event types must explicitly fill in their lengths here. */
post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN;
@@ -3631,13 +3665,12 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
post_header_len[UPDATE_ROWS_EVENT-1]=
post_header_len[DELETE_ROWS_EVENT-1]= 6;);
post_header_len[INCIDENT_EVENT-1]= INCIDENT_HEADER_LEN;
+ post_header_len[HEARTBEAT_LOG_EVENT-1]= 0;
// Sanity-check that all post header lengths are initialized.
- IF_DBUG({
- int i;
- for (i=0; i<number_of_event_types; i++)
- assert(post_header_len[i] != 255);
- });
+ int i;
+ for (i=0; i<number_of_event_types; i++)
+ DBUG_ASSERT(post_header_len[i] != 255);
}
break;
@@ -4551,13 +4584,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli,
VOID(pthread_mutex_lock(&LOCK_thread_count));
thd->query_id = next_query_id();
VOID(pthread_mutex_unlock(&LOCK_thread_count));
- /*
- Initing thd->row_count is not necessary in theory as this variable has no
- influence in the case of the slave SQL thread (it is used to generate a
- "data truncated" warning but which is absorbed and never gets to the
- error log); still we init it to avoid a Valgrind message.
- */
- mysql_reset_errors(thd, 0);
+ thd->warning_info->opt_clear_warning_info(thd->query_id);
TABLE_LIST tables;
bzero((char*) &tables,sizeof(tables));
@@ -4717,8 +4744,8 @@ error:
int sql_errno;
if (thd->is_error())
{
- err= thd->main_da.message();
- sql_errno= thd->main_da.sql_errno();
+ err= thd->stmt_da->message();
+ sql_errno= thd->stmt_da->sql_errno();
}
else
{
@@ -7281,7 +7308,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
if (simple_open_n_lock_tables(thd, rli->tables_to_lock))
{
- uint actual_error= thd->main_da.sql_errno();
+ uint actual_error= thd->stmt_da->sql_errno();
if (thd->is_slave_error || thd->is_fatal_error)
{
/*
@@ -7292,7 +7319,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
*/
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on opening tables",
- (actual_error ? thd->main_da.message() :
+ (actual_error ? thd->stmt_da->message() :
"unexpected success or fatal error"));
thd->is_slave_error= 1;
}
@@ -7474,8 +7501,16 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
thd->transaction.stmt.modified_non_trans_table= TRUE;
} // row processing loop
- DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event",
- const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
+ {/**
+ The following failure injection works in cooperation with tests
+ setting @@global.debug= 'd,stop_slave_middle_group'.
+ The SQL thread receives the killed status and proceeds to shut
+ down, trying to finish the incomplete event group.
+ */
+ DBUG_EXECUTE_IF("stop_slave_middle_group",
+ if (thd->transaction.all.modified_non_trans_table)
+ const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
+ }
if ((error= do_after_row_operations(rli, error)) &&
ignored_error_code(convert_handler_error(error, thd, table)))
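
DBUG_EXECUTE_IF only runs its body when the named keyword is present in the active debug settings (for example after the tests set @@global.debug= 'd,stop_slave_middle_group'). A condensed model of the mechanism, with a hypothetical debug_keyword_is_set() standing in for the real dbug.h internals:

    // Hypothetical helper standing in for the real dbug.h keyword lookup.
    bool debug_keyword_is_set(const char *keyword);

    #ifndef DBUG_OFF
    #define SKETCH_EXECUTE_IF(keyword, action)          \
      do {                                              \
        if (debug_keyword_is_set(keyword))              \
        {                                               \
          action;                                       \
        }                                               \
      } while (0)
    #else
    #define SKETCH_EXECUTE_IF(keyword, action) do { } while (0)
    #endif
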
@@ -7513,32 +7548,6 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
thd->is_slave_error= 1;
DBUG_RETURN(error);
}
- /*
- This code would ideally be placed in do_update_pos() instead, but
- since we have no access to table there, we do the setting of
- last_event_start_time here instead.
- */
- else if (table && (table->s->primary_key == MAX_KEY) &&
- !cache_stmt && get_flags(STMT_END_F) == RLE_NO_FLAGS)
- {
- /*
- ------------ Temporary fix until WL#2975 is implemented ---------
-
- This event is not the last one (no STMT_END_F). If we stop now
- (in case of terminate_slave_thread()), how will we restart? We
- have to restart from Table_map_log_event, but as this table is
- not transactional, the rows already inserted will still be
- present, and idempotency is not guaranteed (no PK) so we risk
- that repeating leads to double insert. So we desperately try to
- continue, hope we'll eventually leave this buggy situation (by
- executing the final Rows_log_event). If we are in a hopeless
- wait (reached end of last relay log and nothing gets appended
- there), we timeout after one minute, and notify DBA about the
- problem. When WL#2975 is implemented, just remove the member
- Relay_log_info::last_event_start_time and all its occurrences.
- */
- const_cast<Relay_log_info*>(rli)->last_event_start_time= my_time(0);
- }
if (get_flags(STMT_END_F))
if ((error= rows_event_stmt_cleanup(rli, thd)))
@@ -9441,3 +9450,16 @@ st_print_event_info::st_print_event_info()
open_cached_file(&body_cache, NULL, NULL, 0, flags);
}
#endif
+
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+Heartbeat_log_event::Heartbeat_log_event(const char* buf, uint event_len,
+ const Format_description_log_event* description_event)
+ :Log_event(buf, description_event)
+{
+ uint8 header_size= description_event->common_header_len;
+ ident_len = event_len - header_size;
+ set_if_smaller(ident_len,FN_REFLEN-1);
+ log_ident= buf + header_size;
+}
+#endif
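
A quick worked example of the length computation in the constructor above, using assumed numbers; the actual header size comes from description_event->common_header_len at run time:

    static void heartbeat_length_example()
    {
      // Say the master's binlog file is "master-bin.000042" (17 characters)
      // and the common header is 19 bytes (an assumption for this example).
      unsigned event_len   = 19 + 17;                  // bytes received for the event
      unsigned header_size = 19;
      unsigned ident_len   = event_len - header_size;  // 17
      (void) ident_len;
      // set_if_smaller(ident_len, FN_REFLEN-1) only truncates absurdly long
      // names; log_ident then points just past the header inside 'buf'.
    }
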
diff --git a/sql/log_event.h b/sql/log_event.h
index 0b4c63a73af..cd5e659c910 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -250,6 +250,7 @@ struct sql_ex_info
#define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1)
#define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN)
#define INCIDENT_HEADER_LEN 2
+#define HEARTBEAT_HEADER_LEN 0
/*
Max number of possible extra bytes in a replication event compared to a
packet (i.e. a query) sent from client to master;
@@ -575,6 +576,12 @@ enum Log_event_type
INCIDENT_EVENT= 26,
/*
+ Heartbeat event sent by the master when it is idle,
+ to assure the slave that the master is still online
+ */
+ HEARTBEAT_LOG_EVENT= 27,
+
+ /*
Add new events here - right above this comment!
Existing events (except ENUM_END_EVENT) should never change their numbers
*/
@@ -689,6 +696,20 @@ typedef struct st_print_event_info
} PRINT_EVENT_INFO;
#endif
+/**
+ The struct aggregates two parameters that identify an event
+ uniquely within the communication of a particular master and slave pair;
+ i.e., two events coming from the same continuously connected master can
+ never have the same coordinates.
+ @note
+ Such an identifier is not globally unique yet, because the originating
+ master can be reset, and a crashed master can be replaced by another one.
+*/
+struct event_coordinates
+{
+ char * file_name; // binlog file name (directories stripped)
+ my_off_t pos; // event's position in the binlog file
+};
/**
@class Log_event
@@ -3920,6 +3941,42 @@ static inline bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache,
reinit_io_cache(cache, WRITE_CACHE, 0, FALSE, TRUE);
}
+#ifndef MYSQL_CLIENT
+/*****************************************************************************
+
+ Heartbeat Log Event class
+
+ Replication event that assures the slave that the master is alive.
+ The event originates in the master's dump thread and is sent straight to
+ the slave without being logged. The slave does not store it in the relay
+ log either; it uses the data for immediate checks and then discards it.
+
+ Two members of the class, log_ident and Log_event::log_pos, comprise
+ an event_coordinates instance (@see event_coordinates). The coordinates
+ that a heartbeat instance carries correspond to the last event the master
+ has sent from its binlog.
+
+ ****************************************************************************/
+class Heartbeat_log_event: public Log_event
+{
+public:
+ Heartbeat_log_event(const char* buf, uint event_len,
+ const Format_description_log_event* description_event);
+ Log_event_type get_type_code() { return HEARTBEAT_LOG_EVENT; }
+ bool is_valid() const
+ {
+ return (log_ident != NULL &&
+ log_pos >= BIN_LOG_HEADER_SIZE);
+ }
+ const char * get_log_ident() { return log_ident; }
+ uint get_ident_len() { return ident_len; }
+
+private:
+ const char* log_ident;
+ uint ident_len;
+};
+#endif
+
/**
@} (end of group Replication)
*/
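
To make the relationship between Heartbeat_log_event and event_coordinates concrete, here is a hypothetical receiver-side sketch; the helper name and buffer handling are assumptions, and only the two types and their members come from the declarations above:

    #include <string.h>

    // Copy the coordinates carried by a heartbeat into an event_coordinates.
    static void note_master_position(Heartbeat_log_event &hb,
                                     event_coordinates *coord,
                                     char *name_buf /* >= FN_REFLEN bytes */)
    {
      uint len= hb.get_ident_len();
      memcpy(name_buf, hb.get_log_ident(), len);  // log_ident is not 0-terminated
      name_buf[len]= '\0';
      coord->file_name= name_buf;   // master binlog file name
      coord->pos= hb.log_pos;       // position of the last event the master sent
    }
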
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 357bc78b1cd..fbcbb388236 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -78,7 +78,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
if (simple_open_n_lock_tables(thd, rli->tables_to_lock))
{
- uint actual_error= thd->main_da.sql_errno();
+ uint actual_error= thd->stmt_da->sql_errno();
if (thd->is_slave_error || thd->is_fatal_error)
{
/*
@@ -87,7 +87,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
*/
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on opening tables",
- (actual_error ? thd->main_da.message() :
+ (actual_error ? thd->stmt_da->message() :
"unexpected success or fatal error"));
thd->is_slave_error= 1;
}
@@ -216,17 +216,17 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
break;
default:
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(),
+ rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
"Error in %s event: row application failed. %s",
ev->get_type_str(),
- thd->is_error() ? thd->main_da.message() : "");
+ thd->is_error() ? thd->stmt_da->message() : "");
thd->is_slave_error= 1;
break;
}
row_start= row_end;
}
- DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event",
+ DBUG_EXECUTE_IF("stop_slave_middle_group",
const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
error= do_after_row_operations(table, error);
if (!ev->cache_stmt)
@@ -245,12 +245,12 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
if (error)
{ /* error has occured during the transaction */
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(),
+ rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
ev->get_type_str(), table->s->db.str,
table->s->table_name.str,
- thd->is_error() ? thd->main_da.message() : "");
+ thd->is_error() ? thd->stmt_da->message() : "");
/*
If one day we honour --skip-slave-errors in row-based replication, and
@@ -269,34 +269,6 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
DBUG_RETURN(error);
}
- /*
- This code would ideally be placed in do_update_pos() instead, but
- since we have no access to table there, we do the setting of
- last_event_start_time here instead.
- */
- if (table && (table->s->primary_key == MAX_KEY) &&
- !ev->cache_stmt &&
- ev->get_flags(Old_rows_log_event::STMT_END_F) == Old_rows_log_event::RLE_NO_FLAGS)
- {
- /*
- ------------ Temporary fix until WL#2975 is implemented ---------
-
- This event is not the last one (no STMT_END_F). If we stop now
- (in case of terminate_slave_thread()), how will we restart? We
- have to restart from Table_map_log_event, but as this table is
- not transactional, the rows already inserted will still be
- present, and idempotency is not guaranteed (no PK) so we risk
- that repeating leads to double insert. So we desperately try to
- continue, hope we'll eventually leave this buggy situation (by
- executing the final Old_rows_log_event). If we are in a hopeless
- wait (reached end of last relay log and nothing gets appended
- there), we timeout after one minute, and notify DBA about the
- problem. When WL#2975 is implemented, just remove the member
- st_relay_log_info::last_event_start_time and all its occurences.
- */
- const_cast<Relay_log_info*>(rli)->last_event_start_time= my_time(0);
- }
-
DBUG_RETURN(0);
}
#endif
@@ -1744,7 +1716,7 @@ int Old_rows_log_event::do_apply_event(Relay_log_info const *rli)
} // row processing loop
- DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event",
+ DBUG_EXECUTE_IF("stop_slave_middle_group",
const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
error= do_after_row_operations(rli, error);
if (!cache_stmt)
diff --git a/sql/message.h b/sql/message.h
index 0e7c282d5a1..97d039352b4 100644
--- a/sql/message.h
+++ b/sql/message.h
@@ -1,3 +1,6 @@
+#ifndef MESSAGE_INCLUDED
+#define MESSAGE_INCLUDED
+
/*
To change or add messages mysqld writes to the Windows error log, run
mc.exe message.mc
@@ -6,6 +9,8 @@
mc.exe can be installed with Windows SDK, some Visual Studio distributions
do not include it.
*/
+
+
//
// Values are 32 bit values layed out as follows:
//
@@ -53,3 +58,5 @@
//
#define MSG_DEFAULT 0xC0000064L
+#endif /* MESSAGE_INCLUDED */
+
diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc
index 208ddefb890..16d07526a0f 100644
--- a/sql/my_decimal.cc
+++ b/sql/my_decimal.cc
@@ -41,17 +41,17 @@ int decimal_operation_results(int result)
"", (long)-1);
break;
case E_DEC_OVERFLOW:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE),
"DECIMAL", "");
break;
case E_DEC_DIV_ZERO:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_DIVISION_BY_ZERO, ER(ER_DIVISION_BY_ZERO));
break;
case E_DEC_BAD_NUM:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", "", "", (long)-1);
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index e4effd12af7..b539e004481 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -28,6 +28,16 @@
#ifndef MYSQL_CLIENT
+/*
+ The following #define adds server-only members to enum_mysql_show_type,
+ which is defined in mysql/plugin.h.
+ It has to appear before mysql/plugin.h is included.
+*/
+#define SHOW_always_last SHOW_KEY_CACHE_LONG, \
+ SHOW_KEY_CACHE_LONGLONG, SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, \
+ SHOW_HAVE, SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, \
+ SHOW_LONG_NOFLUSH, SHOW_LONGLONG_STATUS
+
#include <my_global.h>
#include <mysql_version.h>
#include <mysql_embed.h>
@@ -128,6 +138,10 @@ extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *files_charset_info ;
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *national_charset_info;
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *table_alias_charset;
+/**
+ Character set of the built-in error messages loaded from errmsg.sys.
+*/
+extern CHARSET_INFO *error_message_charset_info;
enum Derivation
{
@@ -140,6 +154,14 @@ enum Derivation
};
+typedef struct my_locale_errmsgs
+{
+ const char *language;
+ const char **errmsgs;
+} MY_LOCALE_ERRMSGS;
+
+extern char err_shared_dir[];
+
typedef struct my_locale_st
{
uint number;
@@ -152,29 +174,42 @@ typedef struct my_locale_st
TYPELIB *ab_day_names;
uint max_month_name_length;
uint max_day_name_length;
+ uint decimal_point;
+ uint thousand_sep;
+ const char *grouping;
+ MY_LOCALE_ERRMSGS *errmsgs;
#ifdef __cplusplus
my_locale_st(uint number_par,
const char *name_par, const char *descr_par, bool is_ascii_par,
TYPELIB *month_names_par, TYPELIB *ab_month_names_par,
TYPELIB *day_names_par, TYPELIB *ab_day_names_par,
- uint max_month_name_length_par, uint max_day_name_length_par) :
+ uint max_month_name_length_par, uint max_day_name_length_par,
+ uint decimal_point_par, uint thousand_sep_par,
+ const char *grouping_par, MY_LOCALE_ERRMSGS *errmsgs_par) :
number(number_par),
name(name_par), description(descr_par), is_ascii(is_ascii_par),
month_names(month_names_par), ab_month_names(ab_month_names_par),
day_names(day_names_par), ab_day_names(ab_day_names_par),
max_month_name_length(max_month_name_length_par),
- max_day_name_length(max_day_name_length_par)
+ max_day_name_length(max_day_name_length_par),
+ decimal_point(decimal_point_par),
+ thousand_sep(thousand_sep_par),
+ grouping(grouping_par),
+ errmsgs(errmsgs_par)
{}
#endif
} MY_LOCALE;
extern MY_LOCALE my_locale_en_US;
extern MY_LOCALE *my_locales[];
+extern MY_LOCALE *my_default_lc_messages;
extern MY_LOCALE *my_default_lc_time_names;
MY_LOCALE *my_locale_by_name(const char *name);
MY_LOCALE *my_locale_by_number(uint number);
+void cleanup_errmsgs(void);
+
/*************************************************************************/
/**
@@ -270,7 +305,7 @@ protected:
#define TABLE_OPEN_CACHE_DEFAULT 400
#define TABLE_DEF_CACHE_DEFAULT 400
/**
- We must have room for at least 256 table definitions in the table
+ We must have room for at least 400 table definitions in the table
cache, since otherwise there is no chance prepared
statements that use these many tables can work.
Prepared statements use table definition cache ids (table_map_id)
@@ -637,7 +672,6 @@ enum enum_parsing_place
IN_ON
};
-struct st_table;
#define thd_proc_info(thd, msg) set_thd_proc_info(thd, msg, __func__, __FILE__, __LINE__)
class THD;
@@ -875,6 +909,7 @@ bool general_log_write(THD *thd, enum enum_server_command command,
#include "tztime.h"
#ifdef MYSQL_SERVER
#include "sql_servers.h"
+#include "records.h"
#include "opt_range.h"
#ifdef HAVE_QUERY_CACHE
@@ -882,7 +917,7 @@ struct Query_cache_query_flags
{
unsigned int client_long_flag:1;
unsigned int client_protocol_41:1;
- unsigned int result_in_binary_protocol:1;
+ unsigned int protocol_type:2;
unsigned int more_results_exists:1;
unsigned int in_trans:1;
unsigned int autocommit:1;
@@ -901,6 +936,8 @@ struct Query_cache_query_flags
};
#define QUERY_CACHE_FLAGS_SIZE sizeof(Query_cache_query_flags)
#include "sql_cache.h"
+#define query_cache_abort(A) query_cache.abort(A)
+#define query_cache_end_of_result(A) query_cache.end_of_result(A)
#define query_cache_store_query(A, B) query_cache.store_query(A, B)
#define query_cache_destroy() query_cache.destroy()
#define query_cache_result_size_limit(A) query_cache.result_size_limit(A)
@@ -938,100 +975,6 @@ struct Query_cache_query_flags
#define query_cache_is_cacheable_query(L) 0
#endif /*HAVE_QUERY_CACHE*/
-/*
- Error injector Macros to enable easy testing of recovery after failures
- in various error cases.
-*/
-#ifndef ERROR_INJECT_SUPPORT
-
-#define ERROR_INJECT(x) 0
-#define ERROR_INJECT_ACTION(x,action) 0
-#define ERROR_INJECT_CRASH(x) 0
-#define ERROR_INJECT_VALUE(x) 0
-#define ERROR_INJECT_VALUE_ACTION(x,action) 0
-#define ERROR_INJECT_VALUE_CRASH(x) 0
-#define SET_ERROR_INJECT_VALUE(x)
-
-#else
-
-inline bool check_and_unset_keyword(const char *dbug_str)
-{
- const char *extra_str= "-d,";
- char total_str[200];
- if (_db_strict_keyword_ (dbug_str))
- {
- strxmov(total_str, extra_str, dbug_str, NullS);
- DBUG_SET(total_str);
- return 1;
- }
- return 0;
-}
-
-
-inline bool
-check_and_unset_inject_value(int value)
-{
- THD *thd= current_thd;
- if (thd->error_inject_value == (uint)value)
- {
- thd->error_inject_value= 0;
- return 1;
- }
- return 0;
-}
-
-/*
- ERROR INJECT MODULE:
- --------------------
- These macros are used to insert macros from the application code.
- The event that activates those error injections can be activated
- from SQL by using:
- SET SESSION dbug=+d,code;
-
- After the error has been injected, the macros will automatically
- remove the debug code, thus similar to using:
- SET SESSION dbug=-d,code
- from SQL.
-
- ERROR_INJECT_CRASH will inject a crash of the MySQL Server if code
- is set when macro is called. ERROR_INJECT_CRASH can be used in
- if-statements, it will always return FALSE unless of course it
- crashes in which case it doesn't return at all.
-
- ERROR_INJECT_ACTION will inject the action specified in the action
- parameter of the macro, before performing the action the code will
- be removed such that no more events occur. ERROR_INJECT_ACTION
- can also be used in if-statements and always returns FALSE.
- ERROR_INJECT can be used in a normal if-statement, where the action
- part is performed in the if-block. The macro returns TRUE if the
- error was activated and otherwise returns FALSE. If activated the
- code is removed.
-
- Sometimes it is necessary to perform error inject actions as a serie
- of events. In this case one can use one variable on the THD object.
- Thus one sets this value by using e.g. SET_ERROR_INJECT_VALUE(100).
- Then one can later test for it by using ERROR_INJECT_CRASH_VALUE,
- ERROR_INJECT_ACTION_VALUE and ERROR_INJECT_VALUE. This have the same
- behaviour as the above described macros except that they use the
- error inject value instead of a code used by DBUG macros.
-*/
-#define SET_ERROR_INJECT_VALUE(x) \
- current_thd->error_inject_value= (x)
-#define ERROR_INJECT_CRASH(code) \
- DBUG_EVALUATE_IF(code, (abort(), 0), 0)
-#define ERROR_INJECT_ACTION(code, action) \
- (check_and_unset_keyword(code) ? ((action), 0) : 0)
-#define ERROR_INJECT(code) \
- check_and_unset_keyword(code)
-#define ERROR_INJECT_VALUE(value) \
- check_and_unset_inject_value(value)
-#define ERROR_INJECT_VALUE_ACTION(value,action) \
- (check_and_unset_inject_value(value) ? (action) : 0)
-#define ERROR_INJECT_VALUE_CRASH(value) \
- ERROR_INJECT_VALUE_ACTION(value, (abort(), 0))
-
-#endif
-
void write_bin_log(THD *thd, bool clear_error,
char const *query, ulong query_length);
@@ -1116,9 +1059,11 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
bool *write_to_binlog);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
bool check_access(THD *thd, ulong access, const char *db, ulong *save_priv,
- bool no_grant, bool no_errors, bool schema_db);
-bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables,
- uint number, bool no_errors);
+ bool no_grant, bool no_errors, bool schema_db);
+bool check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
+ bool any_combination_of_privileges_will_do,
+ uint number,
+ bool no_errors);
#else
inline bool check_access(THD *thd, ulong access, const char *db,
ulong *save_priv, bool no_grant, bool no_errors,
@@ -1128,8 +1073,10 @@ inline bool check_access(THD *thd, ulong access, const char *db,
*save_priv= GLOBAL_ACLS;
return false;
}
-inline bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables,
- uint number, bool no_errors)
+inline bool check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
+ bool no_errors,
+ bool any_combination_of_privileges_will_do,
+ uint number)
{ return false; }
#endif /*NO_EMBEDDED_ACCESS_CHECKS*/
@@ -1222,6 +1169,8 @@ int prepare_create_field(Create_field *sql_field,
uint *blob_columns,
int *timestamps, int *timestamps_with_niladic,
longlong table_flags);
+CHARSET_INFO* get_sql_field_charset(Create_field *sql_field,
+ HA_CREATE_INFO *create_info);
bool mysql_create_table(THD *thd,const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
Alter_info *alter_info,
@@ -1393,7 +1342,6 @@ void remove_status_vars(SHOW_VAR *list);
void init_status_vars();
void free_status_vars();
void reset_status_vars();
-
/* information schema */
extern LEX_STRING INFORMATION_SCHEMA_NAME;
/* log tables */
@@ -1420,19 +1368,6 @@ enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table);
#define is_schema_db(X) \
!my_strcasecmp(system_charset_info, INFORMATION_SCHEMA_NAME.str, (X))
-/* sql_prepare.cc */
-
-void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length);
-void mysqld_stmt_execute(THD *thd, char *packet, uint packet_length);
-void mysqld_stmt_close(THD *thd, char *packet);
-void mysql_sql_stmt_prepare(THD *thd);
-void mysql_sql_stmt_execute(THD *thd);
-void mysql_sql_stmt_close(THD *thd);
-void mysqld_stmt_fetch(THD *thd, char *packet, uint packet_length);
-void mysqld_stmt_reset(THD *thd, char *packet);
-void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length);
-void reinit_stmt_before_use(THD *thd, LEX *lex);
-
/* sql_handler.cc */
bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen);
bool mysql_ha_close(THD *thd, TABLE_LIST *tables);
@@ -1611,6 +1546,11 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
handlerton *old_db_type,
bool *partition_changed,
uint *fast_alter_partition);
+char *generate_partition_syntax(partition_info *part_info,
+ uint *buf_length, bool use_sql_alloc,
+ bool show_partition_options,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info);
#endif
/* bits for last argument to remove_table_from_cache() */
@@ -1829,6 +1769,8 @@ extern "C" int key_rec_cmp(void *key_info, uchar *a, uchar *b);
bool init_errmessage(void);
#endif /* MYSQL_SERVER */
void sql_perror(const char *message);
+bool read_texts(const char *file_name, const char *language,
+ const char ***point, uint error_messages);
bool fn_format_relative_to_data_home(char * to, const char *name,
const char *dir, const char *extension);
@@ -1921,7 +1863,7 @@ extern Gt_creator gt_creator;
extern Lt_creator lt_creator;
extern Ge_creator ge_creator;
extern Le_creator le_creator;
-extern char language[FN_REFLEN];
+extern char lc_messages_dir[FN_REFLEN];
#endif /* MYSQL_SERVER */
#if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS
extern MYSQL_PLUGIN_IMPORT char reg_ext[FN_EXTLEN];
@@ -1930,6 +1872,7 @@ extern MYSQL_PLUGIN_IMPORT uint reg_ext_length;
#ifdef MYSQL_SERVER
extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN];
extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file;
+extern char default_logfile_name[FN_REFLEN];
extern char log_error_file[FN_REFLEN], *opt_tc_log_file;
extern ulonglong log_10_int[20];
extern ulonglong keybuff_size;
@@ -1964,10 +1907,13 @@ extern ulong MYSQL_PLUGIN_IMPORT specialflag;
#endif /* MYSQL_SERVER || INNODB_COMPATIBILITY_HOOKS */
#ifdef MYSQL_SERVER
extern ulong current_pid;
-extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter;
+extern ulong expire_logs_days;
+extern uint sync_binlog_period, sync_relaylog_period,
+ sync_relayloginfo_period, sync_masterinfo_period;
extern ulong opt_tc_log_size, tc_log_max_pages_used, tc_log_page_size;
extern ulong tc_log_page_waits;
extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb;
+extern my_bool relay_log_recovery;
extern uint test_flags,select_errors,ha_open_options;
extern uint protocol_version, mysqld_port, dropping_tables;
extern uint delay_key_write_options;
@@ -2027,14 +1973,13 @@ extern FILE *bootstrap_file;
extern int bootstrap_error;
extern FILE *stderror_file;
extern pthread_key(MEM_ROOT**,THR_MALLOC);
-extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db,
+extern pthread_mutex_t LOCK_mysql_create_db, LOCK_open, LOCK_lock_db,
LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
LOCK_global_system_variables, LOCK_user_conn,
- LOCK_prepared_stmt_count,
- LOCK_bytes_sent, LOCK_bytes_received, LOCK_connection_count;
+ LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count;
extern MYSQL_PLUGIN_IMPORT pthread_mutex_t LOCK_thread_count;
#ifdef HAVE_OPENSSL
extern pthread_mutex_t LOCK_des_key_file;
@@ -2078,7 +2023,7 @@ extern uint sql_command_flags[];
extern TYPELIB log_output_typelib;
/* optional things, have_* variables */
-extern SHOW_COMP_OPTION have_community_features;
+extern SHOW_COMP_OPTION have_profiling;
extern handlerton *partition_hton;
extern handlerton *myisam_hton;
@@ -2229,12 +2174,6 @@ longlong get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg,
int test_if_number(char *str,int *res,bool allow_wildcards);
void change_byte(uchar *,uint,char,char);
-void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
- SQL_SELECT *select, int use_record_cache,
- bool print_errors, bool disable_rr_cache);
-void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
- bool print_error, uint idx);
-void end_read_record(READ_RECORD *info);
ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder,
uint s_length, SQL_SELECT *select,
ha_rows max_rows, bool sort_positions,
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 1926d447ef7..5916cace54e 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -33,6 +33,8 @@
#include "rpl_injector.h"
+#include "rpl_handler.h"
+
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
@@ -400,9 +402,10 @@ static const char *optimizer_switch_str="index_merge=on,index_merge_union=on,"
"index_merge_sort_union=on,"
"index_merge_intersection=on";
static char *mysqld_user, *mysqld_chroot, *log_error_file_ptr;
-static char *opt_init_slave, *language_ptr, *opt_init_connect;
+static char *opt_init_slave, *lc_messages_dir_ptr, *opt_init_connect;
static char *default_character_set_name;
static char *character_set_filesystem_name;
+static char *lc_messages;
static char *lc_time_names_name;
static char *my_bind_addr_str;
static char *default_collation_name;
@@ -489,6 +492,7 @@ extern const char *opt_ndb_distribution;
extern enum ndb_distribution opt_ndb_distribution_id;
#endif
my_bool opt_readonly, use_temp_pool, relay_log_purge;
+my_bool relay_log_recovery;
my_bool opt_sync_frm, opt_allow_suspicious_udfs;
my_bool opt_secure_auth= 0;
char* opt_secure_file_priv= 0;
@@ -568,7 +572,9 @@ ulong max_prepared_stmt_count;
*/
ulong prepared_stmt_count=0;
ulong thread_id=1L,current_pid;
-ulong slow_launch_threads = 0, sync_binlog_period;
+ulong slow_launch_threads = 0;
+uint sync_binlog_period= 0, sync_relaylog_period= 0,
+ sync_relayloginfo_period= 0, sync_masterinfo_period= 0;
ulong expire_logs_days = 0;
ulong rpl_recovery_rank=0;
const char *log_output_str= "FILE";
@@ -576,12 +582,15 @@ const char *log_output_str= "FILE";
time_t server_start_time, flush_status_time;
char mysql_home[FN_REFLEN], pidfile_name[FN_REFLEN], system_time_zone[30];
+char default_logfile_name[FN_REFLEN];
char *default_tz_name;
char log_error_file[FN_REFLEN], glob_hostname[FN_REFLEN];
char mysql_real_data_home[FN_REFLEN],
- language[FN_REFLEN], reg_ext[FN_EXTLEN], mysql_charsets_dir[FN_REFLEN],
+ lc_messages_dir[FN_REFLEN], reg_ext[FN_EXTLEN],
+ mysql_charsets_dir[FN_REFLEN],
*opt_init_file, *opt_tc_log_file,
def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
+char err_shared_dir[FN_REFLEN];
char mysql_unpacked_real_data_home[FN_REFLEN];
int mysql_unpacked_real_data_home_len;
uint reg_ext_length;
@@ -594,7 +603,6 @@ uint mysql_data_home_len;
char mysql_data_home_buff[2], *mysql_data_home=mysql_real_data_home;
char server_version[SERVER_VERSION_LENGTH];
char *mysqld_unix_port, *opt_mysql_tmpdir;
-const char **errmesg; /**< Error messages */
const char *myisam_recover_options_str="OFF";
const char *myisam_stats_method_str="nulls_unequal";
@@ -632,26 +640,28 @@ MY_BITMAP temp_pool;
CHARSET_INFO *system_charset_info, *files_charset_info ;
CHARSET_INFO *national_charset_info, *table_alias_charset;
CHARSET_INFO *character_set_filesystem;
+CHARSET_INFO *error_message_charset_info;
+MY_LOCALE *my_default_lc_messages;
MY_LOCALE *my_default_lc_time_names;
SHOW_COMP_OPTION have_ssl, have_symlink, have_dlopen, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
-SHOW_COMP_OPTION have_community_features;
+SHOW_COMP_OPTION have_profiling;
/* Thread specific variables */
pthread_key(MEM_ROOT**,THR_MALLOC);
pthread_key(THD*, THR_THD);
-pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
+pthread_mutex_t LOCK_mysql_create_db, LOCK_open, LOCK_thread_count,
LOCK_mapped_file, LOCK_status, LOCK_global_read_lock,
LOCK_error_log, LOCK_uuid_generator,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
- LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
+ LOCK_crypt,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi,
- LOCK_connection_count;
+ LOCK_connection_count, LOCK_error_messages;
/**
The below lock protects access to two global server variables:
max_prepared_stmt_count and prepared_stmt_count. These variables
@@ -735,7 +745,7 @@ static NTService Service; ///< Service object for WinNT
#endif /* EMBEDDED_LIBRARY */
#endif /* __WIN__ */
-#ifdef __NT__
+#ifdef _WIN32
static char pipe_name[512];
static SECURITY_ATTRIBUTES saPipeSecurity;
static SECURITY_DESCRIPTOR sdPipeDescriptor;
@@ -808,11 +818,14 @@ static void set_server_version(void);
static int init_thread_environment();
static char *get_relative_path(const char *path);
static int fix_paths(void);
-pthread_handler_t handle_connections_sockets(void *arg);
+void handle_connections_sockets();
+#ifdef _WIN32
+pthread_handler_t handle_connections_sockets_thread(void *arg);
+#endif
pthread_handler_t kill_server_thread(void *arg);
static void bootstrap(FILE *file);
static bool read_init_file(char *file_name);
-#ifdef __NT__
+#ifdef _WIN32
pthread_handler_t handle_connections_namedpipes(void *arg);
#endif
#ifdef HAVE_SMEM
@@ -898,7 +911,7 @@ static void close_connections(void)
ip_sock= INVALID_SOCKET;
}
}
-#ifdef __NT__
+#ifdef _WIN32
if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe)
{
HANDLE temp;
@@ -994,7 +1007,7 @@ static void close_connections(void)
if (tmp->vio_ok())
{
if (global_system_variables.log_warnings)
- sql_print_warning(ER(ER_FORCING_CLOSE),my_progname,
+ sql_print_warning(ER_DEFAULT(ER_FORCING_CLOSE),my_progname,
tmp->thread_id,
(tmp->main_security_ctx.user ?
tmp->main_security_ctx.user : ""));
@@ -1146,9 +1159,9 @@ static void __cdecl kill_server(int sig_ptr)
if (sig != 0) // 0 is not a valid signal number
my_sigset(sig, SIG_IGN); /* purify inspected */
if (sig == MYSQL_KILL_SIGNAL || sig == 0)
- sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname);
+ sql_print_information(ER_DEFAULT(ER_NORMAL_SHUTDOWN),my_progname);
else
- sql_print_error(ER(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
+ sql_print_error(ER_DEFAULT(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
#if defined(HAVE_SMEM) && defined(__WIN__)
/*
@@ -1310,6 +1323,7 @@ void clean_up(bool print_message)
ha_end();
if (tc_log)
tc_log->close();
+ delegates_destroy();
xid_cache_free();
delete_elements(&key_caches, (void (*)(const char*, uchar*)) free_key_cache);
multi_keycache_free();
@@ -1358,12 +1372,11 @@ void clean_up(bool print_message)
if (!opt_bootstrap)
(void) my_delete(pidfile_name,MYF(0)); // This may not always exist
#endif
- if (print_message && errmesg && server_start_time)
- sql_print_information(ER(ER_SHUTDOWN_COMPLETE),my_progname);
+ if (print_message && /*errmesg &&*/ server_start_time)
+ sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname);
+ cleanup_errmsgs();
thread_scheduler.end();
finish_client_errs();
- my_free((uchar*) my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST),
- MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR));
DBUG_PRINT("quit", ("Error messages freed"));
/* Tell main we are ready */
logger.cleanup_end();
@@ -1410,7 +1423,6 @@ static void clean_up_mutexes()
{
(void) pthread_mutex_destroy(&LOCK_mysql_create_db);
(void) pthread_mutex_destroy(&LOCK_lock_db);
- (void) pthread_mutex_destroy(&LOCK_Acl);
(void) rwlock_destroy(&LOCK_grant);
(void) pthread_mutex_destroy(&LOCK_open);
(void) pthread_mutex_destroy(&LOCK_thread_count);
@@ -1422,8 +1434,6 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_delayed_create);
(void) pthread_mutex_destroy(&LOCK_manager);
(void) pthread_mutex_destroy(&LOCK_crypt);
- (void) pthread_mutex_destroy(&LOCK_bytes_sent);
- (void) pthread_mutex_destroy(&LOCK_bytes_received);
(void) pthread_mutex_destroy(&LOCK_user_conn);
(void) pthread_mutex_destroy(&LOCK_connection_count);
Events::destroy_mutexes();
@@ -1447,6 +1457,7 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_global_read_lock);
(void) pthread_mutex_destroy(&LOCK_uuid_generator);
(void) pthread_mutex_destroy(&LOCK_prepared_stmt_count);
+ (void) pthread_mutex_destroy(&LOCK_error_messages);
(void) pthread_cond_destroy(&COND_thread_count);
(void) pthread_cond_destroy(&COND_refresh);
(void) pthread_cond_destroy(&COND_global_read_lock);
@@ -1704,7 +1715,7 @@ static void network_init(void)
}
}
-#ifdef __NT__
+#ifdef _WIN32
/* create named pipe */
if (Service.IsNT() && mysqld_unix_port[0] && !opt_bootstrap &&
opt_enable_named_pipe)
@@ -1819,14 +1830,15 @@ void close_connection(THD *thd, uint errcode, bool lock)
DBUG_PRINT("enter",("fd: %s error: '%s'",
thd->net.vio ? vio_description(thd->net.vio) :
"(not connected)",
- errcode ? ER(errcode) : ""));
+ errcode ? ER_DEFAULT(errcode) : ""));
if (lock)
(void) pthread_mutex_lock(&LOCK_thread_count);
thd->killed= THD::KILL_CONNECTION;
if ((vio= thd->net.vio) != 0)
{
if (errcode)
- net_send_error(thd, errcode, ER(errcode)); /* purecov: inspected */
+ net_send_error(thd, errcode,
+ ER_DEFAULT(errcode), NULL); /* purecov: inspected */
vio_close(vio); /* vio is freed in delete thd */
}
if (lock)
@@ -2051,29 +2063,7 @@ static BOOL WINAPI console_event_handler( DWORD type )
}
-/*
- In Visual Studio 2005 and later, default SIGABRT handler will overwrite
- any unhandled exception filter set by the application and will try to
- call JIT debugger. This is not what we want, this we calling __debugbreak
- to stop in debugger, if process is being debugged or to generate
- EXCEPTION_BREAKPOINT and then handle_segfault will do its magic.
-*/
-
-#if (_MSC_VER >= 1400)
-static void my_sigabrt_handler(int sig)
-{
- __debugbreak();
-}
-#endif /*_MSC_VER >=1400 */
-void win_install_sigabrt_handler(void)
-{
-#if (_MSC_VER >=1400)
- /*abort() should not override our exception filter*/
- _set_abort_behavior(0,_CALL_REPORTFAULT);
- signal(SIGABRT,my_sigabrt_handler);
-#endif /* _MSC_VER >=1400 */
-}
#ifdef DEBUG_UNHANDLED_EXCEPTION_FILTER
#define DEBUGGER_ATTACH_TIMEOUT 120
@@ -2152,7 +2142,6 @@ LONG WINAPI my_unhandler_exception_filter(EXCEPTION_POINTERS *ex_pointers)
static void init_signals(void)
{
- win_install_sigabrt_handler();
if(opt_console)
SetConsoleCtrlHandler(console_event_handler,TRUE);
@@ -2874,11 +2863,11 @@ static void check_data_home(const char *path)
for the client.
*/
/* ARGSUSED */
-extern "C" int my_message_sql(uint error, const char *str, myf MyFlags);
+extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
-int my_message_sql(uint error, const char *str, myf MyFlags)
+void my_message_sql(uint error, const char *str, myf MyFlags)
{
- THD *thd;
+ THD *thd= current_thd;
DBUG_ENTER("my_message_sql");
DBUG_PRINT("error", ("error: %u message: '%s'", error, str));
@@ -2900,70 +2889,18 @@ int my_message_sql(uint error, const char *str, myf MyFlags)
error= ER_UNKNOWN_ERROR;
}
- if ((thd= current_thd))
+ if (thd)
{
- /*
- TODO: There are two exceptions mechanism (THD and sp_rcontext),
- this could be improved by having a common stack of handlers.
- */
- if (thd->handle_error(error, str,
- MYSQL_ERROR::WARN_LEVEL_ERROR))
- DBUG_RETURN(0);
-
- thd->is_slave_error= 1; // needed to catch query errors during replication
-
- /*
- thd->lex->current_select == 0 if lex structure is not inited
- (not query command (COM_QUERY))
- */
- if (thd->lex->current_select &&
- thd->lex->current_select->no_error && !thd->is_fatal_error)
- {
- DBUG_PRINT("error",
- ("Error converted to warning: current_select: no_error %d "
- "fatal_error: %d",
- (thd->lex->current_select ?
- thd->lex->current_select->no_error : 0),
- (int) thd->is_fatal_error));
- }
- else
- {
- if (! thd->main_da.is_error()) // Return only first message
- {
- thd->main_da.set_error_status(thd, error, str);
- }
- query_cache_abort(&thd->net);
- }
- /*
- If a continue handler is found, the error message will be cleared
- by the stored procedures code.
- */
- if (thd->spcont &&
- ! (MyFlags & ME_NO_SP_HANDLER) &&
- thd->spcont->handle_error(error, MYSQL_ERROR::WARN_LEVEL_ERROR, thd))
- {
- /*
- Do not push any warnings, a handled error must be completely
- silenced.
- */
- DBUG_RETURN(0);
- }
-
- if (!thd->no_warnings_for_error &&
- !(MyFlags & ME_NO_WARNING_FOR_ERROR))
- {
- /*
- Suppress infinite recursion if there a memory allocation error
- inside push_warning.
- */
- thd->no_warnings_for_error= TRUE;
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str);
- thd->no_warnings_for_error= FALSE;
- }
+ if (MyFlags & ME_FATALERROR)
+ thd->is_fatal_error= 1;
+ (void) thd->raise_condition(error,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_ERROR,
+ str);
}
if (!thd || MyFlags & ME_NOREFRESH)
sql_print_error("%s: %s",my_progname,str); /* purecov: inspected */
- DBUG_RETURN(0);
+ DBUG_VOID_RETURN;
}
@@ -3127,6 +3064,7 @@ SHOW_VAR com_status_vars[]= {
{"replace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS},
{"replace_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS},
{"reset", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS},
+ {"resignal", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESIGNAL]), SHOW_LONG_STATUS},
{"restore_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS},
{"revoke", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS},
{"revoke_all", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS},
@@ -3135,12 +3073,12 @@ SHOW_VAR com_status_vars[]= {
{"savepoint", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS},
{"select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS},
{"set_option", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS},
+ {"signal", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SIGNAL]), SHOW_LONG_STATUS},
{"show_authors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_AUTHORS]), SHOW_LONG_STATUS},
{"show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS},
{"show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS},
{"show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS},
{"show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS},
- {"show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS},
{"show_contributors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CONTRIBUTORS]), SHOW_LONG_STATUS},
{"show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS},
{"show_create_event", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_EVENT]), SHOW_LONG_STATUS},
@@ -3173,6 +3111,7 @@ SHOW_VAR com_status_vars[]= {
{"show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS},
{"show_profile", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROFILE]), SHOW_LONG_STATUS},
{"show_profiles", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROFILES]), SHOW_LONG_STATUS},
+ {"show_relaylog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_RELAYLOG_EVENTS]), SHOW_LONG_STATUS},
{"show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS},
{"show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS},
{"show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS},
@@ -3257,10 +3196,13 @@ static int init_common_variables(const char *conf_file_name, int argc,
strmake(glob_hostname, STRING_WITH_LEN("localhost"));
sql_print_warning("gethostname failed, using '%s' as hostname",
glob_hostname);
- strmake(pidfile_name, STRING_WITH_LEN("mysql"));
+ strmake(default_logfile_name, STRING_WITH_LEN("mysql"));
}
else
- strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5);
+ strmake(default_logfile_name, glob_hostname,
+ sizeof(default_logfile_name)-5);
+
+ strmake(pidfile_name, default_logfile_name, sizeof(pidfile_name)-5);
strmov(fn_ext(pidfile_name),".pid"); // Add proper extension
/*
@@ -3432,6 +3374,13 @@ static int init_common_variables(const char *conf_file_name, int argc,
open_files_limit= files;
}
unireg_init(opt_specialflag); /* Set up extern variabels */
+ if (!(my_default_lc_messages=
+ my_locale_by_name(lc_messages)))
+ {
+ sql_print_error("Unknown locale: '%s'", lc_messages);
+ return 1;
+ }
+ global_system_variables.lc_messages= my_default_lc_messages;
if (init_errmessage()) /* Read error messages from file */
return 1;
init_client_errs();
@@ -3482,12 +3431,12 @@ static int init_common_variables(const char *conf_file_name, int argc,
default_collation= get_charset_by_name(default_collation_name, MYF(0));
if (!default_collation)
{
- sql_print_error(ER(ER_UNKNOWN_COLLATION), default_collation_name);
+ sql_print_error(ER_DEFAULT(ER_UNKNOWN_COLLATION), default_collation_name);
return 1;
}
if (!my_charset_same(default_charset_info, default_collation))
{
- sql_print_error(ER(ER_COLLATION_CHARSET_MISMATCH),
+ sql_print_error(ER_DEFAULT(ER_COLLATION_CHARSET_MISMATCH),
default_collation_name,
default_charset_info->csname);
return 1;
@@ -3539,7 +3488,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
if (opt_slow_log && opt_slow_logname && !(log_output_options & LOG_FILE)
&& !(log_output_options & LOG_NONE))
sql_print_warning("Although a path was specified for the "
- "--log_slow_queries option, log tables are used. "
+ "--log-slow-queries option, log tables are used. "
"To enable logging to files use the --log-output=file option.");
s= opt_logname ? opt_logname : make_default_log_name(buff, ".log");
@@ -3622,7 +3571,6 @@ static int init_thread_environment()
{
(void) pthread_mutex_init(&LOCK_mysql_create_db,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_lock_db,MY_MUTEX_INIT_SLOW);
- (void) pthread_mutex_init(&LOCK_Acl,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_open, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_thread_count,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_mapped_file,MY_MUTEX_INIT_SLOW);
@@ -3633,14 +3581,13 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_delayed_create,MY_MUTEX_INIT_SLOW);
(void) pthread_mutex_init(&LOCK_manager,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_crypt,MY_MUTEX_INIT_FAST);
- (void) pthread_mutex_init(&LOCK_bytes_sent,MY_MUTEX_INIT_FAST);
- (void) pthread_mutex_init(&LOCK_bytes_received,MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_user_conn, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
(void) my_rwlock_init(&LOCK_system_variables_hash, NULL);
(void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_error_messages, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_connection_count, MY_MUTEX_INIT_FAST);
#ifdef HAVE_OPENSSL
@@ -3866,6 +3813,13 @@ static int init_server_components()
unireg_abort(1);
}
+ /* initialize delegates for extension observers */
+ if (delegates_init())
+ {
+ sql_print_error("Initialize extension delegates failed");
+ unireg_abort(1);
+ }
+
/* need to configure logging before initializing storage engines */
if (opt_update_log)
{
@@ -3924,17 +3878,17 @@ with --log-bin instead.");
}
if (opt_log_slave_updates && !opt_bin_log)
{
- sql_print_error("You need to use --log-bin to make "
+ sql_print_warning("You need to use --log-bin to make "
"--log-slave-updates work.");
- unireg_abort(1);
}
if (!opt_bin_log)
{
if (opt_binlog_format_id != BINLOG_FORMAT_UNSPEC)
{
- sql_print_error("You need to use --log-bin to make "
- "--binlog-format work.");
- unireg_abort(1);
+ sql_print_warning("You need to use --log-bin to make "
+ "--binlog-format work.");
+
+ global_system_variables.binlog_format= opt_binlog_format_id;
}
else
{
@@ -3956,11 +3910,17 @@ with --log-bin instead.");
#ifdef HAVE_REPLICATION
if (opt_log_slave_updates && replicate_same_server_id)
{
- sql_print_error("\
-using --replicate-same-server-id in conjunction with \
+ if (opt_bin_log)
+ {
+ sql_print_error("using --replicate-same-server-id in conjunction with \
--log-slave-updates is impossible, it would lead to infinite loops in this \
server.");
- unireg_abort(1);
+ unireg_abort(1);
+ }
+ else
+ sql_print_warning("using --replicate-same-server-id in conjunction with \
+--log-slave-updates would lead to infinite loops in this server. However this \
+will be ignored as the --log-bin option is not defined.");
}
#endif
@@ -4069,8 +4029,8 @@ a file name for --log-bin-index option", opt_binlog_index_name);
}
/* if the errmsg.sys is not loaded, terminate to maintain behaviour */
- if (!errmesg[0][0])
- unireg_abort(1);
+ if (!DEFAULT_ERRMSGS[0][0])
+ unireg_abort(1);
/* We have to initialize the storage engines before CSV logging */
if (ha_init())
@@ -4240,12 +4200,11 @@ static void create_shutdown_thread()
#endif /* EMBEDDED_LIBRARY */
-#if (defined(__NT__) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
+#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
static void handle_connections_methods()
{
pthread_t hThread;
DBUG_ENTER("handle_connections_methods");
-#ifdef __NT__
if (hPipe == INVALID_HANDLE_VALUE &&
(!have_tcpip || opt_disable_networking) &&
!opt_enable_shared_memory)
@@ -4253,12 +4212,10 @@ static void handle_connections_methods()
sql_print_error("TCP/IP, --shared-memory, or --named-pipe should be configured on NT OS");
unireg_abort(1); // Will not return
}
-#endif
pthread_mutex_lock(&LOCK_thread_count);
(void) pthread_cond_init(&COND_handler_count,NULL);
handler_count=0;
-#ifdef __NT__
if (hPipe != INVALID_HANDLE_VALUE)
{
handler_count++;
@@ -4269,12 +4226,11 @@ static void handle_connections_methods()
handler_count--;
}
}
-#endif /* __NT__ */
if (have_tcpip && !opt_disable_networking)
{
handler_count++;
if (pthread_create(&hThread,&connection_attrib,
- handle_connections_sockets, 0))
+ handle_connections_sockets_thread, 0))
{
sql_print_warning("Can't create thread to handle TCP/IP");
handler_count--;
@@ -4309,7 +4265,7 @@ void decrement_handler_count()
}
#else
#define decrement_handler_count()
-#endif /* defined(__NT__) || defined(HAVE_SMEM) */
+#endif /* defined(_WIN32) || defined(HAVE_SMEM) */
#ifndef EMBEDDED_LIBRARY
@@ -4587,7 +4543,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
create_shutdown_thread();
start_handle_manager();
- sql_print_information(ER(ER_STARTUP),my_progname,server_version,
+ sql_print_information(ER_DEFAULT(ER_STARTUP),my_progname,server_version,
((unix_sock == INVALID_SOCKET) ? (char*) ""
: mysqld_unix_port),
mysqld_port,
@@ -4603,18 +4559,11 @@ we force server id to 2, but this MySQL server will not act as a slave.");
pthread_cond_signal(&COND_server_started);
pthread_mutex_unlock(&LOCK_server_started);
-#if defined(__NT__) || defined(HAVE_SMEM)
+#if defined(_WIN32) || defined(HAVE_SMEM)
handle_connections_methods();
#else
-#ifdef __WIN__
- if (!have_tcpip || opt_disable_networking)
- {
- sql_print_error("TCP/IP unavailable or disabled with --skip-networking; no available interfaces");
- unireg_abort(1);
- }
-#endif
- handle_connections_sockets(0);
-#endif /* __NT__ */
+ handle_connections_sockets();
+#endif /* _WIN32 || HAVE_SMEM */
/* (void) pthread_attr_destroy(&connection_attrib); */
@@ -4987,7 +4936,7 @@ void create_thread_to_handle_connection(THD *thd)
/* Can't use my_error() since store_globals has not been called. */
my_snprintf(error_message_buff, sizeof(error_message_buff),
ER(ER_CANT_CREATE_THREAD), error);
- net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff);
+ net_send_error(thd, ER_CANT_CREATE_THREAD, error_message_buff, NULL);
(void) pthread_mutex_lock(&LOCK_thread_count);
close_connection(thd,0,0);
delete thd;
@@ -5019,9 +4968,6 @@ static void create_new_thread(THD *thd)
NET *net=&thd->net;
DBUG_ENTER("create_new_thread");
- if (protocol_version > 9)
- net->return_errno=1;
-
/*
Don't allow too many connections. We roughly check here that we allow
only (max_connections + 1) connections.
@@ -5089,7 +5035,7 @@ inline void kill_broken_server()
/* Handle new connections and spawn new process to handle them */
#ifndef EMBEDDED_LIBRARY
-pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
+void handle_connections_sockets()
{
my_socket sock,new_sock;
uint error_count=0;
@@ -5292,13 +5238,19 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused)))
create_new_thread(thd);
}
+ DBUG_VOID_RETURN;
+}
+
+#ifdef _WIN32
+pthread_handler_t handle_connections_sockets_thread(void *arg)
+{
+ my_thread_init();
+ handle_connections_sockets();
decrement_handler_count();
- DBUG_RETURN(0);
+ return 0;
}
-
-#ifdef __NT__
pthread_handler_t handle_connections_namedpipes(void *arg)
{
HANDLE hConnectedPipe;
@@ -5393,7 +5345,7 @@ pthread_handler_t handle_connections_namedpipes(void *arg)
decrement_handler_count();
DBUG_RETURN(0);
}
-#endif /* __NT__ */
+#endif /* _WIN32 */
#ifdef HAVE_SMEM
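The change above splits the socket handler in two: handle_connections_sockets() becomes a plain function that can be called directly, and a Windows-only thunk, handle_connections_sockets_thread(), adapts it to the pthread entry-point signature, doing the per-thread setup (my_thread_init()) and bookkeeping (decrement_handler_count()) around the call. A minimal sketch of that wrapper pattern, with illustrative names and a dummy worker body rather than the server's real code:

#include <pthread.h>
#include <cstdio>

/* Plain worker: callable directly (the non-Windows path) or from a thread. */
static void accept_loop()
{
  /* ... accept connections until shutdown is requested ... */
  std::puts("accept loop running");
}

/* Thread entry point: pthread wants void *(*)(void *), so this thunk adapts
   the signature and wraps the worker with per-thread setup and bookkeeping
   (the server's version calls my_thread_init() and decrement_handler_count()). */
extern "C" void *accept_loop_thread(void *)
{
  accept_loop();
  return nullptr;
}

int main()
{
  pthread_t tid;
  if (pthread_create(&tid, nullptr, accept_loop_thread, nullptr) == 0)
    pthread_join(tid, nullptr);
  else
    accept_loop();                    /* fall back to running it inline */
  return 0;
}

The named-pipe and shared-memory handlers keep the thread-entry form, since they are only started from handle_connections_methods().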
@@ -5739,6 +5691,7 @@ enum options_mysqld
OPT_QUERY_CACHE_TYPE, OPT_QUERY_CACHE_WLOCK_INVALIDATE, OPT_RECORD_BUFFER,
OPT_RECORD_RND_BUFFER, OPT_DIV_PRECINCREMENT, OPT_RELAY_LOG_SPACE_LIMIT,
OPT_RELAY_LOG_PURGE,
+ OPT_RELAY_LOG_RECOVERY,
OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME,
OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING,
OPT_SORT_BUFFER, OPT_TABLE_OPEN_CACHE, OPT_TABLE_DEF_CACHE,
@@ -5763,6 +5716,7 @@ enum options_mysqld
OPT_DEFAULT_COLLATION,
OPT_CHARACTER_SET_CLIENT_HANDSHAKE,
OPT_CHARACTER_SET_FILESYSTEM,
+ OPT_LC_ERROR_MESSAGES,
OPT_LC_TIME_NAMES,
OPT_INIT_CONNECT,
OPT_INIT_SLAVE,
@@ -5806,7 +5760,10 @@ enum options_mysqld
OPT_SLAVE_EXEC_MODE,
OPT_GENERAL_LOG_FILE,
OPT_SLOW_QUERY_LOG_FILE,
- OPT_IGNORE_BUILTIN_INNODB
+ OPT_IGNORE_BUILTIN_INNODB,
+ OPT_SYNC_RELAY_LOG,
+ OPT_SYNC_RELAY_LOG_INFO,
+ OPT_SYNC_MASTER_INFO
};
@@ -5942,10 +5899,6 @@ struct my_option my_long_options[] =
"Set the default storage engine (table type) for tables.",
(uchar**)&default_storage_engine_str, (uchar**)&default_storage_engine_str,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"default-table-type", OPT_STORAGE_ENGINE,
- "(deprecated) Use --default-storage-engine.",
- (uchar**)&default_storage_engine_str, (uchar**)&default_storage_engine_str,
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"default-time-zone", OPT_DEFAULT_TIME_ZONE, "Set the default time zone.",
(uchar**) &default_tz_name, (uchar**) &default_tz_name,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
@@ -5971,7 +5924,7 @@ struct my_option my_long_options[] =
"Deprecated option, use --external-locking instead.",
(uchar**) &opt_external_locking, (uchar**) &opt_external_locking,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef __NT__
+#ifdef _WIN32
{"enable-named-pipe", OPT_HAVE_NAMED_PIPE, "Enable the named pipe (NT).",
(uchar**) &opt_enable_named_pipe, (uchar**) &opt_enable_named_pipe, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -6034,9 +5987,17 @@ Disable with --skip-super-large-pages.",
(uchar**) &opt_init_slave, (uchar**) &opt_init_slave, 0, GET_STR_ALLOC,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"language", 'L',
- "Client error messages in given language. May be given as a full path.",
- (uchar**) &language_ptr, (uchar**) &language_ptr, 0, GET_STR, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
+ "Client error messages in given language. May be given as a full path. "
+ "Deprecated. Use --lc-messages-dir instead.",
+ (uchar**) &lc_messages_dir_ptr, (uchar**) &lc_messages_dir_ptr, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"lc-messages-dir", 'L',
+ "Directory where error messages are.", (uchar**) &lc_messages_dir_ptr,
+ (uchar**) &lc_messages_dir_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"lc-messages", OPT_LC_ERROR_MESSAGES,
+ "Set the language used for the error messages.",
+ (uchar**) &lc_messages, (uchar**) &lc_messages, 0, GET_STR, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0 },
{"lc-time-names", OPT_LC_TIME_NAMES,
"Set the language used for the month names and the days of the week.",
(uchar**) &lc_time_names_name,
@@ -6126,14 +6087,14 @@ Disable with --skip-super-large-pages.",
(uchar**) &opt_log_slow_slave_statements,
(uchar**) &opt_log_slow_slave_statements,
0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
- {"log_slow_queries", OPT_SLOW_QUERY_LOG,
- "Log slow queries to a table or log file. Defaults logging to table "
- "mysql.slow_log or hostname-slow.log if --log-output=file is used. "
- "Must be enabled to activate other slow log options. "
- "(deprecated option, use --slow_query_log/--slow_query_log_file instead)",
+ {"log-slow-queries", OPT_SLOW_QUERY_LOG,
+ "Log slow queries to a table or log file. Defaults logging to table "
+ "mysql.slow_log or hostname-slow.log if --log-output=file is used. "
+ "Must be enabled to activate other slow log options. "
+ "Deprecated option, use --slow-query-log/--slow-query-log-file instead.",
(uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR, OPT_ARG,
0, 0, 0, 0, 0, 0},
- {"slow_query_log_file", OPT_SLOW_QUERY_LOG_FILE,
+ {"slow-query-log-file", OPT_SLOW_QUERY_LOG_FILE,
"Log slow queries to given log file. Defaults logging to hostname-slow.log. Must be enabled to activate other slow log options.",
(uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -6371,7 +6332,7 @@ master-ssl",
"Maximum time in seconds to wait for the port to become free. "
"(Default: no wait)", (uchar**) &mysqld_port_timeout,
(uchar**) &mysqld_port_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
{"profiling_history_size", OPT_PROFILING, "Limit of query profiling memory",
(uchar**) &global_system_variables.profiling_history_size,
(uchar**) &max_system_variables.profiling_history_size,
@@ -6854,7 +6815,7 @@ The minimum value for this variable is 4096.",
(uchar**) &max_write_lock_count, (uchar**) &max_write_lock_count, 0, GET_ULONG,
REQUIRED_ARG, ULONG_MAX, 1, ULONG_MAX, 0, 1, 0},
{"min_examined_row_limit", OPT_MIN_EXAMINED_ROW_LIMIT,
- "Don't log queries which examine less than min_examined_row_limit rows to file.",
+ "Don't write queries to slow log that examine fewer than min_examined_row_limit rows.",
(uchar**) &global_system_variables.min_examined_row_limit,
(uchar**) &max_system_variables.min_examined_row_limit, 0, GET_ULONG,
REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1L, 0},
@@ -7039,6 +7000,13 @@ The minimum value for this variable is 4096.",
(uchar**) &relay_log_purge,
(uchar**) &relay_log_purge, 0, GET_BOOL, NO_ARG,
1, 0, 1, 0, 1, 0},
+ {"relay_log_recovery", OPT_RELAY_LOG_RECOVERY,
+ "Enables automatic relay log recovery right after the database startup, "
+ "which means that the IO Thread starts re-fetching from the master "
+ "right after the last transaction processed.",
+ (uchar**) &relay_log_recovery,
+ (uchar**) &relay_log_recovery, 0, GET_BOOL, NO_ARG,
+ 0, 0, 1, 0, 1, 0},
{"relay_log_space_limit", OPT_RELAY_LOG_SPACE_LIMIT,
"Maximum space to use for all relay logs.",
(uchar**) &relay_log_space_limit,
@@ -7073,8 +7041,23 @@ The minimum value for this variable is 4096.",
{"sync-binlog", OPT_SYNC_BINLOG,
"Synchronously flush binary log to disk after every #th event. "
"Use 0 (default) to disable synchronous flushing.",
- (uchar**) &sync_binlog_period, (uchar**) &sync_binlog_period, 0, GET_ULONG,
- REQUIRED_ARG, 0, 0, ULONG_MAX, 0, 1, 0},
+ (uchar**) &sync_binlog_period, (uchar**) &sync_binlog_period, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, (longlong) UINT_MAX, 0, 1, 0},
+ {"sync-relay-log", OPT_SYNC_RELAY_LOG,
+ "Synchronously flush relay log to disk after every #th event. "
+ "Use 0 (default) to disable synchronous flushing.",
+ (uchar**) &sync_relaylog_period, (uchar**) &sync_relaylog_period, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, (longlong) UINT_MAX, 0, 1, 0},
+ {"sync-relay-log-info", OPT_SYNC_RELAY_LOG_INFO,
+ "Synchronously flush relay log info to disk after #th transaction. "
+ "Use 0 (default) to disable synchronous flushing.",
+ (uchar**) &sync_relayloginfo_period, (uchar**) &sync_relayloginfo_period, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, (longlong) UINT_MAX, 0, 1, 0},
+ {"sync-master-info", OPT_SYNC_MASTER_INFO,
+ "Synchronously flush master info to disk after every #th event. "
+ "Use 0 (default) to disable synchronous flushing.",
+ (uchar**) &sync_masterinfo_period, (uchar**) &sync_masterinfo_period, 0, GET_UINT,
+ REQUIRED_ARG, 0, 0, (longlong) UINT_MAX, 0, 1, 0},
{"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.",
(uchar**) &opt_sync_frm, (uchar**) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0,
0, 0, 0, 0},
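The new --sync-relay-log, --sync-relay-log-info and --sync-master-info options follow the --sync-binlog scheme: the value is a period N, 0 disables synchronous flushing, and otherwise the file is flushed to disk on every Nth event or transaction. A minimal sketch of that counting scheme (the variable and function names here are illustrative, not the server's):

#include <cstdio>

static unsigned sync_period = 3;   /* e.g. --sync-relay-log=3; 0 disables */
static unsigned sync_counter = 0;

static void sync_file()            /* stand-in for an fsync()/my_sync() call */
{
  std::puts("fsync");
}

/* Called once per written event/transaction. */
static void after_write_event()
{
  if (sync_period && ++sync_counter >= sync_period)
  {
    sync_counter = 0;
    sync_file();
  }
}

int main()
{
  for (int i = 0; i < 10; i++)
    after_write_event();           /* syncs after events 3, 6 and 9 */
  return 0;
}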
@@ -7177,7 +7160,7 @@ static int show_starttime(THD *thd, SHOW_VAR *var, char *buff)
return 0;
}
-#ifdef COMMUNITY_SERVER
+#ifdef ENABLED_PROFILING
static int show_flushstatustime(THD *thd, SHOW_VAR *var, char *buff)
{
var->type= SHOW_LONG;
@@ -7200,7 +7183,8 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_MY_BOOL;
pthread_mutex_lock(&LOCK_active_mi);
var->value= buff;
- *((my_bool *)buff)= (my_bool) (active_mi && active_mi->slave_running &&
+ *((my_bool *)buff)= (my_bool) (active_mi &&
+ active_mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
active_mi->rli.slave_running);
pthread_mutex_unlock(&LOCK_active_mi);
return 0;
@@ -7226,6 +7210,40 @@ static int show_slave_retried_trans(THD *thd, SHOW_VAR *var, char *buff)
pthread_mutex_unlock(&LOCK_active_mi);
return 0;
}
+
+static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
+{
+ pthread_mutex_lock(&LOCK_active_mi);
+ if (active_mi)
+ {
+ var->type= SHOW_LONGLONG;
+ var->value= buff;
+ pthread_mutex_lock(&active_mi->rli.data_lock);
+ *((longlong *)buff)= active_mi->received_heartbeats;
+ pthread_mutex_unlock(&active_mi->rli.data_lock);
+ }
+ else
+ var->type= SHOW_UNDEF;
+ pthread_mutex_unlock(&LOCK_active_mi);
+ return 0;
+}
+
+static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff)
+{
+ pthread_mutex_lock(&LOCK_active_mi);
+ if (active_mi)
+ {
+ var->type= SHOW_CHAR;
+ var->value= buff;
+ my_sprintf(buff, (buff, "%.3f",active_mi->heartbeat_period));
+ }
+ else
+ var->type= SHOW_UNDEF;
+ pthread_mutex_unlock(&LOCK_active_mi);
+ return 0;
+}
+
+
#endif /* HAVE_REPLICATION */
static int show_open_tables(THD *thd, SHOW_VAR *var, char *buff)
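Slave_received_heartbeats and Slave_heartbeat_period are exported as SHOW_FUNC status variables: a callback takes the locks it needs, sets the value type, and points var->value at a caller-supplied buffer, or reports SHOW_UNDEF when there is no active master. A self-contained sketch of that callback shape, using stand-in types instead of the real THD/SHOW_VAR definitions:

#include <cstdio>
#include <cstring>
#include <mutex>

/* Stand-ins for the server's SHOW_VAR machinery (assumed shapes). */
enum show_type { SHOW_UNDEF, SHOW_LONGLONG, SHOW_CHAR };
struct show_var { show_type type; void *value; };

static std::mutex lock_active_mi;          /* plays the role of LOCK_active_mi */
static bool have_master = true;
static long long received_heartbeats = 42;

static int show_received_heartbeats(show_var *var, char *buff)
{
  std::lock_guard<std::mutex> guard(lock_active_mi);
  if (have_master)
  {
    var->type  = SHOW_LONGLONG;
    var->value = buff;                     /* value lives in the caller's buffer */
    std::memcpy(buff, &received_heartbeats, sizeof received_heartbeats);
  }
  else
    var->type = SHOW_UNDEF;                /* no master configured */
  return 0;
}

int main()
{
  char buff[64];
  show_var var;
  show_received_heartbeats(&var, buff);
  if (var.type == SHOW_LONGLONG)
  {
    long long v;
    std::memcpy(&v, var.value, sizeof v);
    std::printf("Slave_received_heartbeats: %lld\n", v);
  }
  return 0;
}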
@@ -7590,6 +7608,8 @@ SHOW_VAR status_vars[]= {
{"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG},
#ifdef HAVE_REPLICATION
{"Slave_retried_transactions",(char*) &show_slave_retried_trans, SHOW_FUNC},
+ {"Slave_heartbeat_period", (char*) &show_heartbeat_period, SHOW_FUNC},
+ {"Slave_received_heartbeats",(char*) &show_slave_received_heartbeats, SHOW_FUNC},
{"Slave_running", (char*) &show_slave_running, SHOW_FUNC},
#endif
{"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG},
@@ -7635,7 +7655,7 @@ SHOW_VAR status_vars[]= {
{"Threads_created", (char*) &thread_created, SHOW_LONG_NOFLUSH},
{"Threads_running", (char*) &thread_running, SHOW_INT},
{"Uptime", (char*) &show_starttime, SHOW_FUNC},
-#ifdef COMMUNITY_SERVER
+#ifdef ENABLED_PROFILING
{"Uptime_since_flush_status",(char*) &show_flushstatustime, SHOW_FUNC},
#endif
{NullS, NullS, SHOW_LONG}
@@ -7645,10 +7665,7 @@ SHOW_VAR status_vars[]= {
static void print_version(void)
{
set_server_version();
- /*
- Note: the instance manager keys off the string 'Ver' so it can find the
- version from the output of 'mysqld --version', so don't change it!
- */
+
printf("%s Ver %s for %s on %s (%s)\n",my_progname,
server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT);
}
@@ -7765,7 +7782,6 @@ static int mysql_init_variables(void)
max_used_connections= slow_launch_threads = 0;
mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0;
prepared_stmt_count= 0;
- errmesg= 0;
mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS;
bzero((uchar*) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list));
bzero((char *) &global_status_var, sizeof(global_status_var));
@@ -7798,7 +7814,7 @@ static int mysql_init_variables(void)
mysql_home_ptr= mysql_home;
pidfile_name_ptr= pidfile_name;
log_error_file_ptr= log_error_file;
- language_ptr= language;
+ lc_messages_dir_ptr= lc_messages_dir;
mysql_data_home= mysql_real_data_home;
thd_startup_options= (OPTION_AUTO_IS_NULL | OPTION_BIN_LOG |
OPTION_QUOTE_SHOW_CREATE | OPTION_SQL_NOTES);
@@ -7823,7 +7839,6 @@ static int mysql_init_variables(void)
multi_keycache_init();
/* Set directory paths */
- strmake(language, LANGUAGE, sizeof(language)-1);
strmake(mysql_real_data_home, get_relative_path(MYSQL_DATADIR),
sizeof(mysql_real_data_home)-1);
mysql_data_home_buff[0]=FN_CURLIB; // all paths are relative from here
@@ -7846,6 +7861,7 @@ static int mysql_init_variables(void)
default_collation_name= compiled_default_collation_name;
sys_charset_system.value= (char*) system_charset_info->csname;
character_set_filesystem_name= (char*) "binary";
+ lc_messages= (char*) "en_US";
lc_time_names_name= (char*) "en_US";
/* Set default values for some option variables */
default_storage_engine_str= (char*) "MyISAM";
@@ -7871,10 +7887,10 @@ static int mysql_init_variables(void)
"d:t:i:o,/tmp/mysqld.trace");
#endif
opt_error_log= IF_WIN(1,0);
-#ifdef COMMUNITY_SERVER
- have_community_features = SHOW_OPTION_YES;
+#ifdef ENABLED_PROFILING
+ have_profiling = SHOW_OPTION_YES;
#else
- have_community_features = SHOW_OPTION_NO;
+ have_profiling = SHOW_OPTION_NO;
#endif
global_system_variables.ndb_index_stat_enable=FALSE;
max_system_variables.ndb_index_stat_enable=TRUE;
@@ -7992,7 +8008,7 @@ mysqld_get_one_option(int optid,
default_collation_name= 0;
break;
case 'l':
- WARN_DEPRECATED(NULL, "7.0", "--log", "'--general_log'/'--general_log_file'");
+ WARN_DEPRECATED(NULL, "7.0", "--log", "'--general-log'/'--general-log-file'");
opt_log=1;
break;
case 'h':
@@ -8008,7 +8024,7 @@ mysqld_get_one_option(int optid,
sql_print_warning("Ignoring user change to '%s' because the user was set to '%s' earlier on the command line\n", argument, mysqld_user);
break;
case 'L':
- strmake(language, argument, sizeof(language)-1);
+ strmake(lc_messages_dir, argument, sizeof(lc_messages_dir)-1);
break;
#ifdef HAVE_REPLICATION
case OPT_SLAVE_SKIP_ERRORS:
@@ -8166,7 +8182,8 @@ mysqld_get_one_option(int optid,
}
#endif /* HAVE_REPLICATION */
case (int) OPT_SLOW_QUERY_LOG:
- WARN_DEPRECATED(NULL, "7.0", "--log_slow_queries", "'--slow_query_log'/'--slow_query_log_file'");
+ WARN_DEPRECATED(NULL, "7.0", "--log-slow-queries",
+ "'--slow-query-log'/'--slow-query-log-file'");
opt_slow_log= 1;
break;
#ifdef WITH_CSV_STORAGE_ENGINE
@@ -8763,7 +8780,7 @@ static int fix_paths(void)
pos[0]= FN_LIBCHAR;
pos[1]= 0;
}
- convert_dirname(language,language,NullS);
+ convert_dirname(lc_messages_dir, lc_messages_dir, NullS);
convert_dirname(mysql_real_data_home,mysql_real_data_home,NullS);
(void) my_load_path(mysql_home,mysql_home,""); // Resolve current dir
(void) my_load_path(mysql_real_data_home,mysql_real_data_home,mysql_home);
@@ -8784,7 +8801,7 @@ static int fix_paths(void)
else
strxnmov(buff,sizeof(buff)-1,mysql_home,sharedir,NullS);
convert_dirname(buff,buff,NullS);
- (void) my_load_path(language,language,buff);
+ (void) my_load_path(lc_messages_dir, lc_messages_dir, buff);
/* If --character-sets-dir isn't given, use shared library dir */
if (charsets_dir != mysql_charsets_dir)
@@ -8994,9 +9011,7 @@ void refresh_status(THD *thd)
/* Reset the counters of all key caches (default and named). */
process_key_caches(reset_key_cache_counters);
-#ifdef COMMUNITY_SERVER
flush_status_time= time((time_t*) 0);
-#endif
pthread_mutex_unlock(&LOCK_status);
/*
diff --git a/sql/mysqld_suffix.h b/sql/mysqld_suffix.h
index 654d7cf88c1..c7ab212f2a2 100644
--- a/sql/mysqld_suffix.h
+++ b/sql/mysqld_suffix.h
@@ -1,3 +1,6 @@
+#ifndef MYSQLD_SUFFIX_INCLUDED
+#define MYSQLD_SUFFIX_INCLUDED
+
/* Copyright (C) 2000-2004 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -27,3 +30,4 @@
#else
#define MYSQL_SERVER_SUFFIX_STR MYSQL_SERVER_SUFFIX_DEF
#endif
+#endif /* MYSQLD_SUFFIX_INCLUDED */
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index 7ff26c50afc..5cf3597c638 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -91,12 +91,11 @@ void sql_print_error(const char *format,...);
*/
extern uint test_flags;
extern ulong bytes_sent, bytes_received, net_big_packet_count;
-extern pthread_mutex_t LOCK_bytes_sent , LOCK_bytes_received;
#ifndef MYSQL_INSTANCE_MANAGER
#ifdef HAVE_QUERY_CACHE
#define USE_QUERY_CACHE
-extern void query_cache_init_query(NET *net);
-extern void query_cache_insert(NET *net, const char *packet, ulong length);
+extern void query_cache_insert(const char *packet, ulong length,
+ unsigned pkt_nr);
#endif // HAVE_QUERY_CACHE
#define update_statistics(A) A
#endif /* MYSQL_INSTANCE_MANGER */
@@ -125,18 +124,14 @@ my_bool my_net_init(NET *net, Vio* vio)
MYF(MY_WME))))
DBUG_RETURN(1);
net->buff_end=net->buff+net->max_packet;
- net->error=0; net->return_errno=0; net->return_status=0;
+ net->error=0; net->return_status=0;
net->pkt_nr=net->compress_pkt_nr=0;
net->write_pos=net->read_pos = net->buff;
net->last_error[0]=0;
net->compress=0; net->reading_or_writing=0;
net->where_b = net->remain_in_buf=0;
net->last_errno=0;
-#ifdef USE_QUERY_CACHE
- query_cache_init_query(net);
-#else
- net->query_cache_query= 0;
-#endif
+ net->unused= 0;
if (vio != 0) /* If real connection */
{
@@ -586,7 +581,7 @@ net_real_write(NET *net,const uchar *packet, size_t len)
DBUG_ENTER("net_real_write");
#if defined(MYSQL_SERVER) && defined(USE_QUERY_CACHE)
- query_cache_insert(net, (char*) packet, len);
+ query_cache_insert((char*) packet, len, net->pkt_nr);
#endif
if (net->error == 2)
diff --git a/sql/nt_servc.h b/sql/nt_servc.h
index 2f0d07df543..5bee42dedf0 100644
--- a/sql/nt_servc.h
+++ b/sql/nt_servc.h
@@ -1,3 +1,6 @@
+#ifndef NT_SERVC_INCLUDED
+#define NT_SERVC_INCLUDED
+
/**
@file
@@ -98,3 +101,5 @@ class NTService
};
/* ------------------------- the end -------------------------------------- */
+
+#endif /* NT_SERVC_INCLUDED */
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 94204962345..ac5b1f575de 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -438,8 +438,19 @@ public:
return 0;
}
- /* returns a number of keypart values appended to the key buffer */
- int store_min_key(KEY_PART *key, uchar **range_key, uint *range_key_flag)
+ /*
+ Returns the number of keypart values appended to the key buffer
+ for min key and max key. This function is used by both Range
+ Analysis and Partition pruning. For partition pruning we have
+ to ensure that we don't also store subpartition fields. Thus
+ we have to stop at the last partition part and not step into
+ the subpartition fields. For Range Analysis we set last_part
+ to MAX_KEY, which we should never reach.
+ */
+ int store_min_key(KEY_PART *key,
+ uchar **range_key,
+ uint *range_key_flag,
+ uint last_part)
{
SEL_ARG *key_tree= first();
uint res= key_tree->store_min(key[key_tree->part].store_length,
@@ -447,15 +458,21 @@ public:
*range_key_flag|= key_tree->min_flag;
if (key_tree->next_key_part &&
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
+ key_tree->part != last_part &&
key_tree->next_key_part->part == key_tree->part+1 &&
!(*range_key_flag & (NO_MIN_RANGE | NEAR_MIN)))
- res+= key_tree->next_key_part->store_min_key(key, range_key,
- range_key_flag);
+ res+= key_tree->next_key_part->store_min_key(key,
+ range_key,
+ range_key_flag,
+ last_part);
return res;
}
/* returns a number of keypart values appended to the key buffer */
- int store_max_key(KEY_PART *key, uchar **range_key, uint *range_key_flag)
+ int store_max_key(KEY_PART *key,
+ uchar **range_key,
+ uint *range_key_flag,
+ uint last_part)
{
SEL_ARG *key_tree= last();
uint res=key_tree->store_max(key[key_tree->part].store_length,
@@ -463,10 +480,13 @@ public:
(*range_key_flag)|= key_tree->max_flag;
if (key_tree->next_key_part &&
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE &&
+ key_tree->part != last_part &&
key_tree->next_key_part->part == key_tree->part+1 &&
!(*range_key_flag & (NO_MAX_RANGE | NEAR_MAX)))
- res+= key_tree->next_key_part->store_max_key(key, range_key,
- range_key_flag);
+ res+= key_tree->next_key_part->store_max_key(key,
+ range_key,
+ range_key_flag,
+ last_part);
return res;
}
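The extra last_part argument lets the same key-concatenation code serve both callers: range analysis passes MAX_KEY so the limit is never hit, while partition pruning passes the last partitioning key part so the recursion never appends subpartition fields. A simplified sketch of that stop condition over a linked list of key parts (the types here are illustrative, not SEL_ARG/KEY_PART):

#include <cstdio>
#include <string>

struct key_part_value
{
  std::string value;           /* value of this key part */
  key_part_value *next;        /* next key part, or nullptr */
  unsigned part;               /* key part number, 0-based */
};

/* Append key part values to 'key', but never step past 'last_part'.
   Passing a huge last_part (the analogue of MAX_KEY) disables the limit. */
static unsigned store_key(const key_part_value *kp, std::string *key,
                          unsigned last_part)
{
  unsigned stored = 1;
  key->append(kp->value);
  if (kp->next && kp->part != last_part && kp->next->part == kp->part + 1)
    stored += store_key(kp->next, key, last_part);
  return stored;
}

int main()
{
  key_part_value c = { "C", nullptr, 2 };  /* e.g. a subpartition field */
  key_part_value b = { "B", &c, 1 };
  key_part_value a = { "A", &b, 0 };

  std::string full, pruned;
  std::printf("%u parts: %s\n", store_key(&a, &full, ~0u), full.c_str());   /* 3: ABC */
  std::printf("%u parts: %s\n", store_key(&a, &pruned, 1), pruned.c_str()); /* 2: AB  */
  return 0;
}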
@@ -634,6 +654,14 @@ public:
using_real_indexes==TRUE
*/
uint real_keynr[MAX_KEY];
+
+ /*
+ Used to store 'current key tuples', in both range analysis and
+ partitioning (list) analysis
+ */
+ uchar min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
+ max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
+
/* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */
uint alloced_sel_args;
};
@@ -645,8 +673,6 @@ public:
longlong baseflag;
uint max_key_part, range_count;
- uchar min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
- max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
bool quick; // Don't calulate possible keys
uint fields_bitmap_size;
@@ -708,7 +734,8 @@ static
TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
double read_time);
static
-TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree);
+TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree,
+ double read_time);
static double get_index_only_read_time(const PARAM* param, ha_rows records,
int keynr);
@@ -2050,7 +2077,7 @@ public:
class TRP_GROUP_MIN_MAX : public TABLE_READ_PLAN
{
private:
- bool have_min, have_max;
+ bool have_min, have_max, have_agg_distinct;
KEY_PART_INFO *min_max_arg_part;
uint group_prefix_len;
uint used_key_parts;
@@ -2062,11 +2089,13 @@ private:
SEL_TREE *range_tree; /* Represents all range predicates in the query. */
SEL_ARG *index_tree; /* The SEL_ARG sub-tree corresponding to index_info. */
uint param_idx; /* Index of used key in param->key. */
- /* Number of records selected by the ranges in index_tree. */
+ bool is_index_scan; /* Use index_next() instead of random read */
public:
+ /* Number of records selected by the ranges in index_tree. */
ha_rows quick_prefix_records;
public:
- TRP_GROUP_MIN_MAX(bool have_min_arg, bool have_max_arg,
+ TRP_GROUP_MIN_MAX(bool have_min_arg, bool have_max_arg,
+ bool have_agg_distinct_arg,
KEY_PART_INFO *min_max_arg_part_arg,
uint group_prefix_len_arg, uint used_key_parts_arg,
uint group_key_parts_arg, KEY *index_info_arg,
@@ -2075,11 +2104,12 @@ public:
SEL_TREE *tree_arg, SEL_ARG *index_tree_arg,
uint param_idx_arg, ha_rows quick_prefix_records_arg)
: have_min(have_min_arg), have_max(have_max_arg),
+ have_agg_distinct(have_agg_distinct_arg),
min_max_arg_part(min_max_arg_part_arg),
group_prefix_len(group_prefix_len_arg), used_key_parts(used_key_parts_arg),
group_key_parts(group_key_parts_arg), index_info(index_info_arg),
index(index_arg), key_infix_len(key_infix_len_arg), range_tree(tree_arg),
- index_tree(index_tree_arg), param_idx(param_idx_arg),
+ index_tree(index_tree_arg), param_idx(param_idx_arg), is_index_scan(FALSE),
quick_prefix_records(quick_prefix_records_arg)
{
if (key_infix_len)
@@ -2089,6 +2119,7 @@ public:
QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows,
MEM_ROOT *parent_alloc);
+ void use_index_scan() { is_index_scan= TRUE; }
};
@@ -2350,7 +2381,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Try to construct a QUICK_GROUP_MIN_MAX_SELECT.
Notice that it can be constructed no matter if there is a range tree.
*/
- group_trp= get_best_group_min_max(&param, tree);
+ group_trp= get_best_group_min_max(&param, tree, best_read_time);
if (group_trp)
{
param.table->quick_condition_rows= min(group_trp->records,
@@ -2600,6 +2631,8 @@ typedef struct st_part_prune_param
/* Same as above for subpartitioning */
my_bool *is_subpart_keypart;
+ my_bool ignore_part_fields; /* Ignore rest of partitioning fields */
+
/***************************************************************
Following fields form find_used_partitions() recursion context:
**************************************************************/
@@ -2613,8 +2646,13 @@ typedef struct st_part_prune_param
/* Iterator to be used to obtain the "current" set of used partitions */
PARTITION_ITERATOR part_iter;
- /* Initialized bitmap of no_subparts size */
+ /* Initialized bitmap of num_subparts size */
MY_BITMAP subparts_bitmap;
+
+ uchar *cur_min_key;
+ uchar *cur_max_key;
+
+ uint cur_min_flag, cur_max_flag;
} PART_PRUNE_PARAM;
static bool create_partition_index_description(PART_PRUNE_PARAM *prune_par);
@@ -2732,6 +2770,11 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
prune_param.arg_stack_end= prune_param.arg_stack;
prune_param.cur_part_fields= 0;
prune_param.cur_subpart_fields= 0;
+
+ prune_param.cur_min_key= prune_param.range_param.min_key;
+ prune_param.cur_max_key= prune_param.range_param.max_key;
+ prune_param.cur_min_flag= prune_param.cur_max_flag= 0;
+
init_all_partitions_iterator(part_info, &prune_param.part_iter);
if (!tree->keys[0] || (-1 == (res= find_used_partitions(&prune_param,
tree->keys[0]))))
@@ -2869,8 +2912,8 @@ static void mark_full_partition_used_no_parts(partition_info* part_info,
static void mark_full_partition_used_with_parts(partition_info *part_info,
uint32 part_id)
{
- uint32 start= part_id * part_info->no_subparts;
- uint32 end= start + part_info->no_subparts;
+ uint32 start= part_id * part_info->num_subparts;
+ uint32 end= start + part_info->num_subparts;
DBUG_ENTER("mark_full_partition_used_with_parts");
for (; start != end; start++)
@@ -2968,6 +3011,11 @@ int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, SEL_IMERGE *imerge)
ppar->arg_stack_end= ppar->arg_stack;
ppar->cur_part_fields= 0;
ppar->cur_subpart_fields= 0;
+
+ ppar->cur_min_key= ppar->range_param.min_key;
+ ppar->cur_max_key= ppar->range_param.max_key;
+ ppar->cur_min_flag= ppar->cur_max_flag= 0;
+
init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
SEL_ARG *key_tree= (*ptree)->keys[0];
if (!key_tree || (-1 == (res |= find_used_partitions(ppar, key_tree))))
@@ -3091,9 +3139,14 @@ static
int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
{
int res, left_res=0, right_res=0;
- int partno= (int)key_tree->part;
- bool pushed= FALSE;
+ int key_tree_part= (int)key_tree->part;
bool set_full_part_if_bad_ret= FALSE;
+ bool ignore_part_fields= ppar->ignore_part_fields;
+ bool did_set_ignore_part_fields= FALSE;
+ RANGE_OPT_PARAM *range_par= &(ppar->range_param);
+
+ if (check_stack_overrun(range_par->thd, 3*STACK_MIN_SIZE, NULL))
+ return -1;
if (key_tree->left != &null_element)
{
@@ -3101,56 +3154,177 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
return -1;
}
+ /* Push SEL_ARG's to stack to enable looking backwards as well */
+ ppar->cur_part_fields+= ppar->is_part_keypart[key_tree_part];
+ ppar->cur_subpart_fields+= ppar->is_subpart_keypart[key_tree_part];
+ *(ppar->arg_stack_end++)= key_tree;
+
if (key_tree->type == SEL_ARG::KEY_RANGE)
{
- if (partno == 0 && (NULL != ppar->part_info->get_part_iter_for_interval))
+ if (ppar->part_info->get_part_iter_for_interval &&
+ key_tree->part <= ppar->last_part_partno)
{
- /*
- Partitioning is done by RANGE|INTERVAL(monotonic_expr(fieldX)), and
- we got "const1 CMP fieldX CMP const2" interval <-- psergey-todo: change
+ if (ignore_part_fields)
+ {
+ /*
+ We come here when a condition on the first partitioning
+ fields led to evaluating the partitioning condition
+ (due to finding a condition of the type a < const or
+ b > const). Thus we must ignore the rest of the
+ partitioning fields but we still want to analyse the
+ subpartitioning fields.
+ */
+ if (key_tree->next_key_part)
+ res= find_used_partitions(ppar, key_tree->next_key_part);
+ else
+ res= -1;
+ goto pop_and_go_right;
+ }
+ /* Collect left and right bound, their lengths and flags */
+ uchar *min_key= ppar->cur_min_key;
+ uchar *max_key= ppar->cur_max_key;
+ uchar *tmp_min_key= min_key;
+ uchar *tmp_max_key= max_key;
+ key_tree->store_min(ppar->key[key_tree->part].store_length,
+ &tmp_min_key, ppar->cur_min_flag);
+ key_tree->store_max(ppar->key[key_tree->part].store_length,
+ &tmp_max_key, ppar->cur_max_flag);
+ uint flag;
+ if (key_tree->next_key_part &&
+ key_tree->next_key_part->part == key_tree->part+1 &&
+ key_tree->next_key_part->part <= ppar->last_part_partno &&
+ key_tree->next_key_part->type == SEL_ARG::KEY_RANGE)
+ {
+ /*
+ There are more key parts for partition pruning to handle
+ This mainly happens when the condition is an equality
+ condition.
+ */
+ if ((tmp_min_key - min_key) == (tmp_max_key - max_key) &&
+ (memcmp(min_key, max_key, (uint)(tmp_max_key - max_key)) == 0) &&
+ !key_tree->min_flag && !key_tree->max_flag)
+ {
+ /* Set 'parameters' */
+ ppar->cur_min_key= tmp_min_key;
+ ppar->cur_max_key= tmp_max_key;
+ uint save_min_flag= ppar->cur_min_flag;
+ uint save_max_flag= ppar->cur_max_flag;
+
+ ppar->cur_min_flag|= key_tree->min_flag;
+ ppar->cur_max_flag|= key_tree->max_flag;
+
+ res= find_used_partitions(ppar, key_tree->next_key_part);
+
+ /* Restore 'parameters' back */
+ ppar->cur_min_key= min_key;
+ ppar->cur_max_key= max_key;
+
+ ppar->cur_min_flag= save_min_flag;
+ ppar->cur_max_flag= save_max_flag;
+ goto pop_and_go_right;
+ }
+ /* We have arrived at the last field in the partition pruning */
+ uint tmp_min_flag= key_tree->min_flag,
+ tmp_max_flag= key_tree->max_flag;
+ if (!tmp_min_flag)
+ key_tree->next_key_part->store_min_key(ppar->key,
+ &tmp_min_key,
+ &tmp_min_flag,
+ ppar->last_part_partno);
+ if (!tmp_max_flag)
+ key_tree->next_key_part->store_max_key(ppar->key,
+ &tmp_max_key,
+ &tmp_max_flag,
+ ppar->last_part_partno);
+ flag= tmp_min_flag | tmp_max_flag;
+ }
+ else
+ flag= key_tree->min_flag | key_tree->max_flag;
+
+ if (tmp_min_key != range_par->min_key)
+ flag&= ~NO_MIN_RANGE;
+ else
+ flag|= NO_MIN_RANGE;
+ if (tmp_max_key != range_par->max_key)
+ flag&= ~NO_MAX_RANGE;
+ else
+ flag|= NO_MAX_RANGE;
+
+ /*
+ We need to call the interval mapper if we have a condition which
+ makes sense to prune on. In the example of COLUMNS on a and
+ b it makes sense if we have a condition on a, or conditions on
+ both a and b. If we only have conditions on b it might make sense
+ but this is a harder case we will solve later. For the harder case
+ this clause then turns into use of all partitions and thus we
+ simply set res= -1 as if the mapper had returned that.
+ TODO: What to do here is defined in WL#4065.
*/
- DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
- ppar->range_param.
- key_parts););
- res= ppar->part_info->
- get_part_iter_for_interval(ppar->part_info,
- FALSE,
- key_tree->min_value,
- key_tree->max_value,
- key_tree->min_flag | key_tree->max_flag,
- &ppar->part_iter);
- if (!res)
- goto go_right; /* res==0 --> no satisfying partitions */
+ if (ppar->arg_stack[0]->part == 0)
+ {
+ uint32 i;
+ uint32 store_length_array[MAX_KEY];
+ uint32 num_keys= ppar->part_fields;
+
+ for (i= 0; i < num_keys; i++)
+ store_length_array[i]= ppar->key[i].store_length;
+ res= ppar->part_info->
+ get_part_iter_for_interval(ppar->part_info,
+ FALSE,
+ store_length_array,
+ range_par->min_key,
+ range_par->max_key,
+ tmp_min_key - range_par->min_key,
+ tmp_max_key - range_par->max_key,
+ flag,
+ &ppar->part_iter);
+ if (!res)
+ goto pop_and_go_right; /* res==0 --> no satisfying partitions */
+ }
+ else
+ res= -1;
+
if (res == -1)
{
- //get a full range iterator
+ /* get a full range iterator */
init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
}
/*
Save our intent to mark full partition as used if we will not be able
to obtain further limits on subpartitions
*/
+ if (key_tree_part < ppar->last_part_partno)
+ {
+ /*
+ We need to ignore the rest of the partitioning fields in all
+ evaluations after this
+ */
+ did_set_ignore_part_fields= TRUE;
+ ppar->ignore_part_fields= TRUE;
+ }
set_full_part_if_bad_ret= TRUE;
goto process_next_key_part;
}
- if (partno == ppar->last_subpart_partno &&
+ if (key_tree_part == ppar->last_subpart_partno &&
(NULL != ppar->part_info->get_subpart_iter_for_interval))
{
PARTITION_ITERATOR subpart_iter;
DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
- ppar->range_param.
- key_parts););
+ range_par->key_parts););
res= ppar->part_info->
get_subpart_iter_for_interval(ppar->part_info,
TRUE,
+ NULL, /* Currently not used here */
key_tree->min_value,
key_tree->max_value,
- key_tree->min_flag | key_tree->max_flag,
+ 0, 0, /* Those are ignored here */
+ key_tree->min_flag |
+ key_tree->max_flag,
&subpart_iter);
DBUG_ASSERT(res); /* We can't get "no satisfying subpartitions" */
if (res == -1)
- return -1; /* all subpartitions satisfy */
+ goto pop_and_go_right; /* all subpartitions satisfy */
uint32 subpart_id;
bitmap_clear_all(&ppar->subparts_bitmap);
@@ -3163,23 +3337,19 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
NOT_A_PARTITION_ID)
{
- for (uint i= 0; i < ppar->part_info->no_subparts; i++)
+ for (uint i= 0; i < ppar->part_info->num_subparts; i++)
if (bitmap_is_set(&ppar->subparts_bitmap, i))
bitmap_set_bit(&ppar->part_info->used_partitions,
- part_id * ppar->part_info->no_subparts + i);
+ part_id * ppar->part_info->num_subparts + i);
}
- goto go_right;
+ goto pop_and_go_right;
}
if (key_tree->is_singlepoint())
{
- pushed= TRUE;
- ppar->cur_part_fields+= ppar->is_part_keypart[partno];
- ppar->cur_subpart_fields+= ppar->is_subpart_keypart[partno];
- *(ppar->arg_stack_end++) = key_tree;
-
- if (partno == ppar->last_part_partno &&
- ppar->cur_part_fields == ppar->part_fields)
+ if (key_tree_part == ppar->last_part_partno &&
+ ppar->cur_part_fields == ppar->part_fields &&
+ ppar->part_info->get_part_iter_for_interval == NULL)
{
/*
Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all partitioning
@@ -3208,7 +3378,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
goto process_next_key_part;
}
- if (partno == ppar->last_subpart_partno &&
+ if (key_tree_part == ppar->last_subpart_partno &&
ppar->cur_subpart_fields == ppar->subpart_fields)
{
/*
@@ -3232,7 +3402,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
NOT_A_PARTITION_ID)
{
bitmap_set_bit(&part_info->used_partitions,
- part_id * part_info->no_subparts + subpart_id);
+ part_id * part_info->num_subparts + subpart_id);
}
res= 1; /* Some partitions were marked as used */
goto pop_and_go_right;
@@ -3245,8 +3415,11 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
we're processing subpartititoning's key parts, this means we'll not be
able to infer any suitable condition, so bail out.
*/
- if (partno >= ppar->last_part_partno)
- return -1;
+ if (key_tree_part >= ppar->last_part_partno)
+ {
+ res= -1;
+ goto pop_and_go_right;
+ }
}
}
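find_used_partitions() now threads one shared min/max key buffer through the recursion: on an equality over a partitioning column it appends that column's value, saves the current write positions and flags, recurses into the next key part, and restores them so sibling SEL_ARGs start from the same prefix. A minimal sketch of that save-and-restore pattern (structure only; none of the real pruning logic):

#include <cstdio>
#include <string>
#include <vector>

/* One 'column' with the candidate values its SEL_ARGs describe. */
typedef std::vector<std::string> column_values;

/* Walk column by column; cur_key plays the role of the shared
   min/max key buffer that is extended and then restored. */
static void walk(const std::vector<column_values> &cols, size_t col,
                 std::string *cur_key)
{
  if (col == cols.size())
  {
    std::printf("prefix used for pruning: %s\n", cur_key->c_str());
    return;
  }
  for (const std::string &v : cols[col])
  {
    size_t saved_len = cur_key->size();   /* save 'parameters' */
    cur_key->append(v);                   /* append this column's value */
    walk(cols, col + 1, cur_key);         /* process the next key part */
    cur_key->resize(saved_len);           /* restore before the sibling */
  }
}

int main()
{
  std::vector<column_values> cols = { { "a1", "a2" }, { "b1" } };
  std::string cur_key;
  walk(cols, 0, &cur_key);   /* prints a1b1 and a2b1 */
  return 0;
}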
@@ -3255,7 +3428,17 @@ process_next_key_part:
res= find_used_partitions(ppar, key_tree->next_key_part);
else
res= -1;
-
+
+ if (did_set_ignore_part_fields)
+ {
+ /*
+ We have returned from processing all key trees linked to our next
+ key part. We are ready to move down (using right pointers), and
+ this tree is a new evaluation requiring its own decision on whether
+ to ignore partitioning fields.
+ */
+ ppar->ignore_part_fields= FALSE;
+ }
if (set_full_part_if_bad_ret)
{
if (res == -1)
@@ -3278,18 +3461,14 @@ process_next_key_part:
init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
}
- if (pushed)
- {
pop_and_go_right:
- /* Pop this key part info off the "stack" */
- ppar->arg_stack_end--;
- ppar->cur_part_fields-= ppar->is_part_keypart[partno];
- ppar->cur_subpart_fields-= ppar->is_subpart_keypart[partno];
- }
+ /* Pop this key part info off the "stack" */
+ ppar->arg_stack_end--;
+ ppar->cur_part_fields-= ppar->is_part_keypart[key_tree_part];
+ ppar->cur_subpart_fields-= ppar->is_subpart_keypart[key_tree_part];
if (res == -1)
return -1;
-go_right:
if (key_tree->right != &null_element)
{
if (-1 == (right_res= find_used_partitions(ppar,key_tree->right)))
@@ -3371,13 +3550,14 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
uint used_part_fields, used_subpart_fields;
used_part_fields= fields_ok_for_partition_index(part_info->part_field_array) ?
- part_info->no_part_fields : 0;
+ part_info->num_part_fields : 0;
used_subpart_fields=
fields_ok_for_partition_index(part_info->subpart_field_array)?
- part_info->no_subpart_fields : 0;
+ part_info->num_subpart_fields : 0;
uint total_parts= used_part_fields + used_subpart_fields;
+ ppar->ignore_part_fields= FALSE;
ppar->part_fields= used_part_fields;
ppar->last_part_partno= (int)used_part_fields - 1;
@@ -3412,10 +3592,10 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
if (ppar->subpart_fields)
{
my_bitmap_map *buf;
- uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts);
+ uint32 bufsize= bitmap_buffer_size(ppar->part_info->num_subparts);
if (!(buf= (my_bitmap_map*) alloc_root(alloc, bufsize)))
return TRUE;
- bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts,
+ bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->num_subparts,
FALSE);
}
range_par->key_parts= key_part;
@@ -3426,12 +3606,8 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
{
key_part->key= 0;
key_part->part= part;
- key_part->store_length= key_part->length= (uint16) (*field)->key_length();
- if ((*field)->real_maybe_null())
- key_part->store_length+= HA_KEY_NULL_LENGTH;
- if ((*field)->type() == MYSQL_TYPE_BLOB ||
- (*field)->real_type() == MYSQL_TYPE_VARCHAR)
- key_part->store_length+= HA_KEY_BLOB_LENGTH;
+ key_part->length= (uint16)(*field)->key_length();
+ key_part->store_length= (uint16)get_partition_field_store_length(*field);
DBUG_PRINT("info", ("part %u length %u store_length %u", part,
key_part->length, key_part->store_length));
@@ -5707,7 +5883,8 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
value->result_type() == STRING_RESULT &&
key_part->image_type == Field::itRAW &&
((Field_str*)field)->charset() != conf_func->compare_collation() &&
- !(conf_func->compare_collation()->state & MY_CS_BINSORT))
+ !(conf_func->compare_collation()->state & MY_CS_BINSORT &&
+ (type == Item_func::EQUAL_FUNC || type == Item_func::EQ_FUNC)))
goto end;
if (key_part->image_type == Field::itMBR)
@@ -7608,12 +7785,16 @@ check_quick_keys(PARAM *param, uint idx, SEL_ARG *key_tree,
tmp_max_flag=key_tree->max_flag;
if (!tmp_min_flag)
tmp_min_keypart+=
- key_tree->next_key_part->store_min_key(param->key[idx], &tmp_min_key,
- &tmp_min_flag);
+ key_tree->next_key_part->store_min_key(param->key[idx],
+ &tmp_min_key,
+ &tmp_min_flag,
+ MAX_KEY);
if (!tmp_max_flag)
tmp_max_keypart+=
- key_tree->next_key_part->store_max_key(param->key[idx], &tmp_max_key,
- &tmp_max_flag);
+ key_tree->next_key_part->store_max_key(param->key[idx],
+ &tmp_max_key,
+ &tmp_max_flag,
+ MAX_KEY);
min_key_length= (uint) (tmp_min_key - param->min_key);
max_key_length= (uint) (tmp_max_key - param->max_key);
}
@@ -7883,11 +8064,15 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key,
{
uint tmp_min_flag=key_tree->min_flag,tmp_max_flag=key_tree->max_flag;
if (!tmp_min_flag)
- min_part+= key_tree->next_key_part->store_min_key(key, &tmp_min_key,
- &tmp_min_flag);
+ min_part+= key_tree->next_key_part->store_min_key(key,
+ &tmp_min_key,
+ &tmp_min_flag,
+ MAX_KEY);
if (!tmp_max_flag)
- max_part+= key_tree->next_key_part->store_max_key(key, &tmp_max_key,
- &tmp_max_flag);
+ max_part+= key_tree->next_key_part->store_max_key(key,
+ &tmp_max_key,
+ &tmp_max_flag,
+ MAX_KEY);
flag=tmp_min_flag | tmp_max_flag;
}
}
@@ -9180,15 +9365,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
double *read_cost, ha_rows *records);
-/*
+/**
Test if this access method is applicable to a GROUP query with MIN/MAX
functions, and if so, construct a new TRP object.
- SYNOPSIS
- get_best_group_min_max()
- param Parameter from test_quick_select
- sel_tree Range tree generated by get_mm_tree
-
DESCRIPTION
Test whether a query can be computed via a QUICK_GROUP_MIN_MAX_SELECT.
Queries computable via a QUICK_GROUP_MIN_MAX_SELECT must satisfy the
@@ -9299,17 +9479,16 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
- Lift the limitation in condition (B3), that is, make this access method
applicable to ROLLUP queries.
- RETURN
- If mem_root != NULL
- - valid TRP_GROUP_MIN_MAX object if this QUICK class can be used for
- the query
- - NULL o/w.
- If mem_root == NULL
- - NULL
+ @param param Parameter from test_quick_select
+ @param sel_tree Range tree generated by get_mm_tree
+ @param read_time Best read time so far (=table/index scan time)
+ @return table read plan
+ @retval NULL Loose index scan not applicable or mem_root == NULL
+ @retval !NULL Loose index scan table read plan
*/
static TRP_GROUP_MIN_MAX *
-get_best_group_min_max(PARAM *param, SEL_TREE *tree)
+get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
THD *thd= param->thd;
JOIN *join= thd->lex->current_select->join;
@@ -9330,25 +9509,33 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
ORDER *tmp_group;
Item *item;
Item_field *item_field;
+ bool is_agg_distinct;
+ List<Item_field> agg_distinct_flds;
+
DBUG_ENTER("get_best_group_min_max");
/* Perform few 'cheap' tests whether this access method is applicable. */
if (!join)
DBUG_RETURN(NULL); /* This is not a select statement. */
if ((join->tables != 1) || /* The query must reference one table. */
- ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */
- (!join->select_distinct)) ||
(join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
DBUG_RETURN(NULL);
if (table->s->keys == 0) /* There are no indexes to use. */
DBUG_RETURN(NULL);
- /* Analyze the query in more detail. */
- List_iterator<Item> select_items_it(join->fields_list);
-
/* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/
if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))
DBUG_RETURN(NULL);
+
+ List_iterator<Item> select_items_it(join->fields_list);
+ is_agg_distinct = is_indexed_agg_distinct(join, &agg_distinct_flds);
+
+ if ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */
+ (!join->select_distinct) &&
+ !is_agg_distinct)
+ DBUG_RETURN(NULL);
+ /* Analyze the query in more detail. */
+
if (join->sum_funcs[0])
{
Item_sum *min_max_item;
@@ -9359,6 +9546,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
have_min= TRUE;
else if (min_max_item->sum_func() == Item_sum::MAX_FUNC)
have_max= TRUE;
+ else if (min_max_item->sum_func() == Item_sum::COUNT_DISTINCT_FUNC ||
+ min_max_item->sum_func() == Item_sum::SUM_DISTINCT_FUNC ||
+ min_max_item->sum_func() == Item_sum::AVG_DISTINCT_FUNC)
+ continue;
else
DBUG_RETURN(NULL);
@@ -9375,13 +9566,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
DBUG_RETURN(NULL);
}
}
-
/* Check (SA5). */
if (join->select_distinct)
{
while ((item= select_items_it++))
{
- if (item->type() != Item::FIELD_ITEM)
+ if (item->real_item()->type() != Item::FIELD_ITEM)
DBUG_RETURN(NULL);
}
}
@@ -9389,7 +9579,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
/* Check (GA4) - that there are no expressions among the group attributes. */
for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next)
{
- if ((*tmp_group->item)->type() != Item::FIELD_ITEM)
+ if ((*tmp_group->item)->real_item()->type() != Item::FIELD_ITEM)
DBUG_RETURN(NULL);
}
@@ -9408,6 +9598,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
uint best_param_idx= 0;
const uint pk= param->table->s->primary_key;
+ uint max_key_part;
SEL_ARG *cur_index_tree= NULL;
ha_rows cur_quick_prefix_records= 0;
uint cur_param_idx=MAX_KEY;
@@ -9461,6 +9652,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
}
}
+ max_key_part= 0;
+ used_key_parts_map.clear_all();
/*
Check (GA1) for GROUP BY queries.
*/
@@ -9484,6 +9677,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
{
cur_group_prefix_len+= cur_part->store_length;
++cur_group_key_parts;
+ max_key_part= cur_part - cur_index_info->key_part + 1;
+ used_key_parts_map.set_bit(max_key_part);
}
else
goto next_index;
@@ -9497,14 +9692,26 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
Later group_fields_array of ORDER objects is used to convert the query
to a GROUP query.
*/
- else if (join->select_distinct)
+ if ((!join->group_list && join->select_distinct) ||
+ is_agg_distinct)
{
- select_items_it.rewind();
- used_key_parts_map.clear_all();
- uint max_key_part= 0;
- while ((item= select_items_it++))
+ if (!is_agg_distinct)
{
- item_field= (Item_field*) item; /* (SA5) already checked above. */
+ select_items_it.rewind();
+ }
+
+ List_iterator<Item_field> agg_distinct_flds_it (agg_distinct_flds);
+ while (NULL != (item = (is_agg_distinct ?
+ (Item *) agg_distinct_flds_it++ : select_items_it++)))
+ {
+ /* (SA5) already checked above. */
+ item_field= (Item_field*) item->real_item();
+ DBUG_ASSERT(item->real_item()->type() == Item::FIELD_ITEM);
+
+ /* not doing loose index scan for derived tables */
+ if (!item_field->field)
+ goto next_index;
+
/* Find the order of the key part in the index. */
key_part_nr= get_field_keypart(cur_index_info, item_field->field);
/*
@@ -9513,7 +9720,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
*/
if (used_key_parts_map.is_set(key_part_nr))
continue;
- if (key_part_nr < 1 || key_part_nr > join->fields_list.elements)
+ if (key_part_nr < 1 ||
+ (!is_agg_distinct && key_part_nr > join->fields_list.elements))
goto next_index;
cur_part= cur_index_info->key_part + key_part_nr - 1;
cur_group_prefix_len+= cur_part->store_length;
@@ -9533,10 +9741,6 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
if (all_parts != cur_parts)
goto next_index;
}
- else
- {
- DBUG_ASSERT(FALSE);
- }
/* Check (SA2). */
if (min_max_arg_item)
@@ -9690,7 +9894,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
/* The query passes all tests, so construct a new TRP object. */
read_plan= new (param->mem_root)
- TRP_GROUP_MIN_MAX(have_min, have_max, min_max_arg_part,
+ TRP_GROUP_MIN_MAX(have_min, have_max, is_agg_distinct,
+ min_max_arg_part,
group_prefix_len, used_key_parts,
group_key_parts, index_info, index,
key_infix_len,
@@ -9704,6 +9909,11 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
read_plan->read_cost= best_read_cost;
read_plan->records= best_records;
+ if (read_time < best_read_cost && is_agg_distinct)
+ {
+ read_plan->read_cost= 0;
+ read_plan->use_index_scan();
+ }
DBUG_PRINT("info",
("Returning group min/max plan: cost: %g, records: %lu",
@@ -10213,11 +10423,12 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table,
param->thd->lex->current_select->join,
- have_min, have_max, min_max_arg_part,
+ have_min, have_max,
+ have_agg_distinct, min_max_arg_part,
group_prefix_len, group_key_parts,
used_key_parts, index_info, index,
read_cost, records, key_infix_len,
- key_infix, parent_alloc);
+ key_infix, parent_alloc, is_index_scan);
if (!quick)
DBUG_RETURN(NULL);
@@ -10297,6 +10508,9 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
key_infix_len Length of the key infix appended to the group prefix
key_infix Infix of constants from equality predicates
parent_alloc Memory pool for this and quick_prefix_select data
+ is_index_scan get the next different key not by jumping to it via
+ an index read, but by scanning to the end of the
+ rows with an equal key value.
RETURN
None
@@ -10304,20 +10518,22 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
QUICK_GROUP_MIN_MAX_SELECT::
QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg,
- bool have_max_arg,
+ bool have_max_arg, bool have_agg_distinct_arg,
KEY_PART_INFO *min_max_arg_part_arg,
uint group_prefix_len_arg, uint group_key_parts_arg,
uint used_key_parts_arg, KEY *index_info_arg,
uint use_index, double read_cost_arg,
ha_rows records_arg, uint key_infix_len_arg,
- uchar *key_infix_arg, MEM_ROOT *parent_alloc)
+ uchar *key_infix_arg, MEM_ROOT *parent_alloc,
+ bool is_index_scan_arg)
:join(join_arg), index_info(index_info_arg),
group_prefix_len(group_prefix_len_arg),
group_key_parts(group_key_parts_arg), have_min(have_min_arg),
- have_max(have_max_arg), seen_first_key(FALSE),
- min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg),
- key_infix_len(key_infix_len_arg), min_functions_it(NULL),
- max_functions_it(NULL)
+ have_max(have_max_arg), have_agg_distinct(have_agg_distinct_arg),
+ seen_first_key(FALSE), min_max_arg_part(min_max_arg_part_arg),
+ key_infix(key_infix_arg), key_infix_len(key_infix_len_arg),
+ min_functions_it(NULL), max_functions_it(NULL),
+ is_index_scan(is_index_scan_arg)
{
head= table;
file= head->file;
@@ -10880,6 +11096,56 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max()
}
+/**
+ Find the next different key value by skipping all the rows with the same key
+ value.
+
+ Implements a specialized loose index access method for queries
+ containing aggregate functions with distinct of the form:
+ SELECT [SUM|COUNT|AVG](DISTINCT a,...) FROM t
+ It replaces the index scan + Unique class (distinct selection) with a
+ loose index scan that visits all the rows of a covering index instead
+ of jumping to the beginning of each group.
+ TODO: Placeholder function. To be replaced by a handler API call
+
+ @param is_index_scan hint to use index scan instead of random index read
+ to find the next different value.
+ @param file table handler
+ @param key_part group key to compare
+ @param record row data
+ @param group_prefix current key prefix data
+ @param group_prefix_len length of the current key prefix data
+ @param group_key_parts number of the current key prefix columns
+ @return status
+ @retval 0 success
+ @retval !0 failure
+*/
+
+static int index_next_different (bool is_index_scan, handler *file,
+ KEY_PART_INFO *key_part, uchar * record,
+ const uchar * group_prefix,
+ uint group_prefix_len,
+ uint group_key_parts)
+{
+ if (is_index_scan)
+ {
+ int result= 0;
+
+ while (!key_cmp (key_part, group_prefix, group_prefix_len))
+ {
+ result= file->index_next(record);
+ if (result)
+ return(result);
+ }
+ return result;
+ }
+ else
+ return file->index_read_map(record, group_prefix,
+ make_prev_keypart_map(group_key_parts),
+ HA_READ_AFTER_KEY);
+}
+
+
/*
Determine the prefix of the next group.
@@ -10926,9 +11192,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
else
{
/* Load the first key in this group into record. */
- result= file->index_read_map(record, group_prefix,
- make_prev_keypart_map(group_key_parts),
- HA_READ_AFTER_KEY);
+ result= index_next_different (is_index_scan, file, index_info->key_part,
+ record, group_prefix, group_prefix_len,
+ group_key_parts);
if (result)
DBUG_RETURN(result);
}
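index_next_different() chooses between two ways of reaching the next group: jump directly to it with an index read past the current prefix, or, for the aggregate-DISTINCT case, scan forward row by row until the group prefix changes. A self-contained sketch of the two strategies over a sorted container, where std::upper_bound plays the role of the index read and a linear advance plays the role of the scan:

#include <algorithm>
#include <cstdio>
#include <vector>

typedef std::vector<int>::const_iterator row_iter;

/* Position on the first row of the next group (next different key). */
static row_iter next_different(row_iter cur, row_iter end, bool is_index_scan)
{
  int group_key = *cur;
  if (is_index_scan)
  {
    while (cur != end && *cur == group_key)   /* scan until the key changes */
      ++cur;
    return cur;
  }
  /* otherwise 'jump': the analogue of index_read_map(HA_READ_AFTER_KEY) */
  return std::upper_bound(cur, end, group_key);
}

int main()
{
  std::vector<int> keys = { 1, 1, 1, 2, 2, 3 };   /* covering index, sorted */
  for (row_iter it = keys.begin(); it != keys.end();
       it = next_different(it, keys.end(), true))
    std::printf("group starts with key %d\n", *it);
  return 0;
}

Passing false selects the jump strategy instead; both return the same groups.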
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 8d2ba1bb0a6..393ffcb2115 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -616,6 +616,7 @@ private:
uchar *last_prefix; /* Prefix of the last group for detecting EOF. */
bool have_min; /* Specify whether we are computing */
bool have_max; /* a MIN, a MAX, or both. */
+ bool have_agg_distinct;/* aggregate_function(DISTINCT ...). */
bool seen_first_key; /* Denotes whether the first key was retrieved.*/
KEY_PART_INFO *min_max_arg_part; /* The keypart of the only argument field */
/* of all MIN/MAX functions. */
@@ -629,6 +630,11 @@ private:
List<Item_sum> *max_functions;
List_iterator<Item_sum> *min_functions_it;
List_iterator<Item_sum> *max_functions_it;
+ /*
+ Use an index scan to get the next different key instead of jumping to it
+ through an index read
+ */
+ bool is_index_scan;
public:
/*
The following two members are public to allow easy access from
@@ -646,12 +652,13 @@ private:
void update_max_result();
public:
QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min,
- bool have_max, KEY_PART_INFO *min_max_arg_part,
+ bool have_max, bool have_agg_distinct,
+ KEY_PART_INFO *min_max_arg_part,
uint group_prefix_len, uint group_key_parts,
uint used_key_parts, KEY *index_info, uint
use_index, double read_cost, ha_rows records, uint
key_infix_len, uchar *key_infix, MEM_ROOT
- *parent_alloc);
+ *parent_alloc, bool is_index_scan);
~QUICK_GROUP_MIN_MAX_SELECT();
bool add_range(SEL_ARG *sel_range);
void update_key_stat();
@@ -667,6 +674,12 @@ public:
#ifndef DBUG_OFF
void dbug_dump(int indent, bool verbose);
#endif
+ bool is_agg_distinct() { return have_agg_distinct; }
+ virtual void append_loose_scan_type(String *str)
+ {
+ if (is_index_scan)
+ str->append(STRING_WITH_LEN(" (scanning)"));
+ }
};
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index e009cf1ca9f..13b10ac2e8f 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -356,10 +356,13 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
+ item_sum->set_aggregator(item_sum->has_with_distinct() ?
+ Aggregator::DISTINCT_AGGREGATOR :
+ Aggregator::SIMPLE_AGGREGATOR);
if (!count)
{
/* If count == 0, then we know that is_exact_count == TRUE. */
- ((Item_sum_min*) item_sum)->clear(); /* Set to NULL. */
+ ((Item_sum_min*) item_sum)->aggregator_clear(); /* Set to NULL. */
}
else
((Item_sum_min*) item_sum)->reset(); /* Set to the constant value. */
@@ -444,10 +447,13 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
const_result= 0;
break;
}
+ item_sum->set_aggregator(item_sum->has_with_distinct() ?
+ Aggregator::DISTINCT_AGGREGATOR :
+ Aggregator::SIMPLE_AGGREGATOR);
if (!count)
{
/* If count != 1, then we know that is_exact_count == TRUE. */
- ((Item_sum_max*) item_sum)->clear(); /* Set to NULL. */
+ ((Item_sum_max*) item_sum)->aggregator_clear(); /* Set to NULL. */
}
else
((Item_sum_max*) item_sum)->reset(); /* Set to the constant value. */
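With the aggregator split, opt_sum_query() must state which aggregator it wants: the distinct one, which de-duplicates its input, or the simple one, which does not. A rough sketch of that choice, with a std::set standing in for the server's distinct machinery (the Aggregator classes themselves are not reproduced here):

#include <cstdio>
#include <set>
#include <vector>

/* Two aggregation strategies: SUM(x) vs SUM(DISTINCT x). */
static long long sum_simple(const std::vector<int> &vals)
{
  long long sum = 0;
  for (int v : vals)
    sum += v;
  return sum;
}

static long long sum_distinct(const std::vector<int> &vals)
{
  std::set<int> seen;                 /* stands in for the distinct/Unique code */
  long long sum = 0;
  for (int v : vals)
    if (seen.insert(v).second)        /* only add the first occurrence */
      sum += v;
  return sum;
}

int main()
{
  std::vector<int> vals = { 3, 3, 4 };
  bool has_with_distinct = true;      /* the analogue of has_with_distinct() */
  long long result = has_with_distinct ? sum_distinct(vals) : sum_simple(vals);
  std::printf("%lld\n", result);      /* 7 with DISTINCT, 10 without */
  return 0;
}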
diff --git a/sql/partition_element.h b/sql/partition_element.h
index 905bc38165b..73f85ac275f 100644
--- a/sql/partition_element.h
+++ b/sql/partition_element.h
@@ -1,4 +1,7 @@
-/* Copyright (C) 2006 MySQL AB
+#ifndef PARTITION_ELEMENT_INCLUDED
+#define PARTITION_ELEMENT_INCLUDED
+
+/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -32,10 +35,40 @@ enum partition_state {
PART_REORGED_DROPPED= 5,
PART_CHANGED= 6,
PART_IS_CHANGED= 7,
- PART_IS_ADDED= 8
+ PART_IS_ADDED= 8,
+ PART_ADMIN= 9
};
/*
+ This struct is used to keep track of column expressions as part
+ of the COLUMNS concept in conjunction with RANGE and LIST partitioning.
+ The value can be MINVALUE, MAXVALUE, or an expression that
+ must be constant and evaluate to the same type as the column it
+ represents.
+
+ The data in this struct is fixed in two steps. The parser will only fill
+ in whether it is a max_value or provide an expression. Filling in
+ column_value, part_info, partition_id, null_value is done by the
+ function fix_column_value_function. However, the item tree also needs to
+ be fixed before writing it into the frm file (in add_column_list_values).
+ To distinguish between those two variants, fixed= 1 after the
+ fixing in add_column_list_values and fixed= 2 otherwise. This is
+ because the fixing in add_column_list_values isn't a complete fixing.
+*/
+
+typedef struct p_column_list_val
+{
+ void* column_value;
+ Item* item_expression;
+ partition_info *part_info;
+ uint partition_id;
+ bool max_value;
+ bool null_value;
+ char fixed;
+} part_column_list_val;
+
+
+/*
This struct is used to contain the value of an element
in the VALUES IN struct. It needs to keep knowledge of
whether it is a signed/unsigned value and whether it is
@@ -45,8 +78,10 @@ enum partition_state {
typedef struct p_elem_val
{
longlong value;
+ uint added_items;
bool null_value;
bool unsigned_flag;
+ part_column_list_val *col_val_array;
} part_elem_value;
struct st_ddl_log_memory_entry;
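A part_column_list_val carries a value together with max_value and null_value flags, so any comparison has to look at the flags before the value itself. A hedged sketch of how such a flagged value could be ordered; the ordering shown (NULL below every value, MAXVALUE above every value) is an assumption made for illustration, not a statement of the server's exact rules:

#include <cstdio>

/* Simplified column-list value: either NULL, MAXVALUE, or a plain value. */
struct col_val
{
  long long value;
  bool max_value;    /* represents MAXVALUE */
  bool null_value;   /* represents NULL */
};

/* Returns <0, 0 or >0; assumes NULL sorts below every value and
   MAXVALUE above every value (illustrative ordering). */
static int cmp_col_val(const col_val &a, const col_val &b)
{
  if (a.null_value || b.null_value)
    return (a.null_value ? 0 : 1) - (b.null_value ? 0 : 1);
  if (a.max_value || b.max_value)
    return (a.max_value ? 1 : 0) - (b.max_value ? 1 : 0);
  return (a.value > b.value) - (a.value < b.value);
}

int main()
{
  col_val null_v = { 0, false, true };
  col_val ten    = { 10, false, false };
  col_val maxv   = { 0, true, false };
  std::printf("%d %d %d\n",
              cmp_col_val(null_v, ten),   /* negative: NULL < 10 */
              cmp_col_val(ten, maxv),     /* negative: 10 < MAXVALUE */
              cmp_col_val(maxv, maxv));   /* zero */
  return 0;
}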
@@ -68,8 +103,9 @@ public:
enum partition_state part_state;
uint16 nodegroup_id;
bool has_null_value;
- bool signed_flag;/* Indicate whether this partition uses signed constants */
- bool max_value; /* Indicate whether this partition uses MAXVALUE */
+ /* signed_flag and max_value only relevant for subpartitions */
+ bool signed_flag;
+ bool max_value;
partition_element()
: part_max_rows(0), part_min_rows(0), range_value(0),
@@ -97,3 +133,5 @@ public:
}
~partition_element() {}
};
+
+#endif /* PARTITION_ELEMENT_INCLUDED */
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index ba9ea0e876e..56d79ac0d45 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006 MySQL AB
+/* Copyright (C) 2006-2008 MySQL AB, Sun Microsystems Inc. 2008-2009
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -75,7 +75,7 @@ partition_info *partition_info::get_clone()
SYNOPSIS
create_default_partition_names()
part_no Partition number for subparts
- no_parts Number of partitions
+ num_parts Number of partitions
start_no Starting partition number
subpart Is it subpartitions
@@ -91,10 +91,10 @@ partition_info *partition_info::get_clone()
#define MAX_PART_NAME_SIZE 8
char *partition_info::create_default_partition_names(uint part_no,
- uint no_parts_arg,
+ uint num_parts_arg,
uint start_no)
{
- char *ptr= (char*) sql_calloc(no_parts_arg*MAX_PART_NAME_SIZE);
+ char *ptr= (char*) sql_calloc(num_parts_arg*MAX_PART_NAME_SIZE);
char *move_ptr= ptr;
uint i= 0;
DBUG_ENTER("create_default_partition_names");
@@ -105,11 +105,11 @@ char *partition_info::create_default_partition_names(uint part_no,
{
my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i)));
move_ptr+=MAX_PART_NAME_SIZE;
- } while (++i < no_parts_arg);
+ } while (++i < num_parts_arg);
}
else
{
- mem_alloc_error(no_parts_arg*MAX_PART_NAME_SIZE);
+ mem_alloc_error(num_parts_arg*MAX_PART_NAME_SIZE);
}
DBUG_RETURN(ptr);
}
@@ -189,19 +189,19 @@ bool partition_info::set_up_default_partitions(handler *file,
goto end;
}
- if ((no_parts == 0) &&
- ((no_parts= file->get_default_no_partitions(info)) == 0))
+ if ((num_parts == 0) &&
+ ((num_parts= file->get_default_no_partitions(info)) == 0))
{
my_error(ER_PARTITION_NOT_DEFINED_ERROR, MYF(0), "partitions");
goto end;
}
- if (unlikely(no_parts > MAX_PARTITIONS))
+ if (unlikely(num_parts > MAX_PARTITIONS))
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
goto end;
}
- if (unlikely((!(default_name= create_default_partition_names(0, no_parts,
+ if (unlikely((!(default_name= create_default_partition_names(0, num_parts,
start_no)))))
goto end;
i= 0;
@@ -220,7 +220,7 @@ bool partition_info::set_up_default_partitions(handler *file,
mem_alloc_error(sizeof(partition_element));
goto end;
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
result= FALSE;
end:
DBUG_RETURN(result);
@@ -259,9 +259,9 @@ bool partition_info::set_up_default_subpartitions(handler *file,
List_iterator<partition_element> part_it(partitions);
DBUG_ENTER("partition_info::set_up_default_subpartitions");
- if (no_subparts == 0)
- no_subparts= file->get_default_no_partitions(info);
- if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
+ if (num_subparts == 0)
+ num_subparts= file->get_default_no_partitions(info);
+ if (unlikely((num_parts * num_subparts) > MAX_PARTITIONS))
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
goto end;
@@ -288,8 +288,8 @@ bool partition_info::set_up_default_subpartitions(handler *file,
mem_alloc_error(sizeof(partition_element));
goto end;
}
- } while (++j < no_subparts);
- } while (++i < no_parts);
+ } while (++j < num_subparts);
+ } while (++i < num_parts);
result= FALSE;
end:
DBUG_RETURN(result);
@@ -334,6 +334,49 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file,
/*
+ Support routine for check_partition_info
+
+ SYNOPSIS
+ has_unique_fields
+ no parameters
+
+ RETURN VALUE
+    Erroneous field name Error, there are two fields with the same name
+ NULL Ok, no field defined twice
+
+ DESCRIPTION
+    Check that the user hasn't defined the same field twice in
+    key or column list partitioning.
+*/
+char* partition_info::has_unique_fields()
+{
+ char *field_name_outer, *field_name_inner;
+ List_iterator<char> it_outer(part_field_list);
+ uint num_fields= part_field_list.elements;
+ uint i,j;
+ DBUG_ENTER("partition_info::has_unique_fields");
+
+ for (i= 0; i < num_fields; i++)
+ {
+ field_name_outer= it_outer++;
+ List_iterator<char> it_inner(part_field_list);
+ for (j= 0; j < num_fields; j++)
+ {
+ field_name_inner= it_inner++;
+ if (i >= j)
+ continue;
+ if (!(my_strcasecmp(system_charset_info,
+ field_name_outer,
+ field_name_inner)))
+ {
+ DBUG_RETURN(field_name_outer);
+ }
+ }
+ }
+ DBUG_RETURN(NULL);
+}
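
The duplicate check above walks every ordered pair of field names with a case-insensitive compare. A minimal standalone sketch of the same idea over a plain array (illustrative only; strcasecmp is assumed to be available as on POSIX, and the field names are made up):

    #include <cstdio>
    #include <strings.h>   /* strcasecmp(); use _stricmp on Windows */

    /* Returns the first name that appears twice (case-insensitively), or NULL. */
    static const char *find_duplicate_field(const char **names, unsigned count)
    {
      for (unsigned i= 0; i < count; i++)
        for (unsigned j= i + 1; j < count; j++)   /* each unordered pair once */
          if (strcasecmp(names[i], names[j]) == 0)
            return names[i];
      return NULL;
    }

    int main()
    {
      const char *fields[]= { "a", "b", "A" };
      const char *dup= find_duplicate_field(fields, 3);
      printf("duplicate: %s\n", dup ? dup : "(none)");   /* prints "a" */
      return 0;
    }

The server version iterates the full square and skips i >= j instead of starting the inner loop at i + 1, presumably because List_iterator only moves forward from a rewound position; the effect is the same.
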
+
+/*
A support function to check if a partition element's name is unique
SYNOPSIS
@@ -518,12 +561,12 @@ bool partition_info::check_engine_mix(handlerton *engine_type,
{
handlerton *old_engine_type= engine_type;
bool first= TRUE;
- uint no_parts= partitions.elements;
+ uint num_parts= partitions.elements;
DBUG_ENTER("partition_info::check_engine_mix");
DBUG_PRINT("info", ("in: engine_type = %s, table_engine_set = %u",
ha_resolve_storage_engine_name(engine_type),
table_engine_set));
- if (no_parts)
+ if (num_parts)
{
List_iterator<partition_element> part_it(partitions);
uint i= 0;
@@ -536,7 +579,7 @@ bool partition_info::check_engine_mix(handlerton *engine_type,
if (is_sub_partitioned() &&
part_elem->subpartitions.elements)
{
- uint no_subparts= part_elem->subpartitions.elements;
+ uint num_subparts= part_elem->subpartitions.elements;
uint j= 0;
List_iterator<partition_element> sub_it(part_elem->subpartitions);
do
@@ -548,7 +591,7 @@ bool partition_info::check_engine_mix(handlerton *engine_type,
if (check_engine_condition(sub_elem, table_engine_set,
&engine_type, &first))
goto error;
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
/* ensure that the partition also has correct engine */
if (check_engine_condition(part_elem, table_engine_set,
&engine_type, &first))
@@ -557,7 +600,7 @@ bool partition_info::check_engine_mix(handlerton *engine_type,
else if (check_engine_condition(part_elem, table_engine_set,
&engine_type, &first))
goto error;
- } while (++i < no_parts);
+ } while (++i < num_parts);
}
DBUG_PRINT("info", ("engine_type = %s",
ha_resolve_storage_engine_name(engine_type)));
@@ -589,6 +632,7 @@ error:
SYNOPSIS
check_range_constants()
+ thd Thread object
RETURN VALUE
TRUE An error occurred during creation of range constants
@@ -601,76 +645,112 @@ error:
called for RANGE PARTITIONed tables.
*/
-bool partition_info::check_range_constants()
+bool partition_info::check_range_constants(THD *thd)
{
partition_element* part_def;
- longlong current_largest;
- longlong part_range_value;
bool first= TRUE;
uint i;
List_iterator<partition_element> it(partitions);
- bool result= TRUE;
- bool signed_flag= !part_expr->unsigned_flag;
+ int result= TRUE;
DBUG_ENTER("partition_info::check_range_constants");
- DBUG_PRINT("enter", ("INT_RESULT with %d parts", no_parts));
-
- LINT_INIT(current_largest);
+ DBUG_PRINT("enter", ("RANGE with %d parts, column_list = %u", num_parts,
+ column_list));
- part_result_type= INT_RESULT;
- range_int_array= (longlong*)sql_alloc(no_parts * sizeof(longlong));
- if (unlikely(range_int_array == NULL))
+ if (column_list)
{
- mem_alloc_error(no_parts * sizeof(longlong));
- goto end;
- }
- i= 0;
- do
- {
- part_def= it++;
- if ((i != (no_parts - 1)) || !defined_max_value)
+ part_column_list_val *loc_range_col_array;
+ part_column_list_val *current_largest_col_val;
+ uint num_column_values= part_field_list.elements;
+ uint size_entries= sizeof(part_column_list_val) * num_column_values;
+ range_col_array= (part_column_list_val*)sql_calloc(num_parts *
+ size_entries);
+ LINT_INIT(current_largest_col_val);
+ if (unlikely(range_col_array == NULL))
{
- part_range_value= part_def->range_value;
- if (!signed_flag)
- part_range_value-= 0x8000000000000000ULL;
+ mem_alloc_error(num_parts * size_entries);
+ goto end;
}
- else
- part_range_value= LONGLONG_MAX;
- if (first)
+ loc_range_col_array= range_col_array;
+ i= 0;
+ do
{
- current_largest= part_range_value;
- range_int_array[0]= part_range_value;
+ part_def= it++;
+ {
+ List_iterator<part_elem_value> list_val_it(part_def->list_val_list);
+ part_elem_value *range_val= list_val_it++;
+ part_column_list_val *col_val= range_val->col_val_array;
+
+ if (fix_column_value_functions(thd, range_val, i))
+ goto end;
+ memcpy(loc_range_col_array, (const void*)col_val, size_entries);
+ loc_range_col_array+= num_column_values;
+ if (!first)
+ {
+ if (compare_column_values((const void*)current_largest_col_val,
+ (const void*)col_val) >= 0)
+ goto range_not_increasing_error;
+ }
+ current_largest_col_val= col_val;
+ }
first= FALSE;
+ } while (++i < num_parts);
+ }
+ else
+ {
+ longlong current_largest;
+ longlong part_range_value;
+ bool signed_flag= !part_expr->unsigned_flag;
+
+ LINT_INIT(current_largest);
+
+ part_result_type= INT_RESULT;
+ range_int_array= (longlong*)sql_alloc(num_parts * sizeof(longlong));
+ if (unlikely(range_int_array == NULL))
+ {
+ mem_alloc_error(num_parts * sizeof(longlong));
+ goto end;
}
- else
+ i= 0;
+ do
{
- if (likely(current_largest < part_range_value))
+ part_def= it++;
+ if ((i != (num_parts - 1)) || !defined_max_value)
{
- current_largest= part_range_value;
- range_int_array[i]= part_range_value;
- }
- else if (defined_max_value &&
- current_largest == part_range_value &&
- part_range_value == LONGLONG_MAX &&
- i == (no_parts - 1))
- {
- range_int_array[i]= part_range_value;
+ part_range_value= part_def->range_value;
+ if (!signed_flag)
+ part_range_value-= 0x8000000000000000ULL;
}
else
+ part_range_value= LONGLONG_MAX;
+
+ if (!first)
{
- my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0));
- goto end;
+ if (unlikely(current_largest > part_range_value) ||
+ (unlikely(current_largest == part_range_value) &&
+ (part_range_value < LONGLONG_MAX ||
+ i != (num_parts - 1) ||
+ !defined_max_value)))
+ goto range_not_increasing_error;
}
- }
- } while (++i < no_parts);
+ range_int_array[i]= part_range_value;
+ current_largest= part_range_value;
+ first= FALSE;
+ } while (++i < num_parts);
+ }
result= FALSE;
end:
DBUG_RETURN(result);
+
+range_not_increasing_error:
+ my_error(ER_RANGE_NOT_INCREASING_ERROR, MYF(0));
+ goto end;
}
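
For the non-COLUMNS branch above, the invariant being enforced is that the RANGE boundaries are strictly increasing, with an equal value tolerated only when it is the trailing MAXVALUE. The unsigned case is folded into the same signed comparison by shifting values by 2^63. A standalone sketch of both ideas (illustrative only; it assumes the usual two's complement wrap-around, as the server effectively does):

    #include <climits>
    #include <cstdio>

    /* Check that RANGE boundaries are strictly increasing; LLONG_MAX stands in
       for MAXVALUE and an equal bound is tolerated only in the last slot when
       MAXVALUE was actually written. */
    static bool ranges_increasing(const long long *bounds, unsigned num_parts,
                                  bool last_is_maxvalue)
    {
      for (unsigned i= 1; i < num_parts; i++)
      {
        if (bounds[i - 1] > bounds[i])
          return false;                           /* decreasing: reject     */
        if (bounds[i - 1] == bounds[i] &&
            !(last_is_maxvalue && i == num_parts - 1 && bounds[i] == LLONG_MAX))
          return false;                           /* equal and not MAXVALUE */
      }
      return true;
    }

    /* Reuse signed comparison for unsigned values by shifting with 2^63. */
    static long long to_signed_order(unsigned long long v)
    {
      return (long long) (v - 0x8000000000000000ULL);
    }

    int main()
    {
      long long bounds[]= { 10, 20, LLONG_MAX };
      printf("increasing: %d\n", (int) ranges_increasing(bounds, 3, true));
      printf("5 below 2^63 in unsigned order: %d\n",
             (int) (to_signed_order(5ULL) < to_signed_order(1ULL << 63)));
      return 0;
    }
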
/*
Support routines for check_list_constants used by qsort to sort the
- constant list expressions. One routine for unsigned and one for signed.
+ constant list expressions. One routine for integers and one for
+ column lists.
SYNOPSIS
list_part_cmp()
@@ -695,6 +775,54 @@ int partition_info::list_part_cmp(const void* a, const void* b)
return 0;
}
+/*
+ Compare two lists of column values in RANGE/LIST partitioning
+ SYNOPSIS
+ compare_column_values()
+ first First column list argument
+ second Second column list argument
+ RETURN VALUES
+ 0 Equal
+ -1 First argument is smaller
+ +1 First argument is larger
+*/
+
+int partition_info::compare_column_values(const void *first_arg,
+ const void *second_arg)
+{
+ const part_column_list_val *first= (part_column_list_val*)first_arg;
+ const part_column_list_val *second= (part_column_list_val*)second_arg;
+ partition_info *part_info= first->part_info;
+ Field **field;
+
+ for (field= part_info->part_field_array; *field;
+ field++, first++, second++)
+ {
+ if (first->max_value || second->max_value)
+ {
+ if (first->max_value && second->max_value)
+ return 0;
+ if (second->max_value)
+ return -1;
+ else
+ return +1;
+ }
+ if (first->null_value || second->null_value)
+ {
+ if (first->null_value && second->null_value)
+ continue;
+ if (second->null_value)
+ return +1;
+ else
+ return -1;
+ }
+ int res= (*field)->cmp((const uchar*)first->column_value,
+ (const uchar*)second->column_value);
+ if (res)
+ return res;
+ }
+ return 0;
+}
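
The per-column ordering implemented above is: MAXVALUE sorts above everything, NULL sorts below everything, and otherwise the field comparison decides; the first column that differs settles the whole tuple. A small self-contained sketch with plain integers in place of Field::cmp (the names and types here are invented):

    #include <cstdio>
    #include <vector>

    /* Invented stand-in for one column slot of a value tuple. */
    struct ColVal { bool max_value; bool null_value; long value; };

    static int cmp_col_tuples(const std::vector<ColVal> &a,
                              const std::vector<ColVal> &b)
    {
      for (size_t i= 0; i < a.size(); i++)
      {
        if (a[i].max_value || b[i].max_value)
        {
          if (a[i].max_value && b[i].max_value)
            return 0;                         /* both MAXVALUE: treated as equal */
          return b[i].max_value ? -1 : +1;    /* MAXVALUE is the largest         */
        }
        if (a[i].null_value || b[i].null_value)
        {
          if (a[i].null_value && b[i].null_value)
            continue;                         /* both NULL: look at next column  */
          return b[i].null_value ? +1 : -1;   /* NULL is the smallest            */
        }
        if (a[i].value != b[i].value)
          return a[i].value < b[i].value ? -1 : +1;
      }
      return 0;
    }

    int main()
    {
      std::vector<ColVal> x, y;
      ColVal c1= { false, false, 1 }, cmax= { true, false, 0 }, c5= { false, false, 5 };
      x.push_back(c1); x.push_back(cmax);     /* (1, MAXVALUE) */
      y.push_back(c1); y.push_back(c5);       /* (1, 5)        */
      printf("%d\n", cmp_col_tuples(x, y));   /* prints 1: MAXVALUE beats 5 */
      return 0;
    }
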
/*
This routine allocates an array for all list constants to achieve a fast
@@ -704,6 +832,7 @@ int partition_info::list_part_cmp(const void* a, const void* b)
SYNOPSIS
check_list_constants()
+ thd Thread object
RETURN VALUE
TRUE An error occurred during creation of list constants
@@ -716,20 +845,23 @@ int partition_info::list_part_cmp(const void* a, const void* b)
called for LIST PARTITIONed tables.
*/
-bool partition_info::check_list_constants()
+bool partition_info::check_list_constants(THD *thd)
{
- uint i;
+ uint i, size_entries, num_column_values;
uint list_index= 0;
part_elem_value *list_value;
bool result= TRUE;
- longlong curr_value, prev_value, type_add, calc_value;
+ longlong type_add, calc_value;
+ void *curr_value, *prev_value;
partition_element* part_def;
bool found_null= FALSE;
+ int (*compare_func)(const void *, const void*);
+ void *ptr;
List_iterator<partition_element> list_func_it(partitions);
DBUG_ENTER("partition_info::check_list_constants");
part_result_type= INT_RESULT;
- no_list_values= 0;
+ num_list_values= 0;
/*
We begin by calculating the number of list values that have been
defined in the first step.
@@ -762,51 +894,86 @@ bool partition_info::check_list_constants()
}
List_iterator<part_elem_value> list_val_it1(part_def->list_val_list);
while (list_val_it1++)
- no_list_values++;
- } while (++i < no_parts);
+ num_list_values++;
+ } while (++i < num_parts);
list_func_it.rewind();
- list_array= (LIST_PART_ENTRY*)sql_alloc((no_list_values+1) *
- sizeof(LIST_PART_ENTRY));
- if (unlikely(list_array == NULL))
+ num_column_values= part_field_list.elements;
+ size_entries= column_list ?
+ (num_column_values * sizeof(part_column_list_val)) :
+ sizeof(LIST_PART_ENTRY);
+ ptr= sql_calloc((num_list_values+1) * size_entries);
+ if (unlikely(ptr == NULL))
{
- mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY));
+ mem_alloc_error(num_list_values * size_entries);
goto end;
}
-
- i= 0;
- /*
- Fix to be able to reuse signed sort functions also for unsigned
- partition functions.
- */
- type_add= (longlong)(part_expr->unsigned_flag ?
+ if (column_list)
+ {
+ part_column_list_val *loc_list_col_array;
+ loc_list_col_array= (part_column_list_val*)ptr;
+ list_col_array= (part_column_list_val*)ptr;
+ compare_func= compare_column_values;
+ i= 0;
+ do
+ {
+ part_def= list_func_it++;
+ List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
+ while ((list_value= list_val_it2++))
+ {
+ part_column_list_val *col_val= list_value->col_val_array;
+ if (unlikely(fix_column_value_functions(thd, list_value, i)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ memcpy(loc_list_col_array, (const void*)col_val, size_entries);
+ loc_list_col_array+= num_column_values;
+ }
+ } while (++i < num_parts);
+ }
+ else
+ {
+ compare_func= list_part_cmp;
+ list_array= (LIST_PART_ENTRY*)ptr;
+ i= 0;
+ /*
+ Fix to be able to reuse signed sort functions also for unsigned
+ partition functions.
+ */
+ type_add= (longlong)(part_expr->unsigned_flag ?
0x8000000000000000ULL :
0ULL);
- do
- {
- part_def= list_func_it++;
- List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
- while ((list_value= list_val_it2++))
+ do
{
- calc_value= list_value->value - type_add;
- list_array[list_index].list_value= calc_value;
- list_array[list_index++].partition_id= i;
- }
- } while (++i < no_parts);
-
- if (fixed && no_list_values)
+ part_def= list_func_it++;
+ List_iterator<part_elem_value> list_val_it2(part_def->list_val_list);
+ while ((list_value= list_val_it2++))
+ {
+ calc_value= list_value->value - type_add;
+ list_array[list_index].list_value= calc_value;
+ list_array[list_index++].partition_id= i;
+ }
+ } while (++i < num_parts);
+ }
+ DBUG_ASSERT(fixed);
+ if (num_list_values)
{
bool first= TRUE;
- my_qsort((void*)list_array, no_list_values, sizeof(LIST_PART_ENTRY),
- &list_part_cmp);
-
+ /*
+      list_array and list_col_array are members of the same union, so this
+      works for both variants of LIST partitioning.
+ */
+ my_qsort((void*)list_array, num_list_values, size_entries,
+ compare_func);
+
i= 0;
LINT_INIT(prev_value);
do
{
- DBUG_ASSERT(i < no_list_values);
- curr_value= list_array[i].list_value;
- if (likely(first || prev_value != curr_value))
+ DBUG_ASSERT(i < num_list_values);
+ curr_value= column_list ? (void*)&list_col_array[num_column_values * i] :
+ (void*)&list_array[i];
+ if (likely(first || compare_func(curr_value, prev_value)))
{
prev_value= curr_value;
first= FALSE;
@@ -816,7 +983,7 @@ bool partition_info::check_list_constants()
my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
goto end;
}
- } while (++i < no_list_values);
+ } while (++i < num_list_values);
}
result= FALSE;
end:
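
The duplicate detection at the end of check_list_constants relies on sorting all list values first, after which any value defined twice must appear in adjacent array slots. The same idea in a self-contained sketch (std::sort in place of my_qsort, plain integers in place of LIST_PART_ENTRY or column tuples):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
      std::vector<long long> values;
      values.push_back(4); values.push_back(7); values.push_back(1);
      values.push_back(7); values.push_back(9);        /* 7 appears twice */

      std::sort(values.begin(), values.end());

      bool duplicate= false;
      for (size_t i= 1; i < values.size(); i++)
        if (values[i] == values[i - 1])                 /* adjacent after sorting */
          duplicate= true;

      printf("duplicate found: %s\n", duplicate ? "yes" : "no");
      return 0;
    }
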
@@ -829,10 +996,11 @@ end:
SYNOPSIS
check_partition_info()
+ thd Thread object
+ eng_type Return value for used engine in partitions
file A reference to a handler of the table
info Create info
- engine_type Return value for used engine in partitions
- check_partition_function Should we check the partition function
+ add_or_reorg_part Is it ALTER TABLE ADD/REORGANIZE command
RETURN VALUE
TRUE Error, something went wrong
@@ -848,7 +1016,7 @@ end:
bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
handler *file, HA_CREATE_INFO *info,
- bool check_partition_function)
+ bool add_or_reorg_part)
{
handlerton *table_engine= default_engine_type;
uint i, tot_partitions;
@@ -859,11 +1027,11 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
DBUG_PRINT("info", ("default table_engine = %s",
ha_resolve_storage_engine_name(table_engine)));
- if (check_partition_function)
+ if (!add_or_reorg_part)
{
int err= 0;
- if (part_type != HASH_PARTITION || !list_of_part_fields)
+ if (!list_of_part_fields)
{
DBUG_ASSERT(part_expr);
err= part_expr->walk(&Item::check_partition_func_processor, 0,
@@ -877,9 +1045,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
+ if (thd->lex->sql_command == SQLCOM_CREATE_TABLE &&
+ fix_parser_data(thd))
+ goto end;
}
if (unlikely(!is_sub_partitioned() &&
- !(use_default_subpartitions && use_default_no_subpartitions)))
+ !(use_default_subpartitions && use_default_num_subpartitions)))
{
my_error(ER_SUBPARTITION_ERROR, MYF(0));
goto end;
@@ -937,6 +1108,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
}
}
+ if (part_field_list.elements > 0 &&
+ (same_name= has_unique_fields()))
+ {
+ my_error(ER_SAME_NAME_PARTITION_FIELD, MYF(0), same_name);
+ goto end;
+ }
if ((same_name= has_unique_names()))
{
my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
@@ -945,8 +1122,8 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
i= 0;
{
List_iterator<partition_element> part_it(partitions);
- uint no_parts_not_set= 0;
- uint prev_no_subparts_not_set= no_subparts + 1;
+ uint num_parts_not_set= 0;
+ uint prev_num_subparts_not_set= num_subparts + 1;
do
{
partition_element *part_elem= part_it++;
@@ -968,7 +1145,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
{
if (part_elem->engine_type == NULL)
{
- no_parts_not_set++;
+ num_parts_not_set++;
part_elem->engine_type= default_engine_type;
}
if (check_table_name(part_elem->partition_name,
@@ -983,7 +1160,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
else
{
uint j= 0;
- uint no_subparts_not_set= 0;
+ uint num_subparts_not_set= 0;
List_iterator<partition_element> sub_it(part_elem->subpartitions);
partition_element *sub_elem;
do
@@ -1002,44 +1179,45 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
else
{
sub_elem->engine_type= default_engine_type;
- no_subparts_not_set++;
+ num_subparts_not_set++;
}
}
DBUG_PRINT("info", ("part = %d sub = %d engine = %s", i, j,
ha_resolve_storage_engine_name(sub_elem->engine_type)));
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
- if (prev_no_subparts_not_set == (no_subparts + 1) &&
- (no_subparts_not_set == 0 || no_subparts_not_set == no_subparts))
- prev_no_subparts_not_set= no_subparts_not_set;
+ if (prev_num_subparts_not_set == (num_subparts + 1) &&
+ (num_subparts_not_set == 0 ||
+ num_subparts_not_set == num_subparts))
+ prev_num_subparts_not_set= num_subparts_not_set;
if (!table_engine_set &&
- prev_no_subparts_not_set != no_subparts_not_set)
+ prev_num_subparts_not_set != num_subparts_not_set)
{
- DBUG_PRINT("info", ("no_subparts_not_set = %u no_subparts = %u",
- no_subparts_not_set, no_subparts));
+ DBUG_PRINT("info", ("num_subparts_not_set = %u num_subparts = %u",
+ num_subparts_not_set, num_subparts));
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
goto end;
}
if (part_elem->engine_type == NULL)
{
- if (no_subparts_not_set == 0)
+ if (num_subparts_not_set == 0)
part_elem->engine_type= sub_elem->engine_type;
else
{
- no_parts_not_set++;
+ num_parts_not_set++;
part_elem->engine_type= default_engine_type;
}
}
}
- } while (++i < no_parts);
+ } while (++i < num_parts);
if (!table_engine_set &&
- no_parts_not_set != 0 &&
- no_parts_not_set != no_parts)
+ num_parts_not_set != 0 &&
+ num_parts_not_set != num_parts)
{
- DBUG_PRINT("info", ("no_parts_not_set = %u no_parts = %u",
- no_parts_not_set, no_subparts));
+ DBUG_PRINT("info", ("num_parts_not_set = %u num_parts = %u",
+ num_parts_not_set, num_subparts));
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
goto end;
}
@@ -1062,10 +1240,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
list constants.
*/
- if (fixed)
+ if (add_or_reorg_part)
{
- if (unlikely((part_type == RANGE_PARTITION && check_range_constants()) ||
- (part_type == LIST_PARTITION && check_list_constants())))
+ if (unlikely((part_type == RANGE_PARTITION &&
+ check_range_constants(thd)) ||
+ (part_type == LIST_PARTITION &&
+ check_list_constants(thd))))
goto end;
}
result= FALSE;
@@ -1096,20 +1276,101 @@ void partition_info::print_no_partition_found(TABLE *table)
if (check_single_table_access(current_thd,
SELECT_ACL, &table_list, TRUE))
+ {
my_message(ER_NO_PARTITION_FOR_GIVEN_VALUE,
ER(ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT), MYF(0));
+ }
else
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
- if (part_expr->null_value)
- buf_ptr= (char*)"NULL";
+ if (column_list)
+ buf_ptr= (char*)"from column_list";
else
- longlong2str(err_value, buf,
- part_expr->unsigned_flag ? 10 : -10);
+ {
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ if (part_expr->null_value)
+ buf_ptr= (char*)"NULL";
+ else
+ longlong2str(err_value, buf,
+ part_expr->unsigned_flag ? 10 : -10);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ }
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), buf_ptr);
- dbug_tmp_restore_column_map(table->read_set, old_map);
}
}
+
+
+/*
+ Set fields related to partition expression
+ SYNOPSIS
+ set_part_expr()
+ start_token Start of partition function string
+ item_ptr Pointer to item tree
+ end_token End of partition function string
+ is_subpart Subpartition indicator
+ RETURN VALUES
+ TRUE Memory allocation error
+ FALSE Success
+*/
+
+bool partition_info::set_part_expr(char *start_token, Item *item_ptr,
+ char *end_token, bool is_subpart)
+{
+ uint expr_len= end_token - start_token;
+ char *func_string= (char*) sql_memdup(start_token, expr_len);
+
+ if (!func_string)
+ {
+ mem_alloc_error(expr_len);
+ return TRUE;
+ }
+ if (is_subpart)
+ {
+ list_of_subpart_fields= FALSE;
+ subpart_expr= item_ptr;
+ subpart_func_string= func_string;
+ subpart_func_len= expr_len;
+ }
+ else
+ {
+ list_of_part_fields= FALSE;
+ part_expr= item_ptr;
+ part_func_string= func_string;
+ part_func_len= expr_len;
+ }
+ return FALSE;
+}
+
+
+/*
+ Check that partition fields and subpartition fields are not too long
+
+ SYNOPSIS
+ check_partition_field_length()
+
+ RETURN VALUES
+ TRUE Total length was too big
+ FALSE Length is ok
+*/
+
+bool partition_info::check_partition_field_length()
+{
+ uint store_length= 0;
+ uint i;
+ DBUG_ENTER("partition_info::check_partition_field_length");
+
+ for (i= 0; i < num_part_fields; i++)
+ store_length+= get_partition_field_store_length(part_field_array[i]);
+ if (store_length > MAX_KEY_LENGTH)
+ DBUG_RETURN(TRUE);
+ store_length= 0;
+ for (i= 0; i < num_subpart_fields; i++)
+ store_length+= get_partition_field_store_length(subpart_field_array[i]);
+ if (store_length > MAX_KEY_LENGTH)
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
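
check_partition_field_length above is just an accumulation of per-field store lengths checked against a limit. A trivial standalone sketch of the same check; the limit and the field lengths below are made-up numbers, not the server's MAX_KEY_LENGTH or real store lengths:

    #include <cstdio>

    static const unsigned MAX_KEY_LENGTH_SKETCH= 3072;   /* invented limit */

    static bool fields_too_long(const unsigned *store_lengths, unsigned count)
    {
      unsigned total= 0;
      for (unsigned i= 0; i < count; i++)
        total+= store_lengths[i];
      return total > MAX_KEY_LENGTH_SKETCH;
    }

    int main()
    {
      unsigned part_fields[]= { 4, 767, 2048 };   /* e.g. an INT and two VARCHARs */
      printf("too long: %d\n", (int) fields_too_long(part_fields, 3));   /* 0 */
      return 0;
    }
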
+
+
/*
Set up buffers and arrays for fields requiring preparation
SYNOPSIS
@@ -1221,46 +1482,6 @@ bool partition_info::set_up_charset_field_preps()
}
subpart_charset_field_array[i]= NULL;
}
- if (tot_fields)
- {
- uint k;
- size= tot_fields*sizeof(char**);
- if (!(char_ptrs= (uchar**)sql_calloc(size)))
- goto error;
- full_part_field_buffers= char_ptrs;
- if (!(char_ptrs= (uchar**)sql_calloc(size)))
- goto error;
- restore_full_part_field_ptrs= char_ptrs;
- size= (tot_fields + 1) * sizeof(char**);
- if (!(char_ptrs= (uchar**)sql_calloc(size)))
- goto error;
- full_part_charset_field_array= (Field**)char_ptrs;
- for (i= 0; i < tot_part_fields; i++)
- {
- full_part_charset_field_array[i]= part_charset_field_array[i];
- full_part_field_buffers[i]= part_field_buffers[i];
- }
- k= tot_part_fields;
- for (i= 0; i < tot_subpart_fields; i++)
- {
- uint j;
- bool found= FALSE;
- field= subpart_charset_field_array[i];
-
- for (j= 0; j < tot_part_fields; j++)
- {
- if (field == part_charset_field_array[i])
- found= TRUE;
- }
- if (!found)
- {
- full_part_charset_field_array[k]= subpart_charset_field_array[i];
- full_part_field_buffers[k]= subpart_field_buffers[i];
- k++;
- }
- }
- full_part_charset_field_array[k]= NULL;
- }
DBUG_RETURN(FALSE);
error:
mem_alloc_error(size);
@@ -1321,5 +1542,636 @@ id_err:
return 1;
}
+/*
+ Create a new column value in current list with maxvalue
+ Called from parser
+
+ SYNOPSIS
+ add_max_value()
+ RETURN
+ TRUE Error
+ FALSE Success
+*/
+
+int partition_info::add_max_value()
+{
+ DBUG_ENTER("partition_info::add_max_value");
+
+ part_column_list_val *col_val;
+ if (!(col_val= add_column_value()))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ col_val->max_value= TRUE;
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ Create a new column value in current list
+ Called from parser
+
+ SYNOPSIS
+ add_column_value()
+ RETURN
+    >0                A part_column_list_val object which has been
+                      inserted into its list
+ 0 Memory allocation failure
+*/
+
+part_column_list_val *partition_info::add_column_value()
+{
+ uint max_val= num_columns ? num_columns : MAX_REF_PARTS;
+ DBUG_ENTER("add_column_value");
+ DBUG_PRINT("enter", ("num_columns = %u, curr_list_object %u, max_val = %u",
+ num_columns, curr_list_object, max_val));
+ if (curr_list_object < max_val)
+ {
+ curr_list_val->added_items++;
+ DBUG_RETURN(&curr_list_val->col_val_array[curr_list_object++]);
+ }
+ if (!num_columns && part_type == LIST_PARTITION)
+ {
+ /*
+      We're trying to add more than MAX_REF_PARTS values. This can happen
+      in ALTER TABLE using LIST partitions where the first partition
+      uses VALUES IN (1,2,3,...,17) and the number of values in
+      the list is more than MAX_REF_PARTS. In this case we know
+      that the number of columns must be 1 and we thus reorganize
+      into the structure used for 1 column. After this we call
+      ourselves recursively, which should always succeed.
+ */
+ if (!reorganize_into_single_field_col_val())
+ {
+ DBUG_RETURN(add_column_value());
+ }
+ DBUG_RETURN(NULL);
+ }
+ if (column_list)
+ {
+ my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ }
+ else
+ {
+ if (part_type == RANGE_PARTITION)
+ my_error(ER_TOO_MANY_VALUES_ERROR, MYF(0), "RANGE");
+ else
+ my_error(ER_TOO_MANY_VALUES_ERROR, MYF(0), "LIST");
+ }
+ DBUG_RETURN(NULL);
+}
+
+
+/*
+ Initialise part_elem_value object at setting of a new object
+ (Helper functions to functions called by parser)
+
+ SYNOPSIS
+ init_col_val
+ col_val Column value object to be initialised
+ item Item object representing column value
+
+  RETURN VALUES
+    NONE
+*/
+void partition_info::init_col_val(part_column_list_val *col_val, Item *item)
+{
+ DBUG_ENTER("partition_info::init_col_val");
+
+ col_val->item_expression= item;
+ col_val->null_value= item->null_value;
+ if (item->result_type() == INT_RESULT)
+ {
+ /*
+      This could be either column_list partitioning or function
+      partitioning, but it doesn't hurt to set the function
+ partitioning flags about unsignedness.
+ */
+ curr_list_val->value= item->val_int();
+ curr_list_val->unsigned_flag= TRUE;
+ if (!item->unsigned_flag &&
+ curr_list_val->value < 0)
+ curr_list_val->unsigned_flag= FALSE;
+ if (!curr_list_val->unsigned_flag)
+ curr_part_elem->signed_flag= TRUE;
+ }
+ col_val->part_info= NULL;
+ DBUG_VOID_RETURN;
+}
+/*
+ Add a column value in VALUES LESS THAN or VALUES IN
+ (Called from parser)
+
+ SYNOPSIS
+ add_column_list_value()
+    thd                      Thread object
+    item                     Item object representing column value
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+*/
+bool partition_info::add_column_list_value(THD *thd, Item *item)
+{
+ part_column_list_val *col_val;
+ Name_resolution_context *context= &thd->lex->current_select->context;
+ TABLE_LIST *save_list= context->table_list;
+ const char *save_where= thd->where;
+ DBUG_ENTER("partition_info::add_column_list_value");
+
+ if (part_type == LIST_PARTITION &&
+ num_columns == 1U)
+ {
+ if (init_column_part())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+
+ context->table_list= 0;
+ if (column_list)
+ thd->where= "field list";
+ else
+ thd->where= "partition function";
+
+ if (item->walk(&Item::check_partition_func_processor, 0,
+ NULL))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (item->fix_fields(thd, (Item**)0) ||
+ ((context->table_list= save_list), FALSE) ||
+ (!item->const_item()))
+ {
+ context->table_list= save_list;
+ thd->where= save_where;
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ thd->where= save_where;
+
+ if (!(col_val= add_column_value()))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ init_col_val(col_val, item);
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ Initialise part_info object for receiving a set of column values
+ for a partition, called when parser reaches VALUES LESS THAN or
+ VALUES IN.
+
+ SYNOPSIS
+ init_column_part()
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+*/
+bool partition_info::init_column_part()
+{
+ partition_element *p_elem= curr_part_elem;
+ part_column_list_val *col_val_array;
+ part_elem_value *list_val;
+ uint loc_num_columns;
+ DBUG_ENTER("partition_info::init_column_part");
+
+ if (!(list_val=
+ (part_elem_value*)sql_calloc(sizeof(part_elem_value))) ||
+ p_elem->list_val_list.push_back(list_val))
+ {
+ mem_alloc_error(sizeof(part_elem_value));
+ DBUG_RETURN(TRUE);
+ }
+ if (num_columns)
+ loc_num_columns= num_columns;
+ else
+ loc_num_columns= MAX_REF_PARTS;
+ if (!(col_val_array=
+ (part_column_list_val*)sql_calloc(loc_num_columns *
+ sizeof(part_column_list_val))))
+ {
+    mem_alloc_error(loc_num_columns * sizeof(part_column_list_val));
+ DBUG_RETURN(TRUE);
+ }
+ list_val->col_val_array= col_val_array;
+ list_val->added_items= 0;
+ curr_list_val= list_val;
+ curr_list_object= 0;
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ In the case of ALTER TABLE ADD/REORGANIZE PARTITION for LIST
+ partitions we can specify list values as:
+  VALUES IN (v1, v2, ..., v17) if we're using the first partitioning
+  variant with a function, or a column list partitioned table with
+  one partition field. In this case the parser does not know the
+  number of columns to start with and allocates MAX_REF_PARTS entries
+  in the array. If we try to allocate something beyond MAX_REF_PARTS we
+  call this function to reorganize into a structure with
+  num_columns = 1. Also, when the parser knows that we used LIST
+  partitioning and we used a VALUES IN like the above where the number of
+  values was smaller than or equal to MAX_REF_PARTS, we
+  reorganize after discovering this in the parser.
+
+ SYNOPSIS
+ reorganize_into_single_field_col_val()
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+*/
+int partition_info::reorganize_into_single_field_col_val()
+{
+ part_column_list_val *col_val, *new_col_val;
+ part_elem_value *val= curr_list_val;
+ uint loc_num_columns= num_columns;
+ uint i;
+ DBUG_ENTER("partition_info::reorganize_into_single_field_col_val");
+
+ num_columns= 1;
+ val->added_items= 1U;
+ col_val= &val->col_val_array[0];
+ init_col_val(col_val, col_val->item_expression);
+ for (i= 1; i < loc_num_columns; i++)
+ {
+ col_val= &val->col_val_array[i];
+ DBUG_ASSERT(part_type == LIST_PARTITION);
+ if (init_column_part())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (!(new_col_val= add_column_value()))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ memcpy(new_col_val, col_val, sizeof(*col_val));
+ init_col_val(new_col_val, col_val->item_expression);
+ }
+ curr_list_val= val;
+ DBUG_RETURN(FALSE);
+}
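
What the reorganization above does, stripped of the server data structures, is turn one parsed value that holds N column slots into N values of one column each. A compact sketch with std::vector standing in for the col_val arrays (illustrative only):

    #include <cstdio>
    #include <vector>

    /* Invented stand-in for one parsed VALUES IN entry: a list of column slots. */
    struct ListValue { std::vector<long> columns; };

    /* Turn one value holding N column slots into N single-column values, which
       is the layout expected when there is exactly one partition field. */
    static std::vector<ListValue> reorganize_single_column(const ListValue &parsed)
    {
      std::vector<ListValue> result;
      for (size_t i= 0; i < parsed.columns.size(); i++)
      {
        ListValue one;
        one.columns.push_back(parsed.columns[i]);
        result.push_back(one);
      }
      return result;
    }

    int main()
    {
      ListValue parsed;                  /* VALUES IN (1, 2, 3) parsed as one */
      parsed.columns.push_back(1);       /* value with three column slots     */
      parsed.columns.push_back(2);
      parsed.columns.push_back(3);
      std::vector<ListValue> fixed= reorganize_single_column(parsed);
      printf("%u values, %u column(s) each\n",
             (unsigned) fixed.size(), (unsigned) fixed[0].columns.size());
      return 0;
    }
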
+
+/*
+ This function handles the case of function-based partitioning.
+ It fixes some data structures created in the parser and puts
+ them in the format required by the rest of the partitioning
+ code.
+
+ SYNOPSIS
+ fix_func_partition()
+ thd Thread object
+ col_val Array of one value
+ part_elem The partition instance
+ part_id Id of partition instance
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+*/
+int partition_info::fix_func_partition(THD *thd,
+ part_elem_value *val,
+ partition_element *part_elem,
+ uint part_id)
+{
+ part_column_list_val *col_val= val->col_val_array;
+ DBUG_ENTER("partition_info::fix_func_partition");
+
+ if (col_val->fixed)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ if (val->added_items != 1)
+ {
+ my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (col_val->max_value)
+ {
+ /* The parser ensures we're not LIST partitioned here */
+ DBUG_ASSERT(part_type == RANGE_PARTITION);
+ if (defined_max_value)
+ {
+ my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (part_id == (num_parts - 1))
+ {
+ defined_max_value= TRUE;
+ part_elem->max_value= TRUE;
+ part_elem->range_value= LONGLONG_MAX;
+ }
+ else
+ {
+ my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ {
+ Item *item_expr= col_val->item_expression;
+ if ((val->null_value= item_expr->null_value))
+ {
+ if (part_elem->has_null_value)
+ {
+ my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ part_elem->has_null_value= TRUE;
+ }
+ else if (item_expr->result_type() != INT_RESULT)
+ {
+ my_error(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (part_type == RANGE_PARTITION)
+ {
+ if (part_elem->has_null_value)
+ {
+ my_error(ER_NULL_IN_VALUES_LESS_THAN, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ part_elem->range_value= val->value;
+ }
+ }
+ col_val->fixed= 2;
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ Get column item with a proper character set according to the field
+
+ SYNOPSIS
+ get_column_item()
+ item Item object to start with
+ field Field for which the item will be compared to
+
+ RETURN VALUES
+ NULL Error
+ item Returned item
+*/
+
+Item* partition_info::get_column_item(Item *item, Field *field)
+{
+ if (field->result_type() == STRING_RESULT &&
+ item->collation.collation != field->charset())
+ {
+ if (!(item= convert_charset_partition_constant(item,
+ field->charset())))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ return NULL;
+ }
+ }
+ return item;
+}
+
+
+/*
+ Evaluate VALUES functions for column list values
+ SYNOPSIS
+ fix_column_value_functions()
+ thd Thread object
+ col_val List of column values
+ part_id Partition id we are fixing
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ DESCRIPTION
+ Fix column VALUES and store in memory array adapted to the data type
+*/
+
+bool partition_info::fix_column_value_functions(THD *thd,
+ part_elem_value *val,
+ uint part_id)
+{
+ uint num_columns= part_field_list.elements;
+ bool result= FALSE;
+ uint i;
+ part_column_list_val *col_val= val->col_val_array;
+ DBUG_ENTER("partition_info::fix_column_value_functions");
+
+ if (col_val->fixed > 1)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ for (i= 0; i < num_columns; col_val++, i++)
+ {
+ Item *column_item= col_val->item_expression;
+ Field *field= part_field_array[i];
+ col_val->part_info= this;
+ col_val->partition_id= part_id;
+ if (col_val->max_value)
+ col_val->column_value= NULL;
+ else
+ {
+ col_val->column_value= NULL;
+ if (!col_val->null_value)
+ {
+ uchar *val_ptr;
+ uint len= field->pack_length();
+ ulong save_sql_mode;
+ bool save_got_warning;
+
+ if (!(column_item= get_column_item(column_item,
+ field)))
+ {
+ result= TRUE;
+ goto end;
+ }
+ save_sql_mode= thd->variables.sql_mode;
+ thd->variables.sql_mode= 0;
+ save_got_warning= thd->got_warning;
+ thd->got_warning= 0;
+ if (column_item->save_in_field(field, TRUE) ||
+ thd->got_warning)
+ {
+ my_error(ER_WRONG_TYPE_COLUMN_VALUE_ERROR, MYF(0));
+ result= TRUE;
+ goto end;
+ }
+ thd->got_warning= save_got_warning;
+ thd->variables.sql_mode= save_sql_mode;
+ if (!(val_ptr= (uchar*) sql_calloc(len)))
+ {
+ mem_alloc_error(len);
+ result= TRUE;
+ goto end;
+ }
+ col_val->column_value= val_ptr;
+ memcpy(val_ptr, field->ptr, len);
+ }
+ }
+ col_val->fixed= 2;
+ }
+end:
+ DBUG_RETURN(result);
+}
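
The save/zero/restore sequence around save_in_field above is a reusable pattern: temporarily neutralize sql_mode and the warning flag, attempt the store, and treat any warning as a hard error. A generic standalone sketch of that pattern with invented names (Session, store_value) rather than THD and Item::save_in_field:

    #include <cstdio>

    /* Hypothetical session state; stands in for thd->variables.sql_mode and
       thd->got_warning in the code above. */
    struct Session { unsigned long sql_mode; bool got_warning; };

    /* Pretend store function: flags a warning when the value does not fit. */
    static bool store_value(Session *s, int value, int max_allowed)
    {
      if (value > max_allowed)
        s->got_warning= true;
      return false;                       /* "stored", possibly with a warning */
    }

    static bool store_strictly(Session *s, int value, int max_allowed)
    {
      unsigned long save_sql_mode= s->sql_mode;   /* save ... */
      bool save_got_warning= s->got_warning;
      s->sql_mode= 0;                             /* ... neutralize ... */
      s->got_warning= false;

      bool failed= store_value(s, value, max_allowed) || s->got_warning;

      s->got_warning= save_got_warning;           /* ... and restore */
      s->sql_mode= save_sql_mode;
      return failed;                              /* warning treated as hard error */
    }

    int main()
    {
      Session s= { 1UL, false };
      printf("fits:     %d\n", (int) store_strictly(&s, 5, 10));    /* 0 */
      printf("overflow: %d\n", (int) store_strictly(&s, 50, 10));   /* 1 */
      return 0;
    }
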
+
+/*
+  The parser generates generic data structures; we need to set them up
+  as the rest of the code expects to find them. This is in reality part
+  of the syntax check done in the parser code.
+
+  It is necessary to call this function in the case of a CREATE TABLE
+  statement; in this case we do it early in the check_partition_info
+  function.
+
+  It is necessary to call this function for ALTER TABLE where we
+  assign a completely new partition structure; in this case we do it
+  in prep_alter_part_table after discovering that the partition
+  structure is entirely redefined.
+
+  It is also necessary to call this method for ALTER TABLE ADD/REORGANIZE
+  of partitions; in this case we call it in prep_alter_part_table after
+  making some initial checks but before going deep into checking the partition
+  info. We also assign the column_list variable before calling this function
+  here.
+
+ Finally we also call it immediately after returning from parsing the
+ partitioning text found in the frm file.
+
+  This function mainly fixes the VALUES parts; these are handled differently
+  depending on whether we use column list partitioning. Since the parser
+  doesn't know which we are using, we need to set up the old data structures
+  after parsing is complete, when we know what type of partitioning the
+  base table is using.
+
+  For column lists we handle this in fix_column_value_functions.
+  For column lists it is sufficient to verify that the number of columns
+  and the number of elements are in sync with each other. So only partitioning
+  using functions needs to be set up to its data structures.
+
+ SYNOPSIS
+ fix_parser_data()
+ thd Thread object
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+*/
+
+int partition_info::fix_parser_data(THD *thd)
+{
+ List_iterator<partition_element> it(partitions);
+ partition_element *part_elem;
+ uint num_elements;
+ uint i= 0, j, k;
+ DBUG_ENTER("partition_info::fix_parser_data");
+
+ if (!(part_type == RANGE_PARTITION ||
+ part_type == LIST_PARTITION))
+ {
+ /* Nothing to do for HASH/KEY partitioning */
+ DBUG_RETURN(FALSE);
+ }
+ do
+ {
+ part_elem= it++;
+ List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
+ j= 0;
+ num_elements= part_elem->list_val_list.elements;
+ DBUG_ASSERT(part_type == RANGE_PARTITION ?
+ num_elements == 1U : TRUE);
+ do
+ {
+ part_elem_value *val= list_val_it++;
+ if (column_list)
+ {
+ if (val->added_items != num_columns)
+ {
+ my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ for (k= 0; k < num_columns; k++)
+ {
+ part_column_list_val *col_val= &val->col_val_array[k];
+ if (col_val->null_value && part_type == RANGE_PARTITION)
+ {
+ my_error(ER_NULL_IN_VALUES_LESS_THAN, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ else
+ {
+ if (fix_func_partition(thd, val, part_elem, i))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (val->null_value)
+ {
+ /*
+          Null values aren't required in the value part; they are kept per
+          partition instance, and only LIST partitions have NULL values.
+ */
+ list_val_it.remove();
+ }
+ }
+ } while (++j < num_elements);
+ } while (++i < num_parts);
+ DBUG_RETURN(FALSE);
+}
+
+void partition_info::print_debug(const char *str, uint *value)
+{
+ DBUG_ENTER("print_debug");
+ if (value)
+ DBUG_PRINT("info", ("parser: %s, val = %u", str, *value));
+ else
+ DBUG_PRINT("info", ("parser: %s", str));
+ DBUG_VOID_RETURN;
+}
+#else /* WITH_PARTITION_STORAGE_ENGINE */
+ /*
+ For builds without partitioning we need to define these functions
+    since they are called from the parser. The parser cannot
+    remove code parts using ifdef, but the code parts cannot be called,
+    so we simply need to add empty functions to make the linker happy.
+ */
+part_column_list_val *partition_info::add_column_value()
+{
+ return NULL;
+}
+
+bool partition_info::set_part_expr(char *start_token, Item *item_ptr,
+ char *end_token, bool is_subpart)
+{
+ (void)start_token;
+ (void)item_ptr;
+ (void)end_token;
+ (void)is_subpart;
+ return FALSE;
+}
+
+int partition_info::reorganize_into_single_field_col_val()
+{
+ return 0;
+}
+
+bool partition_info::init_column_part()
+{
+ return FALSE;
+}
+
+bool partition_info::add_column_list_value(THD *thd, Item *item)
+{
+ return FALSE;
+}
+int partition_info::add_max_value()
+{
+ return 0;
+}
+
+void partition_info::print_debug(const char *str, uint *value)
+{
+}
#endif /* WITH_PARTITION_STORAGE_ENGINE */
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 9f438e8260b..0ac8dec4945 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -1,4 +1,7 @@
-/* Copyright 2006-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+#ifndef PARTITION_INFO_INCLUDED
+#define PARTITION_INFO_INCLUDED
+
+/* Copyright 2006-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -64,10 +67,9 @@ public:
/*
When we have various string fields we might need some preparation
before and clean-up after calling the get_part_id_func's. We need
- one such method for get_partition_id and one for
- get_part_partition_id and one for get_subpartition_id.
+ one such method for get_part_partition_id and one for
+ get_subpartition_id.
*/
- get_part_id_func get_partition_id_charset;
get_part_id_func get_part_partition_id_charset;
get_subpart_id_func get_subpartition_id_charset;
@@ -81,7 +83,6 @@ public:
without duplicates, NULL-terminated.
*/
Field **full_part_field_array;
- Field **full_part_charset_field_array;
/*
Set of all fields used in partition and subpartition expression.
Required for testing of partition fields in write_set when
@@ -97,10 +98,8 @@ public:
*/
uchar **part_field_buffers;
uchar **subpart_field_buffers;
- uchar **full_part_field_buffers;
uchar **restore_part_field_ptrs;
uchar **restore_subpart_field_ptrs;
- uchar **restore_full_part_field_ptrs;
Item *part_expr;
Item *subpart_expr;
@@ -124,6 +123,8 @@ public:
union {
longlong *range_int_array;
LIST_PART_ENTRY *list_array;
+ part_column_list_val *range_col_array;
+ part_column_list_val *list_col_array;
};
/********************************************
@@ -154,6 +155,10 @@ public:
partition_element *curr_part_elem;
partition_element *current_partition;
+ part_elem_value *curr_list_val;
+ uint curr_list_object;
+ uint num_columns;
+
/*
These key_map's are used for Partitioning to enable quick decisions
on whether we can derive more information about which partition to
@@ -172,17 +177,17 @@ public:
uint part_func_len;
uint subpart_func_len;
- uint no_parts;
- uint no_subparts;
+ uint num_parts;
+ uint num_subparts;
uint count_curr_subparts;
uint part_error_code;
- uint no_list_values;
+ uint num_list_values;
- uint no_part_fields;
- uint no_subpart_fields;
- uint no_full_part_fields;
+ uint num_part_fields;
+ uint num_subpart_fields;
+ uint num_full_part_fields;
uint has_null_part_id;
/*
@@ -193,9 +198,9 @@ public:
uint16 linear_hash_mask;
bool use_default_partitions;
- bool use_default_no_partitions;
+ bool use_default_num_partitions;
bool use_default_subpartitions;
- bool use_default_no_subpartitions;
+ bool use_default_num_subpartitions;
bool default_partitions_setup;
bool defined_max_value;
bool list_of_part_fields;
@@ -205,7 +210,7 @@ public:
bool is_auto_partitioned;
bool from_openfrm;
bool has_null_value;
-
+ bool column_list;
partition_info()
: get_partition_id(NULL), get_part_partition_id(NULL),
@@ -214,11 +219,8 @@ public:
part_charset_field_array(NULL),
subpart_charset_field_array(NULL),
full_part_field_array(NULL),
- full_part_charset_field_array(NULL),
part_field_buffers(NULL), subpart_field_buffers(NULL),
- full_part_field_buffers(NULL),
restore_part_field_ptrs(NULL), restore_subpart_field_ptrs(NULL),
- restore_full_part_field_ptrs(NULL),
part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL),
list_array(NULL), err_value(0),
@@ -226,22 +228,23 @@ public:
part_func_string(NULL), subpart_func_string(NULL),
part_state(NULL),
curr_part_elem(NULL), current_partition(NULL),
+ curr_list_object(0), num_columns(0),
default_engine_type(NULL),
part_result_type(INT_RESULT),
part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
part_info_len(0), part_state_len(0),
part_func_len(0), subpart_func_len(0),
- no_parts(0), no_subparts(0),
+ num_parts(0), num_subparts(0),
count_curr_subparts(0), part_error_code(0),
- no_list_values(0), no_part_fields(0), no_subpart_fields(0),
- no_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0),
- use_default_partitions(TRUE), use_default_no_partitions(TRUE),
- use_default_subpartitions(TRUE), use_default_no_subpartitions(TRUE),
+ num_list_values(0), num_part_fields(0), num_subpart_fields(0),
+ num_full_part_fields(0), has_null_part_id(0), linear_hash_mask(0),
+ use_default_partitions(TRUE), use_default_num_partitions(TRUE),
+ use_default_subpartitions(TRUE), use_default_num_subpartitions(TRUE),
default_partitions_setup(FALSE), defined_max_value(FALSE),
list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
linear_hash_ind(FALSE), fixed(FALSE),
is_auto_partitioned(FALSE), from_openfrm(FALSE),
- has_null_value(FALSE)
+ has_null_value(FALSE), column_list(FALSE)
{
all_fields_in_PF.clear_all();
all_fields_in_PPF.clear_all();
@@ -264,27 +267,47 @@ public:
/* Returns the total number of partitions on the leaf level */
uint get_tot_partitions()
{
- return no_parts * (is_sub_partitioned() ? no_subparts : 1);
+ return num_parts * (is_sub_partitioned() ? num_subparts : 1);
}
bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info,
uint start_no);
+ char *has_unique_fields();
char *has_unique_names();
bool check_engine_mix(handlerton *engine_type, bool default_engine);
- bool check_range_constants();
- bool check_list_constants();
+ bool check_range_constants(THD *thd);
+ bool check_list_constants(THD *thd);
bool check_partition_info(THD *thd, handlerton **eng_type,
handler *file, HA_CREATE_INFO *info,
bool check_partition_function);
void print_no_partition_found(TABLE *table);
+ void print_debug(const char *str, uint*);
+ Item* get_column_item(Item *item, Field *field);
+ int fix_func_partition(THD *thd,
+ part_elem_value *val,
+ partition_element *part_elem,
+ uint part_id);
+ bool fix_column_value_functions(THD *thd,
+ part_elem_value *val,
+ uint part_id);
+ int fix_parser_data(THD *thd);
+ int add_max_value();
+ void init_col_val(part_column_list_val *col_val, Item *item);
+ int reorganize_into_single_field_col_val();
+ part_column_list_val *add_column_value();
+ bool set_part_expr(char *start_token, Item *item_ptr,
+ char *end_token, bool is_subpart);
+ static int compare_column_values(const void *a, const void *b);
bool set_up_charset_field_preps();
+ bool check_partition_field_length();
+ bool init_column_part();
+ bool add_column_list_value(THD *thd, Item *item);
private:
static int list_part_cmp(const void* a, const void* b);
- static int list_part_cmp_unsigned(const void* a, const void* b);
bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info,
uint start_no);
bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info);
- char *create_default_partition_names(uint part_no, uint no_parts,
+ char *create_default_partition_names(uint part_no, uint num_parts,
uint start_no);
char *create_subpartition_name(uint subpart_no, const char *part_name);
bool has_unique_name(partition_element *element);
@@ -310,7 +333,9 @@ void init_all_partitions_iterator(partition_info *part_info,
PARTITION_ITERATOR *part_iter)
{
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
- part_iter->part_nums.end= part_info->no_parts;
+ part_iter->part_nums.end= part_info->num_parts;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
part_iter->get_next= get_next_partition_id_range;
}
+
+#endif /* PARTITION_INFO_INCLUDED */
diff --git a/sql/procedure.h b/sql/procedure.h
index ceb586766b1..25b30ac593d 100644
--- a/sql/procedure.h
+++ b/sql/procedure.h
@@ -1,3 +1,6 @@
+#ifndef PROCEDURE_INCLUDED
+#define PROCEDURE_INCLUDED
+
/* Copyright (C) 2000-2005 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -23,7 +26,7 @@
#define PROC_NO_SORT 1 /**< Bits in flags */
#define PROC_GROUP 2 /**< proc must have group */
-/* Procedure items used by procedures to store values for send_fields */
+/* Procedure items used by procedures to store values for send_result_set_metadata */
class Item_proc :public Item
{
@@ -149,3 +152,5 @@ public:
Procedure *setup_procedure(THD *thd,ORDER *proc_param,select_result *result,
List<Item> &field_list,int *error);
+
+#endif /* PROCEDURE_INCLUDED */
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 4f69a0fdb52..5990f0f001a 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -29,12 +29,13 @@
static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024;
/* Declared non-static only because of the embedded library. */
-bool net_send_error_packet(THD *thd, uint sql_errno, const char *err);
-bool net_send_ok(THD *, uint, uint, ha_rows, ulonglong, const char *);
-bool net_send_eof(THD *thd, uint server_status, uint total_warn_count);
+bool net_send_error_packet(THD *, uint, const char *, const char *);
+/* Declared non-static only because of the embedded library. */
+bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *);
+/* Declared non-static only because of the embedded library. */
+bool net_send_eof(THD *thd, uint server_status, uint statement_warn_count);
#ifndef EMBEDDED_LIBRARY
-static bool write_eof_packet(THD *thd, NET *net,
- uint server_status, uint total_warn_count);
+static bool write_eof_packet(THD *, NET *, uint, uint);
#endif
#ifndef EMBEDDED_LIBRARY
@@ -58,6 +59,64 @@ bool Protocol_binary::net_store_data(const uchar *from, size_t length)
}
+
+
+/*
+ net_store_data() - extended version with character set conversion.
+
+  It is optimized for short strings whose length after
+  conversion is guaranteed to be less than 251, which occupies
+  exactly one byte to store the length. This avoids using
+  the "convert" member as a temporary buffer; conversion
+  is done directly into the "packet" member.
+ The limit 251 is good enough to optimize send_result_set_metadata()
+ because column, table, database names fit into this limit.
+*/
+
+#ifndef EMBEDDED_LIBRARY
+bool Protocol::net_store_data(const uchar *from, size_t length,
+ CHARSET_INFO *from_cs, CHARSET_INFO *to_cs)
+{
+ uint dummy_errors;
+  /* Calculate maximum possible result length */
+ uint conv_length= to_cs->mbmaxlen * length / from_cs->mbminlen;
+ if (conv_length > 250)
+ {
+ /*
+      For strings with conv_length greater than 250 bytes
+      we don't know how many bytes we will need to store the length: one or
+      two, because we don't know the result length until conversion is done.
+      For example, when converting from utf8 (mbmaxlen=3) to latin1,
+      conv_length=300 means that the result length can vary between 100 and 300.
+      length=100 needs one byte, length=300 needs two bytes.
+
+      Thus converting directly into "packet" is not worthwhile.
+ Let's use "convert" as a temporary buffer.
+ */
+ return (convert->copy((const char*) from, length, from_cs,
+ to_cs, &dummy_errors) ||
+ net_store_data((const uchar*) convert->ptr(), convert->length()));
+ }
+
+ ulong packet_length= packet->length();
+ ulong new_length= packet_length + conv_length + 1;
+
+ if (new_length > packet->alloced_length() && packet->realloc(new_length))
+ return 1;
+
+ char *length_pos= (char*) packet->ptr() + packet_length;
+ char *to= length_pos + 1;
+
+ to+= copy_and_convert(to, conv_length, to_cs,
+ (const char*) from, length, from_cs, &dummy_errors);
+
+ net_store_length((uchar*) length_pos, to - length_pos - 1);
+ packet->length((uint) (to - packet->ptr()));
+ return 0;
+}
+#endif
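
The 250/251 threshold in the comment above comes from the wire format's length-prefixed strings: a length up to 250 fits in a single prefix byte, anything longer needs an escape byte plus additional length bytes, so the prefix size cannot be reserved before the converted length is known. A standalone sketch of that size rule (the real helper is net_store_length; this function is only an illustration of the same thresholds):

    #include <cstdio>

    /* Returns how many bytes a length prefix needs in the length-encoded
       format sketched here: 1 byte up to 250, then 3, 4 or 9 bytes
       (escape byte 0xfc/0xfd/0xfe plus 2/3/8 bytes of length). */
    static unsigned length_prefix_bytes(unsigned long long len)
    {
      if (len < 251ULL)           return 1;
      if (len < (1ULL << 16))     return 3;
      if (len < (1ULL << 24))     return 4;
      return 9;
    }

    int main()
    {
      /* A string whose converted length is guaranteed to stay under 251 can be
         converted straight into the output packet, because the single prefix
         byte can be reserved up front. Above that, the final length (and hence
         the prefix size) is unknown until conversion finishes, so a temporary
         buffer is used first. */
      printf("%u %u %u\n",
             length_prefix_bytes(100),     /* 1 */
             length_prefix_bytes(300),     /* 3 */
             length_prefix_bytes(70000));  /* 4 */
      return 0;
    }
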
+
+
/**
Send a error string to client.
@@ -80,29 +139,33 @@ bool Protocol_binary::net_store_data(const uchar *from, size_t length)
@retval TRUE An error occurred and the message wasn't sent properly
*/
-bool net_send_error(THD *thd, uint sql_errno, const char *err)
+bool net_send_error(THD *thd, uint sql_errno, const char *err,
+ const char* sqlstate)
{
+ bool error;
DBUG_ENTER("net_send_error");
DBUG_ASSERT(!thd->spcont);
DBUG_ASSERT(sql_errno);
- DBUG_ASSERT(err && err[0]);
+ DBUG_ASSERT(err);
DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, err));
- bool error;
+
+ if (sqlstate == NULL)
+ sqlstate= mysql_errno_to_sqlstate(sql_errno);
/*
It's one case when we can push an error even though there
is an OK or EOF already.
*/
- thd->main_da.can_overwrite_status= TRUE;
+ thd->stmt_da->can_overwrite_status= TRUE;
/* Abort multi-result sets */
thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
- error= net_send_error_packet(thd, sql_errno, err);
+ error= net_send_error_packet(thd, sql_errno, err, sqlstate);
- thd->main_da.can_overwrite_status= FALSE;
+ thd->stmt_da->can_overwrite_status= FALSE;
DBUG_RETURN(error);
}
@@ -124,7 +187,7 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err)
@param thd Thread handler
@param server_status The server status
- @param total_warn_count Total number of warnings
+ @param statement_warn_count Total number of warnings
@param affected_rows Number of rows changed by statement
@param id Auto_increment id for first row (if used)
@param message Message to send to the client (Used by mysql_status)
@@ -138,8 +201,8 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err)
#ifndef EMBEDDED_LIBRARY
bool
net_send_ok(THD *thd,
- uint server_status, uint total_warn_count,
- ha_rows affected_rows, ulonglong id, const char *message)
+ uint server_status, uint statement_warn_count,
+ ulonglong affected_rows, ulonglong id, const char *message)
{
NET *net= &thd->net;
uchar buff[MYSQL_ERRMSG_SIZE+10],*pos;
@@ -162,12 +225,12 @@ net_send_ok(THD *thd,
(ulong) affected_rows,
(ulong) id,
(uint) (server_status & 0xffff),
- (uint) total_warn_count));
+ (uint) statement_warn_count));
int2store(pos, server_status);
pos+=2;
/* We can only return up to 65535 warnings in two bytes */
- uint tmp= min(total_warn_count, 65535);
+ uint tmp= min(statement_warn_count, 65535);
int2store(pos, tmp);
pos+= 2;
}
@@ -176,7 +239,7 @@ net_send_ok(THD *thd,
int2store(pos, server_status);
pos+=2;
}
- thd->main_da.can_overwrite_status= TRUE;
+ thd->stmt_da->can_overwrite_status= TRUE;
if (message && message[0])
pos= net_store_data(pos, (uchar*) message, strlen(message));
@@ -184,7 +247,7 @@ net_send_ok(THD *thd,
if (!error)
error= net_flush(net);
- thd->main_da.can_overwrite_status= FALSE;
+ thd->stmt_da->can_overwrite_status= FALSE;
DBUG_PRINT("info", ("OK sent, so no more error sending allowed"));
DBUG_RETURN(error);
@@ -208,7 +271,7 @@ static uchar eof_buff[1]= { (uchar) 254 }; /* Marker for end of fields */
@param thd Thread handler
@param server_status The server status
- @param total_warn_count Total number of warnings
+ @param statement_warn_count Total number of warnings
@return
@retval FALSE The message was successfully sent
@@ -216,7 +279,7 @@ static uchar eof_buff[1]= { (uchar) 254 }; /* Marker for end of fields */
*/
bool
-net_send_eof(THD *thd, uint server_status, uint total_warn_count)
+net_send_eof(THD *thd, uint server_status, uint statement_warn_count)
{
NET *net= &thd->net;
bool error= FALSE;
@@ -224,11 +287,11 @@ net_send_eof(THD *thd, uint server_status, uint total_warn_count)
/* Set to TRUE if no active vio, to work well in case of --init-file */
if (net->vio != 0)
{
- thd->main_da.can_overwrite_status= TRUE;
- error= write_eof_packet(thd, net, server_status, total_warn_count);
+ thd->stmt_da->can_overwrite_status= TRUE;
+ error= write_eof_packet(thd, net, server_status, statement_warn_count);
if (!error)
error= net_flush(net);
- thd->main_da.can_overwrite_status= FALSE;
+ thd->stmt_da->can_overwrite_status= FALSE;
DBUG_PRINT("info", ("EOF sent, so no more error sending allowed"));
}
DBUG_RETURN(error);
@@ -242,7 +305,7 @@ net_send_eof(THD *thd, uint server_status, uint total_warn_count)
@param thd The thread handler
@param net The network handler
@param server_status The server status
- @param total_warn_count The number of warnings
+ @param statement_warn_count The number of warnings
@return
@@ -252,7 +315,7 @@ net_send_eof(THD *thd, uint server_status, uint total_warn_count)
static bool write_eof_packet(THD *thd, NET *net,
uint server_status,
- uint total_warn_count)
+ uint statement_warn_count)
{
bool error;
if (thd->client_capabilities & CLIENT_PROTOCOL_41)
@@ -262,7 +325,7 @@ static bool write_eof_packet(THD *thd, NET *net,
Don't send warn count during SP execution, as the warn_list
is cleared between substatements, and mysqltest gets confused
*/
- uint tmp= min(total_warn_count, 65535);
+ uint tmp= min(statement_warn_count, 65535);
buff[0]= 254;
int2store(buff+1, tmp);
/*
@@ -309,13 +372,18 @@ bool send_old_password_request(THD *thd)
@retval TRUE An error occurred and the message wasn't sent properly
*/
-bool net_send_error_packet(THD *thd, uint sql_errno, const char *err)
+bool net_send_error_packet(THD *thd, uint sql_errno, const char *err,
+ const char* sqlstate)
+
{
NET *net= &thd->net;
uint length;
/*
buff[]: sql_errno:2 + ('#':1 + SQLSTATE_LENGTH:5) + MYSQL_ERRMSG_SIZE:512
*/
+ uint error;
+ uchar converted_err[MYSQL_ERRMSG_SIZE];
+ uint32 converted_err_len;
uchar buff[2+1+SQLSTATE_LENGTH+MYSQL_ERRMSG_SIZE], *pos;
DBUG_ENTER("send_error_packet");
@@ -330,27 +398,24 @@ bool net_send_error_packet(THD *thd, uint sql_errno, const char *err)
DBUG_RETURN(FALSE);
}
- if (net->return_errno)
- { // new client code; Add errno before message
- int2store(buff,sql_errno);
- pos= buff+2;
- if (thd->client_capabilities & CLIENT_PROTOCOL_41)
- {
- /* The first # is to make the protocol backward compatible */
- buff[2]= '#';
- pos= (uchar*) strmov((char*) buff+3, mysql_errno_to_sqlstate(sql_errno));
- }
- length= (uint) (strmake((char*) pos, err, MYSQL_ERRMSG_SIZE-1) -
- (char*) buff);
- err= (char*) buff;
- }
- else
+ int2store(buff,sql_errno);
+ pos= buff+2;
+ if (thd->client_capabilities & CLIENT_PROTOCOL_41)
{
- length=(uint) strlen(err);
- set_if_smaller(length,MYSQL_ERRMSG_SIZE-1);
+ /* The first # is to make the protocol backward compatible */
+ buff[2]= '#';
+ pos= (uchar*) strmov((char*) buff+3, sqlstate);
}
+ converted_err_len= convert_error_message((char*)converted_err,
+ sizeof(converted_err),
+ thd->variables.character_set_results,
+ err, strlen(err),
+ system_charset_info, &error);
+ length= (uint) (strmake((char*) pos, (char*)converted_err, MYSQL_ERRMSG_SIZE) -
+ (char*) buff);
+ err= (char*) buff;
DBUG_RETURN(net_write_command(net,(uchar) 255, (uchar*) "", 0, (uchar*) err,
- length));
+ length));
}
#endif /* EMBEDDED_LIBRARY */
@@ -414,6 +479,12 @@ static uchar *net_store_length_fast(uchar *packet, uint length)
packet is "buffered" in the diagnostics area and sent to the client
at the end of the statement.
+ @note This method defines a template, but delegates actual
+ sending of data to virtual Protocol::send_{ok,eof,error}. This
+ allows for implementation of protocols that "intercept" ok/eof/error
+ messages and, for example, store them in memory instead of sending
+ them to the client.
+
@pre The diagnostics area is assigned or disabled. It can not be empty
-- we assume that every SQL statement or COM_* command
generates OK, ERROR, or EOF status.
@@ -428,47 +499,94 @@ static uchar *net_store_length_fast(uchar *packet, uint length)
Diagnostics_area::is_sent is set for debugging purposes only.
*/
-void net_end_statement(THD *thd)
+void Protocol::end_statement()
{
- DBUG_ASSERT(! thd->main_da.is_sent);
+ DBUG_ENTER("Protocol::end_statement");
+ DBUG_ASSERT(! thd->stmt_da->is_sent);
+ bool error= FALSE;
/* Can not be true, but do not take chances in production. */
- if (thd->main_da.is_sent)
- return;
+ if (thd->stmt_da->is_sent)
+ DBUG_VOID_RETURN;
- bool error= FALSE;
-
- switch (thd->main_da.status()) {
+ switch (thd->stmt_da->status()) {
case Diagnostics_area::DA_ERROR:
/* The query failed, send error to log and abort bootstrap. */
- error= net_send_error(thd,
- thd->main_da.sql_errno(),
- thd->main_da.message());
+ error= send_error(thd->stmt_da->sql_errno(),
+ thd->stmt_da->message(),
+ thd->stmt_da->get_sqlstate());
break;
case Diagnostics_area::DA_EOF:
- error= net_send_eof(thd,
- thd->main_da.server_status(),
- thd->main_da.total_warn_count());
+ error= send_eof(thd->stmt_da->server_status(),
+ thd->stmt_da->statement_warn_count());
break;
case Diagnostics_area::DA_OK:
- error= net_send_ok(thd,
- thd->main_da.server_status(),
- thd->main_da.total_warn_count(),
- thd->main_da.affected_rows(),
- thd->main_da.last_insert_id(),
- thd->main_da.message());
+ error= send_ok(thd->stmt_da->server_status(),
+ thd->stmt_da->statement_warn_count(),
+ thd->stmt_da->affected_rows(),
+ thd->stmt_da->last_insert_id(),
+ thd->stmt_da->message());
break;
case Diagnostics_area::DA_DISABLED:
break;
case Diagnostics_area::DA_EMPTY:
default:
DBUG_ASSERT(0);
- error= net_send_ok(thd, thd->server_status, thd->total_warn_count,
- 0, 0, NULL);
+ error= send_ok(thd->server_status, 0, 0, 0, NULL);
break;
}
if (!error)
- thd->main_da.is_sent= TRUE;
+ thd->stmt_da->is_sent= TRUE;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ A default implementation of "OK" packet response to the client.
+
+ Currently this implementation is re-used by both network-oriented
+ protocols -- the binary and the text one. They do not differ
+ in their OK packet format, which allows for a significant simplification
+ on the client side.
+*/
+
+bool Protocol::send_ok(uint server_status, uint statement_warn_count,
+ ulonglong affected_rows, ulonglong last_insert_id,
+ const char *message)
+{
+ DBUG_ENTER("Protocol::send_ok");
+
+ DBUG_RETURN(net_send_ok(thd, server_status, statement_warn_count,
+ affected_rows, last_insert_id, message));
+}
+
+
+/**
+ A default implementation of "EOF" packet response to the client.
+
+ Binary and text protocols do not differ in their EOF packet format.
+*/
+
+bool Protocol::send_eof(uint server_status, uint statement_warn_count)
+{
+ DBUG_ENTER("Protocol::send_eof");
+
+ DBUG_RETURN(net_send_eof(thd, server_status, statement_warn_count));
+}
+
+
+/**
+ A default implementation of "ERROR" packet response to the client.
+
+ Binary and text protocols do not differ in their ERROR packet format.
+*/
+
+bool Protocol::send_error(uint sql_errno, const char *err_msg,
+ const char *sql_state)
+{
+ DBUG_ENTER("Protocol::send_error");
+
+ DBUG_RETURN(net_send_error_packet(thd, sql_errno, err_msg, sql_state));
}
@@ -525,9 +643,10 @@ void Protocol::init(THD *thd_arg)
for the error.
*/
-void Protocol::end_partial_result_set(THD *thd)
+void Protocol::end_partial_result_set(THD *thd_arg)
{
- net_send_eof(thd, thd->server_status, 0 /* no warnings, we're inside SP */);
+ net_send_eof(thd_arg, thd_arg->server_status,
+ 0 /* no warnings, we're inside SP */);
}
@@ -560,16 +679,16 @@ bool Protocol::flush()
1 Error (Note that in this case the error is not sent to the
client)
*/
-bool Protocol::send_fields(List<Item> *list, uint flags)
+bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
{
List_iterator_fast<Item> it(*list);
Item *item;
- uchar buff[80];
+ uchar buff[MAX_FIELD_WIDTH];
String tmp((char*) buff,sizeof(buff),&my_charset_bin);
Protocol_text prot(thd);
String *local_packet= prot.storage_packet();
CHARSET_INFO *thd_charset= thd->variables.character_set_results;
- DBUG_ENTER("send_fields");
+ DBUG_ENTER("send_result_set_metadata");
if (flags & SEND_NUM_ROWS)
{ // Packet with number of elements
@@ -711,9 +830,10 @@ bool Protocol::send_fields(List<Item> *list, uint flags)
to show that there is no cursor.
Send no warning information, as it will be sent at statement end.
*/
- write_eof_packet(thd, &thd->net, thd->server_status, thd->total_warn_count);
+ write_eof_packet(thd, &thd->net, thd->server_status,
+ thd->warning_info->statement_warn_count());
}
- DBUG_RETURN(prepare_for_send(list));
+ DBUG_RETURN(prepare_for_send(list->elements));
err:
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES),
@@ -732,6 +852,47 @@ bool Protocol::write()
/**
+ Send one result set row.
+
+ @param row_items a collection of column values for that row
+
+ @return Error status.
+ @retval TRUE Error.
+ @retval FALSE Success.
+*/
+
+bool Protocol::send_result_set_row(List<Item> *row_items)
+{
+ char buffer[MAX_FIELD_WIDTH];
+ String str_buffer(buffer, sizeof (buffer), &my_charset_bin);
+ List_iterator_fast<Item> it(*row_items);
+
+ DBUG_ENTER("Protocol::send_result_set_row");
+
+ for (Item *item= it++; item; item= it++)
+ {
+ if (item->send(this, &str_buffer))
+ {
+ // If we're out of memory, reclaim some, to help us recover.
+ this->free();
+ DBUG_RETURN(TRUE);
+ }
+ /* Item::send() may generate an error. If so, abort the loop. */
+ if (thd->is_error())
+ DBUG_RETURN(TRUE);
+
+ /*
+ Reset str_buffer to its original state, as it may have been altered in
+ Item::send().
+ */
+ str_buffer.set(buffer, sizeof(buffer), &my_charset_bin);
+ }
+
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
Send \\0 end terminated string.
@param from NullS or \\0 terminated string
@@ -777,7 +938,6 @@ bool Protocol::store(I_List<i_string>* str_list)
return store((char*) tmp.ptr(), len, tmp.charset());
}
-
/****************************************************************************
Functions to handle the simple (default) protocol where everything is sent as text.
This protocol is the one that is used by default between the MySQL server
@@ -820,10 +980,10 @@ bool Protocol::store_string_aux(const char *from, size_t length,
fromcs != &my_charset_bin &&
tocs != &my_charset_bin)
{
- uint dummy_errors;
- return (convert->copy(from, length, fromcs, tocs, &dummy_errors) ||
- net_store_data((uchar*) convert->ptr(), convert->length()));
+ /* Store with conversion */
+ return net_store_data((uchar*) from, length, fromcs, tocs);
}
+ /* Store without conversion */
return net_store_data((uchar*) from, length);
}
@@ -1056,6 +1216,53 @@ bool Protocol_text::store_time(MYSQL_TIME *tm)
return net_store_data((uchar*) buff, length);
}
+/**
+ Assign OUT-parameters to user variables.
+
+ @param sp_params List of PS/SP parameters (both input and output).
+
+ @return Error status.
+ @retval FALSE Success.
+ @retval TRUE Error.
+*/
+
+bool Protocol_text::send_out_parameters(List<Item_param> *sp_params)
+{
+ DBUG_ASSERT(sp_params->elements ==
+ thd->lex->prepared_stmt_params.elements);
+
+ List_iterator_fast<Item_param> item_param_it(*sp_params);
+ List_iterator_fast<LEX_STRING> user_var_name_it(thd->lex->prepared_stmt_params);
+
+ while (true)
+ {
+ Item_param *item_param= item_param_it++;
+ LEX_STRING *user_var_name= user_var_name_it++;
+
+ if (!item_param || !user_var_name)
+ break;
+
+ if (!item_param->get_out_param_info())
+ continue; // It's an IN-parameter.
+
+ Item_func_set_user_var *suv=
+ new Item_func_set_user_var(*user_var_name, item_param);
+ /*
+ Item_func_set_user_var is not fixed after construction, call
+ fix_fields().
+ */
+ if (suv->fix_fields(thd, NULL))
+ return TRUE;
+
+ if (suv->check(FALSE))
+ return TRUE;
+
+ if (suv->update())
+ return TRUE;
+ }
+
+ return FALSE;
+}
/****************************************************************************
Functions to handle the binary protocol used with prepared statements
@@ -1076,14 +1283,13 @@ bool Protocol_text::store_time(MYSQL_TIME *tm)
[..]..[[length]data] data
****************************************************************************/
-bool Protocol_binary::prepare_for_send(List<Item> *item_list)
+bool Protocol_binary::prepare_for_send(uint num_columns)
{
- Protocol::prepare_for_send(item_list);
+ Protocol::prepare_for_send(num_columns);
bit_fields= (field_count+9)/8;
- if (packet->alloc(bit_fields+1))
- return 1;
+ return packet->alloc(bit_fields+1);
+
/* prepare_for_resend will be called after this one */
- return 0;
}
@@ -1271,3 +1477,80 @@ bool Protocol_binary::store_time(MYSQL_TIME *tm)
buff[0]=(char) length; // Length is stored first
return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC);
}
+
+/**
+ Send a result set with OUT-parameter values by means of PS-protocol.
+
+ @param sp_params List of PS/SP parameters (both input and output).
+
+ @return Error status.
+ @retval FALSE Success.
+ @retval TRUE Error.
+*/
+
+bool Protocol_binary::send_out_parameters(List<Item_param> *sp_params)
+{
+ if (!(thd->client_capabilities & CLIENT_PS_MULTI_RESULTS))
+ {
+ /* The client does not support OUT-parameters. */
+ return FALSE;
+ }
+
+ List<Item> out_param_lst;
+
+ {
+ List_iterator_fast<Item_param> item_param_it(*sp_params);
+
+ while (true)
+ {
+ Item_param *item_param= item_param_it++;
+
+ if (!item_param)
+ break;
+
+ if (!item_param->get_out_param_info())
+ continue; // It's an IN-parameter.
+
+ if (out_param_lst.push_back(item_param))
+ return TRUE;
+ }
+ }
+
+ if (!out_param_lst.elements)
+ return FALSE;
+
+ /*
+ We have to set SERVER_PS_OUT_PARAMS in THD::server_status, because it
+ is used in send_result_set_metadata().
+ */
+
+ thd->server_status|= SERVER_PS_OUT_PARAMS | SERVER_MORE_RESULTS_EXISTS;
+
+ /* Send meta-data. */
+ if (send_result_set_metadata(&out_param_lst, SEND_NUM_ROWS | SEND_EOF))
+ return TRUE;
+
+ /* Send data. */
+
+ prepare_for_resend();
+
+ if (send_result_set_row(&out_param_lst))
+ return TRUE;
+
+ if (write())
+ return TRUE;
+
+ /* Restore THD::server_status. */
+ thd->server_status&= ~SERVER_PS_OUT_PARAMS;
+
+ /*
+ Reset SERVER_MORE_RESULTS_EXISTS bit, because this is the last packet
+ for sure.
+ */
+ thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
+
+ /* Send EOF-packet. */
+ net_send_eof(thd, thd->server_status, 0);
+
+ return FALSE;
+}
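
The @note added above Protocol::end_statement() is the design point of this hunk: end_statement() only decides which status to report, while the actual transmission goes through the virtual send_ok/send_eof/send_error methods. A protocol that keeps the status in memory instead of writing it to the network could therefore be sketched as below; Protocol_buffered and its accessors are hypothetical and not part of this patch.

    /* Hypothetical sketch only -- not part of this patch. */
    class Protocol_buffered : public Protocol_text
    {
    public:
      Protocol_buffered(THD *thd_arg)
        :Protocol_text(thd_arg), m_affected_rows(0), m_sql_errno(0) {}
      virtual enum enum_protocol_type type() { return PROTOCOL_LOCAL; }
      ulonglong affected_rows() const { return m_affected_rows; }
      uint sql_errno() const { return m_sql_errno; }
    protected:
      virtual bool send_ok(uint server_status, uint statement_warn_count,
                           ulonglong affected_rows, ulonglong last_insert_id,
                           const char *message)
      {
        m_affected_rows= affected_rows;   /* keep the status in memory */
        return FALSE;                     /* nothing is written to the net */
      }
      virtual bool send_eof(uint server_status, uint statement_warn_count)
      { return FALSE; }
      virtual bool send_error(uint sql_errno_arg, const char *err_msg,
                              const char *sql_state)
      { m_sql_errno= sql_errno_arg; return FALSE; }
    private:
      ulonglong m_affected_rows;
      uint m_sql_errno;
    };
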
diff --git a/sql/protocol.h b/sql/protocol.h
index 251ba6fbc33..142f7919d6f 100644
--- a/sql/protocol.h
+++ b/sql/protocol.h
@@ -1,3 +1,6 @@
+#ifndef PROTOCOL_INCLUDED
+#define PROTOCOL_INCLUDED
+
/* Copyright (C) 2002-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -17,9 +20,11 @@
#pragma interface /* gcc class implementation */
#endif
+#include "sql_error.h"
class i_string;
class THD;
+class Item_param;
typedef struct st_mysql_field MYSQL_FIELD;
typedef struct st_mysql_rows MYSQL_ROWS;
@@ -42,8 +47,20 @@ protected:
MYSQL_FIELD *next_mysql_field;
MEM_ROOT *alloc;
#endif
+ bool net_store_data(const uchar *from, size_t length,
+ CHARSET_INFO *fromcs, CHARSET_INFO *tocs);
bool store_string_aux(const char *from, size_t length,
CHARSET_INFO *fromcs, CHARSET_INFO *tocs);
+
+ virtual bool send_ok(uint server_status, uint statement_warn_count,
+ ulonglong affected_rows, ulonglong last_insert_id,
+ const char *message);
+
+ virtual bool send_eof(uint server_status, uint statement_warn_count);
+
+ virtual bool send_error(uint sql_errno, const char *err_msg,
+ const char *sql_state);
+
public:
Protocol() {}
Protocol(THD *thd_arg) { init(thd_arg); }
@@ -51,7 +68,8 @@ public:
void init(THD* thd_arg);
enum { SEND_NUM_ROWS= 1, SEND_DEFAULTS= 2, SEND_EOF= 4 };
- virtual bool send_fields(List<Item> *list, uint flags);
+ virtual bool send_result_set_metadata(List<Item> *list, uint flags);
+ bool send_result_set_row(List<Item> *row_items);
bool store(I_List<i_string> *str_list);
bool store(const char *from, CHARSET_INFO *cs);
@@ -69,9 +87,9 @@ public:
inline bool store(String *str)
{ return store((char*) str->ptr(), str->length(), str->charset()); }
- virtual bool prepare_for_send(List<Item> *item_list)
+ virtual bool prepare_for_send(uint num_columns)
{
- field_count=item_list->elements;
+ field_count= num_columns;
return 0;
}
virtual bool flush();
@@ -93,6 +111,8 @@ public:
virtual bool store_date(MYSQL_TIME *time)=0;
virtual bool store_time(MYSQL_TIME *time)=0;
virtual bool store(Field *field)=0;
+
+ virtual bool send_out_parameters(List<Item_param> *sp_params)=0;
#ifdef EMBEDDED_LIBRARY
int begin_dataset();
virtual void remove_last_row() {}
@@ -101,13 +121,15 @@ public:
#endif
enum enum_protocol_type
{
- PROTOCOL_TEXT= 0, PROTOCOL_BINARY= 1
/*
- before adding here or change the values, consider that it is cast to a
- bit in sql_cache.cc.
+ Before adding a new type, please make sure
+ there is enough storage for it in Query_cache_query_flags.
*/
+ PROTOCOL_TEXT= 0, PROTOCOL_BINARY= 1, PROTOCOL_LOCAL= 2
};
virtual enum enum_protocol_type type()= 0;
+
+ void end_statement();
};
@@ -134,6 +156,8 @@ public:
virtual bool store(float nr, uint32 decimals, String *buffer);
virtual bool store(double from, uint32 decimals, String *buffer);
virtual bool store(Field *field);
+
+ virtual bool send_out_parameters(List<Item_param> *sp_params);
#ifdef EMBEDDED_LIBRARY
void remove_last_row();
#endif
@@ -148,7 +172,7 @@ private:
public:
Protocol_binary() {}
Protocol_binary(THD *thd_arg) :Protocol(thd_arg) {}
- virtual bool prepare_for_send(List<Item> *item_list);
+ virtual bool prepare_for_send(uint num_columns);
virtual void prepare_for_resend();
#ifdef EMBEDDED_LIBRARY
virtual bool write();
@@ -169,14 +193,18 @@ public:
virtual bool store(float nr, uint32 decimals, String *buffer);
virtual bool store(double from, uint32 decimals, String *buffer);
virtual bool store(Field *field);
+
+ virtual bool send_out_parameters(List<Item_param> *sp_params);
+
virtual enum enum_protocol_type type() { return PROTOCOL_BINARY; };
};
void send_warning(THD *thd, uint sql_errno, const char *err=0);
-bool net_send_error(THD *thd, uint sql_errno=0, const char *err=0);
-void net_end_statement(THD *thd);
+bool net_send_error(THD *thd, uint sql_errno, const char *err,
+ const char* sqlstate);
bool send_old_password_request(THD *thd);
uchar *net_store_data(uchar *to,const uchar *from, size_t length);
uchar *net_store_data(uchar *to,int32 from);
uchar *net_store_data(uchar *to,longlong from);
+#endif /* PROTOCOL_INCLUDED */
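
As a usage note for the renamed API in this header: a result-set producer calls send_result_set_metadata() once, runs one prepare_for_resend()/send_result_set_row()/write() cycle per row, and finally queues the terminating status in the diagnostics area. A condensed sketch, assuming field_list and row_items were already built by the caller and that my_eof() is used as in the other result-set producers:

    /* Sketch only -- error handling trimmed. */
    Protocol *protocol= thd->protocol;

    if (protocol->send_result_set_metadata(&field_list,
                                           Protocol::SEND_NUM_ROWS |
                                           Protocol::SEND_EOF))
      return TRUE;                        /* metadata could not be sent */

    protocol->prepare_for_resend();       /* start a new row packet */
    if (protocol->send_result_set_row(&row_items) ||
        protocol->write())                /* flush the row to the client */
      return TRUE;

    my_eof(thd);                          /* final EOF goes through the DA */
    return FALSE;
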
diff --git a/sql/records.cc b/sql/records.cc
index b6faf0227f9..8fd63d104a4 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -13,6 +13,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifdef USE_PRAGMA_INTERFACE
+#pragma implementation /* gcc class implementation */
+#endif
/**
@file
@@ -21,8 +24,10 @@
Functions for easy reading of records, possible through a cache
*/
+#include "records.h"
#include "mysql_priv.h"
+
static int rr_quick(READ_RECORD *info);
int rr_sequential(READ_RECORD *info);
static int rr_from_tempfile(READ_RECORD *info);
diff --git a/sql/records.h b/sql/records.h
new file mode 100644
index 00000000000..ae81a31ee1a
--- /dev/null
+++ b/sql/records.h
@@ -0,0 +1,79 @@
+#ifndef SQL_RECORDS_H
+#define SQL_RECORDS_H
+/* Copyright (C) 2008 Sun/MySQL
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface /* gcc class implementation */
+#endif
+#include <my_global.h> /* for uint typedefs */
+
+struct st_join_table;
+class handler;
+struct TABLE;
+class THD;
+class SQL_SELECT;
+
+/**
+ A context for reading through a single table using a chosen access method:
+ index read, scan, etc., optionally using a record cache.
+
+ Usage:
+ READ_RECORD read_record;
+ init_read_record(&read_record, ...);
+ while (!read_record.read_record(&read_record))
+ {
+ ...
+ }
+ end_read_record(&read_record);
+*/
+
+struct READ_RECORD
+{
+ typedef int (*Read_func)(READ_RECORD*);
+ typedef void (*Unlock_row_func)(st_join_table *);
+ typedef int (*Setup_func)(struct st_join_table*);
+
+ TABLE *table; /* Head-form */
+ handler *file;
+ TABLE **forms; /* head and ref forms */
+ Unlock_row_func unlock_row;
+ Read_func read_record;
+ THD *thd;
+ SQL_SELECT *select;
+ uint cache_records;
+ uint ref_length,struct_length,reclength,rec_cache_size,error_offset;
+ uint index;
+ uchar *ref_pos; /* pointer to form->refpos */
+ uchar *record;
+ uchar *rec_buf; /* to read field values after filesort */
+ uchar *cache,*cache_pos,*cache_end,*read_positions;
+ struct st_io_cache *io_cache;
+ bool print_error, ignore_not_found_rows;
+
+public:
+ READ_RECORD() {}
+};
+
+void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
+ SQL_SELECT *select, int use_record_cache,
+ bool print_errors, bool disable_rr_cache);
+void init_read_record_idx(READ_RECORD *info, THD *thd, TABLE *table,
+ bool print_error, uint idx);
+void end_read_record(READ_RECORD *info);
+
+void rr_unlock_row(st_join_table *tab);
+
+#endif /* SQL_RECORDS_H */
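
Filling in the outline from the header comment, a plain sequential read over one table looks roughly like the sketch below; the flag values passed to init_read_record() are illustrative only.

    READ_RECORD info;

    init_read_record(&info, thd, table, NULL /* no SQL_SELECT */,
                     1 /* use record cache */, TRUE /* print errors */,
                     FALSE /* do not disable the rr cache */);
    while (!info.read_record(&info))      /* 0 means a row was read */
    {
      /* table->record[0] now holds the current row */
      if (thd->killed)
        break;
    }
    end_read_record(&info);
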
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index c6a05e93bf4..7a941b1d99b 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -146,10 +146,10 @@ void unregister_slave(THD* thd, bool only_mine, bool need_mutex)
pthread_mutex_lock(&LOCK_slave_list);
SLAVE_INFO* old_si;
- if ((old_si = (SLAVE_INFO*)hash_search(&slave_list,
- (uchar*)&thd->server_id, 4)) &&
+ if ((old_si = (SLAVE_INFO*)my_hash_search(&slave_list,
+ (uchar*)&thd->server_id, 4)) &&
(!only_mine || old_si->thd == thd))
- hash_delete(&slave_list, (uchar*)old_si);
+ my_hash_delete(&slave_list, (uchar*)old_si);
if (need_mutex)
pthread_mutex_unlock(&LOCK_slave_list);
@@ -221,17 +221,18 @@ extern "C" void slave_info_free(void *s)
void init_slave_list()
{
- hash_init(&slave_list, system_charset_info, SLAVE_LIST_CHUNK, 0, 0,
- (hash_get_key) slave_list_key, (hash_free_key) slave_info_free, 0);
+ my_hash_init(&slave_list, system_charset_info, SLAVE_LIST_CHUNK, 0, 0,
+ (my_hash_get_key) slave_list_key,
+ (my_hash_free_key) slave_info_free, 0);
pthread_mutex_init(&LOCK_slave_list, MY_MUTEX_INIT_FAST);
}
void end_slave_list()
{
/* No protection by a mutex needed as we are only called at shutdown */
- if (hash_inited(&slave_list))
+ if (my_hash_inited(&slave_list))
{
- hash_free(&slave_list);
+ my_hash_free(&slave_list);
pthread_mutex_destroy(&LOCK_slave_list);
}
}
@@ -470,7 +471,7 @@ bool show_new_master(THD* thd)
field_list.push_back(new Item_empty_string("Log_name", 20));
field_list.push_back(new Item_return_int("Log_pos", 10,
MYSQL_TYPE_LONGLONG));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
protocol->prepare_for_resend();
@@ -547,8 +548,8 @@ HOSTS";
uint32 log_server_id;
SLAVE_INFO* si, *old_si;
log_server_id = atoi(row[0]);
- if ((old_si= (SLAVE_INFO*)hash_search(&slave_list,
- (uchar*)&log_server_id,4)))
+ if ((old_si= (SLAVE_INFO*)my_hash_search(&slave_list,
+ (uchar*)&log_server_id,4)))
si = old_si;
else
{
@@ -681,7 +682,7 @@ bool show_slave_hosts(THD* thd)
field_list.push_back(new Item_return_int("Master_id", 10,
MYSQL_TYPE_LONG));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -689,7 +690,7 @@ bool show_slave_hosts(THD* thd)
for (uint i = 0; i < slave_list.records; ++i)
{
- SLAVE_INFO* si = (SLAVE_INFO*) hash_element(&slave_list, i);
+ SLAVE_INFO* si = (SLAVE_INFO*) my_hash_element(&slave_list, i);
protocol->prepare_for_resend();
protocol->store((uint32) si->server_id);
protocol->store(si->host, &my_charset_bin);
@@ -920,7 +921,7 @@ bool load_master_data(THD* thd)
goto err;
}
/* Clear the result of mysql_create_db(). */
- thd->main_da.reset_diagnostics_area();
+ thd->stmt_da->reset_diagnostics_area();
if (mysql_select_db(&mysql, db) ||
mysql_real_query(&mysql, STRING_WITH_LEN("SHOW TABLES")) ||
diff --git a/sql/repl_failsafe.h b/sql/repl_failsafe.h
index 6ff78067aca..bce2c727050 100644
--- a/sql/repl_failsafe.h
+++ b/sql/repl_failsafe.h
@@ -1,3 +1,6 @@
+#ifndef REPL_FAILSAFE_INCLUDED
+#define REPL_FAILSAFE_INCLUDED
+
/* Copyright (C) 2001-2005 MySQL AB & Sasha
This program is free software; you can redistribute it and/or modify
@@ -49,3 +52,4 @@ int register_slave(THD* thd, uchar* packet, uint packet_length);
void unregister_slave(THD* thd, bool only_mine, bool need_mutex);
#endif /* HAVE_REPLICATION */
+#endif /* REPL_FAILSAFE_INCLUDED */
diff --git a/sql/replication.h b/sql/replication.h
new file mode 100644
index 00000000000..eea77ef9f8e
--- /dev/null
+++ b/sql/replication.h
@@ -0,0 +1,550 @@
+/* Copyright (C) 2008 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef REPLICATION_H
+#define REPLICATION_H
+
+typedef struct st_mysql MYSQL;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ Transaction observer flags.
+*/
+enum Trans_flags {
+ /** Transaction is a real transaction */
+ TRANS_IS_REAL_TRANS = 1
+};
+
+/**
+ Transaction observer parameter
+*/
+typedef struct Trans_param {
+ uint32 server_id;
+ uint32 flags;
+
+ /*
+ The latest binary log file name and position written by the current
+ transaction. If the binary log is disabled, or if the current
+ transaction has written no log event into the binary log file (events
+ written into the transaction log cache are not counted), these two
+ members will be zero.
+ */
+ const char *log_file;
+ my_off_t log_pos;
+} Trans_param;
+
+/**
+ Observes and extends transaction execution
+*/
+typedef struct Trans_observer {
+ uint32 len;
+
+ /**
+ This callback is called after transaction commit
+
+ This callback is called right after commit to storage engines for
+ transactional tables.
+
+ For non-transactional tables, this is called at the end of the
+ statement, before sending statement status, if the statement
+ succeeded.
+
+ @note The return value is currently ignored by the server.
+
+ @param param The parameter for transaction observers
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_commit)(Trans_param *param);
+
+ /**
+ This callback is called after transaction rollback
+
+ This callback is called right after rollback to storage engines
+ for transactional tables.
+
+ For non-transactional tables, this is called at the end of the
+ statement, before sending statement status, if the statement
+ failed.
+
+ @note The return value is currently ignored by the server.
+
+ @param param The parameter for transaction observers
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_rollback)(Trans_param *param);
+} Trans_observer;
+
+/**
+ Binlog storage flags
+*/
+enum Binlog_storage_flags {
+ /** Binary log was sync:ed */
+ BINLOG_STORAGE_IS_SYNCED = 1
+};
+
+/**
+ Binlog storage observer parameters
+ */
+typedef struct Binlog_storage_param {
+ uint32 server_id;
+} Binlog_storage_param;
+
+/**
+ Observe binlog logging storage
+*/
+typedef struct Binlog_storage_observer {
+ uint32 len;
+
+ /**
+ This callback is called after binlog has been flushed
+
+ This callback is called after cached events have been flushed to
+ binary log file. Whether the binary log file is synchronized to
+ disk is indicated by the bit BINLOG_STORAGE_IS_SYNCED in @a flags.
+
+ @param param Observer common parameter
+ @param log_file Binlog file name that has been updated
+ @param log_pos Binlog position after the update
+ @param flags flags for binlog storage
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_flush)(Binlog_storage_param *param,
+ const char *log_file, my_off_t log_pos,
+ uint32 flags);
+} Binlog_storage_observer;
+
+/**
+ Replication binlog transmitter (binlog dump) observer parameter.
+*/
+typedef struct Binlog_transmit_param {
+ uint32 server_id;
+ uint32 flags;
+} Binlog_transmit_param;
+
+/**
+ Observe and extends the binlog dumping thread.
+*/
+typedef struct Binlog_transmit_observer {
+ uint32 len;
+
+ /**
+ This callback is called when binlog dumping starts
+
+
+ @param param Observer common parameter
+ @param log_file Binlog file name to transmit from
+ @param log_pos Binlog position to transmit from
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*transmit_start)(Binlog_transmit_param *param,
+ const char *log_file, my_off_t log_pos);
+
+ /**
+ This callback is called when binlog dumping stops
+
+ @param param Observer common parameter
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*transmit_stop)(Binlog_transmit_param *param);
+
+ /**
+ This callback is called to reserve bytes in packet header for event transmission
+
+ This callback is called when resetting transmit packet header to
+ reserve bytes for this observer in packet header.
+
+ The @a header buffer is allocated by the server code, and @a size
+ is the size of the header buffer. Each observer can only reserve
+ a maximum size of @a size in the header.
+
+ @param param Observer common parameter
+ @param header Pointer of the header buffer
+ @param size Size of the header buffer
+ @param len Header length reserved by this observer
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*reserve_header)(Binlog_transmit_param *param,
+ unsigned char *header,
+ unsigned long size,
+ unsigned long *len);
+
+ /**
+ This callback is called before sending an event packet to slave
+
+ @param param Observer common parameter
+ @param packet Binlog event packet to send
+ @param len Length of the event packet
+ @param log_file Binlog file name of the event packet to send
+ @param log_pos Binlog position of the event packet to send
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*before_send_event)(Binlog_transmit_param *param,
+ unsigned char *packet, unsigned long len,
+ const char *log_file, my_off_t log_pos );
+
+ /**
+ This callback is called after sending an event packet to slave
+
+ @param param Observer common parameter
+ @param event_buf Binlog event packet buffer sent
+ @param len length of the event packet buffer
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_send_event)(Binlog_transmit_param *param,
+ const char *event_buf, unsigned long len);
+
+ /**
+ This callback is called after resetting master status
+
+ This is called when executing the command RESET MASTER, and is
+ used to reset status variables added by observers.
+
+ @param param Observer common parameter
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_reset_master)(Binlog_transmit_param *param);
+} Binlog_transmit_observer;
+
+/**
+ Binlog relay IO flags
+*/
+enum Binlog_relay_IO_flags {
+ /** Binary relay log was synced */
+ BINLOG_RELAY_IS_SYNCED = 1
+};
+
+
+/**
+ Replication binlog relay IO observer parameter
+*/
+typedef struct Binlog_relay_IO_param {
+ uint32 server_id;
+
+ /* Master host, user and port */
+ char *host;
+ char *user;
+ unsigned int port;
+
+ char *master_log_name;
+ my_off_t master_log_pos;
+
+ MYSQL *mysql; /* the connection to master */
+} Binlog_relay_IO_param;
+
+/**
+ Observes and extends the service of slave IO thread.
+*/
+typedef struct Binlog_relay_IO_observer {
+ uint32 len;
+
+ /**
+ This callback is called when slave IO thread starts
+
+ @param param Observer common parameter
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*thread_start)(Binlog_relay_IO_param *param);
+
+ /**
+ This callback is called when slave IO thread stops
+
+ @param param Observer common parameter
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*thread_stop)(Binlog_relay_IO_param *param);
+
+ /**
+ This callback is called before the slave requests binlog transmission from the master
+
+ This is called before the slave issues the BINLOG_DUMP command to the
+ master to request the binlog.
+
+ @param param Observer common parameter
+ @param flags binlog dump flags
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*before_request_transmit)(Binlog_relay_IO_param *param, uint32 flags);
+
+ /**
+ This callback is called after reading an event packet from the master
+
+ @param param Observer common parameter
+ @param packet The event packet read from the master
+ @param len Length of the event packet read from the master
+ @param event_buf The event packet returned after processing
+ @param event_len The length of the event packet returned after processing
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_read_event)(Binlog_relay_IO_param *param,
+ const char *packet, unsigned long len,
+ const char **event_buf, unsigned long *event_len);
+
+ /**
+ This callback is called after writing an event packet to the relay log
+
+ @param param Observer common parameter
+ @param event_buf Event packet written to the relay log
+ @param event_len Length of the event packet written to the relay log
+ @param flags flags for the relay log
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_queue_event)(Binlog_relay_IO_param *param,
+ const char *event_buf, unsigned long event_len,
+ uint32 flags);
+
+ /**
+ This callback is called after resetting the slave relay log IO status
+
+ @param param Observer common parameter
+
+ @retval 0 Success
+ @retval 1 Failure
+ */
+ int (*after_reset_slave)(Binlog_relay_IO_param *param);
+} Binlog_relay_IO_observer;
+
+
+/**
+ Register a transaction observer
+
+ @param observer The transaction observer to register
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer already exists
+*/
+int register_trans_observer(Trans_observer *observer, void *p);
+
+/**
+ Unregister a transaction observer
+
+ @param observer The transaction observer to unregister
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer does not exist
+*/
+int unregister_trans_observer(Trans_observer *observer, void *p);
+
+/**
+ Register a binlog storage observer
+
+ @param observer The binlog storage observer to register
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer already exists
+*/
+int register_binlog_storage_observer(Binlog_storage_observer *observer, void *p);
+
+/**
+ Unregister a binlog storage observer
+
+ @param observer The binlog storage observer to unregister
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer does not exist
+*/
+int unregister_binlog_storage_observer(Binlog_storage_observer *observer, void *p);
+
+/**
+ Register a binlog transmit observer
+
+ @param observer The binlog transmit observer to register
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer already exists
+*/
+int register_binlog_transmit_observer(Binlog_transmit_observer *observer, void *p);
+
+/**
+ Unregister a binlog transmit observer
+
+ @param observer The binlog transmit observer to unregister
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer does not exist
+*/
+int unregister_binlog_transmit_observer(Binlog_transmit_observer *observer, void *p);
+
+/**
+ Register a binlog relay IO (slave IO thread) observer
+
+ @param observer The binlog relay IO observer to register
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer already exists
+*/
+int register_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void *p);
+
+/**
+ Unregister a binlog relay IO (slave IO thread) observer
+
+ @param observer The binlog relay IO observer to unregister
+ @param p pointer to the internal plugin structure
+
+ @retval 0 Success
+ @retval 1 Observer does not exist
+*/
+int unregister_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void *p);
+
+/**
+ Connect to master
+
+ This function can only be used in the slave I/O thread context, and
+ will use the same master information to make the connection.
+
+ @code
+ MYSQL *mysql = mysql_init(NULL);
+ if (rpl_connect_master(mysql))
+ {
+ // do stuff with the connection
+ }
+ mysql_close(mysql); // close the connection
+ @endcode
+
+ @param mysql address of MYSQL structure to use, pass NULL will
+ create a new one
+
+ @return address of MYSQL structure on success, NULL on failure
+*/
+MYSQL *rpl_connect_master(MYSQL *mysql);
+
+/**
+ Set thread entering a condition
+
+ This function should be called before putting a thread to wait for
+ a condition. @a mutex should be held before calling this
+ function. After being woken up, @f thd_exit_cond should be called.
+
+ @param thd The thread entering the condition, NULL means current thread
+ @param cond The condition the thread is going to wait for
+ @param mutex The mutex associated with the condition, this must be
+ held before calling this function
+ @param msg The new process message for the thread
+*/
+const char* thd_enter_cond(MYSQL_THD thd, pthread_cond_t *cond,
+ pthread_mutex_t *mutex, const char *msg);
+
+/**
+ Set thread leaving a condition
+
+ This function should be called after a thread has been woken up from a
+ condition wait.
+
+ @param thd The thread entering the condition, NULL means current thread
+ @param old_msg The process message, usually this should be the old process
+ message before calling @f thd_enter_cond
+*/
+void thd_exit_cond(MYSQL_THD thd, const char *old_msg);
+
+/**
+ Get the value of user variable as an integer.
+
+ This function will return the value of variable @a name as an
+ integer. If the original value of the variable is not an integer,
+ the value will be converted into an integer.
+
+ @param name user variable name
+ @param value pointer to return the value
+ @param null_value if not NULL, the function will set it to true if
+ the value of the variable is null, and to false if not
+
+ @retval 0 Success
+ @retval 1 Variable not found
+*/
+int get_user_var_int(const char *name,
+ long long int *value, int *null_value);
+
+/**
+ Get the value of user variable as a double precision float number.
+
+ This function will return the value of variable @a name as a real
+ number. If the original value of the variable is not a real number,
+ the value will be converted into a real number.
+
+ @param name user variable name
+ @param value pointer to return the value
+ @param null_value if not NULL, the function will set it to true if
+ the value of the variable is null, and to false if not
+
+ @retval 0 Success
+ @retval 1 Variable not found
+*/
+int get_user_var_real(const char *name,
+ double *value, int *null_value);
+
+/**
+ Get the value of user variable as a string.
+
+ This function will return the value of variable @a name as
+ a string. If the original value of the variable is not a string,
+ the value will be converted into a string.
+
+ @param name user variable name
+ @param value pointer to the value buffer
+ @param len length of the value buffer
+ @param precision precision of the value if it is a float number
+ @param null_value if not NULL, the function will set it to true if
+ the value of the variable is null, and to false if not
+
+ @retval 0 Success
+ @retval 1 Variable not found
+*/
+int get_user_var_str(const char *name,
+ char *value, unsigned long len,
+ unsigned int precision, int *null_value);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* REPLICATION_H */
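
To tie the pieces of this header together: a plugin registers one of these observer structures from its init function and removes it again on deinit. The sketch below is hypothetical (my_after_commit, my_trans_observer and the plugin init/deinit names are invented for illustration); it also shows the user-variable service reading an optional @commit_count variable.

    #include <stdio.h>
    #include "replication.h"              /* this header */

    static int my_after_commit(Trans_param *param)
    {
      long long commits;
      int is_null;

      /* Read @commit_count if the session has set it; ignore it otherwise. */
      if (!get_user_var_int("commit_count", &commits, &is_null) && !is_null)
        fprintf(stderr, "commit %lld ended at %s:%llu\n", commits,
                param->log_file ? param->log_file : "(binlog off)",
                (unsigned long long) param->log_pos);
      return 0;                           /* return value is ignored */
    }

    static Trans_observer my_trans_observer= {
      sizeof(Trans_observer),             /* len */
      my_after_commit,                    /* after_commit */
      NULL                                /* after_rollback */
    };

    static int my_plugin_init(void *p)    /* p is the st_plugin_int handle */
    { return register_trans_observer(&my_trans_observer, p); }

    static int my_plugin_deinit(void *p)
    { return unregister_trans_observer(&my_trans_observer, p); }
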
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
index 68272c58bb1..392d8baf50e 100644
--- a/sql/rpl_filter.cc
+++ b/sql/rpl_filter.cc
@@ -32,9 +32,9 @@ Rpl_filter::Rpl_filter() :
Rpl_filter::~Rpl_filter()
{
if (do_table_inited)
- hash_free(&do_table);
+ my_hash_free(&do_table);
if (ignore_table_inited)
- hash_free(&ignore_table);
+ my_hash_free(&ignore_table);
if (wild_do_table_inited)
free_string_array(&wild_do_table);
if (wild_ignore_table_inited)
@@ -103,12 +103,12 @@ Rpl_filter::tables_ok(const char* db, TABLE_LIST* tables)
len= (uint) (strmov(end, tables->table_name) - hash_key);
if (do_table_inited) // if there are any do's
{
- if (hash_search(&do_table, (uchar*) hash_key, len))
+ if (my_hash_search(&do_table, (uchar*) hash_key, len))
DBUG_RETURN(1);
}
if (ignore_table_inited) // if there are any ignores
{
- if (hash_search(&ignore_table, (uchar*) hash_key, len))
+ if (my_hash_search(&ignore_table, (uchar*) hash_key, len))
DBUG_RETURN(0);
}
if (wild_do_table_inited &&
@@ -387,7 +387,7 @@ void free_table_ent(void* a)
void
Rpl_filter::init_table_rule_hash(HASH* h, bool* h_inited)
{
- hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
+ my_hash_init(h, system_charset_info,TABLE_RULE_HASH_SIZE,0,0,
get_table_key, free_table_ent, 0);
*h_inited = 1;
}
@@ -458,7 +458,7 @@ Rpl_filter::table_rule_ent_hash_to_str(String* s, HASH* h, bool inited)
{
for (uint i= 0; i < h->records; i++)
{
- TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) hash_element(h, i);
+ TABLE_RULE_ENT* e= (TABLE_RULE_ENT*) my_hash_element(h, i);
if (s->length())
s->append(',');
s->append(e->db,e->key_len);
diff --git a/sql/rpl_handler.cc b/sql/rpl_handler.cc
new file mode 100644
index 00000000000..ebd6e4e0c0b
--- /dev/null
+++ b/sql/rpl_handler.cc
@@ -0,0 +1,518 @@
+/* Copyright (C) 2008 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+
+#include "rpl_mi.h"
+#include "sql_repl.h"
+#include "log_event.h"
+#include "rpl_filter.h"
+#include <my_dir.h>
+#include "rpl_handler.h"
+
+Trans_delegate *transaction_delegate;
+Binlog_storage_delegate *binlog_storage_delegate;
+#ifdef HAVE_REPLICATION
+Binlog_transmit_delegate *binlog_transmit_delegate;
+Binlog_relay_IO_delegate *binlog_relay_io_delegate;
+#endif /* HAVE_REPLICATION */
+
+/*
+ structure to save transaction log filename and position
+*/
+typedef struct Trans_binlog_info {
+ my_off_t log_pos;
+ char log_file[FN_REFLEN];
+} Trans_binlog_info;
+
+static pthread_key(Trans_binlog_info*, RPL_TRANS_BINLOG_INFO);
+
+int get_user_var_int(const char *name,
+ long long int *value, int *null_value)
+{
+ my_bool null_val;
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&current_thd->user_vars,
+ (uchar*) name, strlen(name));
+ if (!entry)
+ return 1;
+ *value= entry->val_int(&null_val);
+ if (null_value)
+ *null_value= null_val;
+ return 0;
+}
+
+int get_user_var_real(const char *name,
+ double *value, int *null_value)
+{
+ my_bool null_val;
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&current_thd->user_vars,
+ (uchar*) name, strlen(name));
+ if (!entry)
+ return 1;
+ *value= entry->val_real(&null_val);
+ if (null_value)
+ *null_value= null_val;
+ return 0;
+}
+
+int get_user_var_str(const char *name, char *value,
+ size_t len, unsigned int precision, int *null_value)
+{
+ String str;
+ my_bool null_val;
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&current_thd->user_vars,
+ (uchar*) name, strlen(name));
+ if (!entry)
+ return 1;
+ entry->val_str(&null_val, &str, precision);
+ strncpy(value, str.c_ptr(), len);
+ if (null_value)
+ *null_value= null_val;
+ return 0;
+}
+
+int delegates_init()
+{
+ static unsigned long trans_mem[sizeof(Trans_delegate) / sizeof(unsigned long) + 1];
+ static unsigned long storage_mem[sizeof(Binlog_storage_delegate) / sizeof(unsigned long) + 1];
+#ifdef HAVE_REPLICATION
+ static unsigned long transmit_mem[sizeof(Binlog_transmit_delegate) / sizeof(unsigned long) + 1];
+ static unsigned long relay_io_mem[sizeof(Binlog_relay_IO_delegate)/ sizeof(unsigned long) + 1];
+#endif
+
+ if (!(transaction_delegate= new (trans_mem) Trans_delegate)
+ || (!transaction_delegate->is_inited())
+ || !(binlog_storage_delegate= new (storage_mem) Binlog_storage_delegate)
+ || (!binlog_storage_delegate->is_inited())
+#ifdef HAVE_REPLICATION
+ || !(binlog_transmit_delegate= new (transmit_mem) Binlog_transmit_delegate)
+ || (!binlog_transmit_delegate->is_inited())
+ || !(binlog_relay_io_delegate= new (relay_io_mem) Binlog_relay_IO_delegate)
+ || (!binlog_relay_io_delegate->is_inited())
+#endif /* HAVE_REPLICATION */
+ )
+ return 1;
+
+ if (pthread_key_create(&RPL_TRANS_BINLOG_INFO, NULL))
+ return 1;
+ return 0;
+}
+
+void delegates_destroy()
+{
+ if (transaction_delegate)
+ transaction_delegate->~Trans_delegate();
+ if (binlog_storage_delegate)
+ binlog_storage_delegate->~Binlog_storage_delegate();
+#ifdef HAVE_REPLICATION
+ if (binlog_transmit_delegate)
+ binlog_transmit_delegate->~Binlog_transmit_delegate();
+ if (binlog_relay_io_delegate)
+ binlog_relay_io_delegate->~Binlog_relay_IO_delegate();
+#endif /* HAVE_REPLICATION */
+}
+
+/*
+ This macro is used by almost all the Delegate methods to iterate
+ over all the observers, running the given callback function of the
+ delegate.
+
+ Observer plugins are added to the thd->lex list; after each statement,
+ all plugins added to thd->lex are automatically unlocked.
+ */
+#define FOREACH_OBSERVER(r, f, thd, args) \
+ param.server_id= thd->server_id; \
+ /*
+ Use a struct to make sure that they are allocated adjacent to each
+ other; see delete_dynamic().
+ */ \
+ struct { \
+ DYNAMIC_ARRAY plugins; \
+ /* preallocate 8 slots */ \
+ plugin_ref plugins_buffer[8]; \
+ } s; \
+ DYNAMIC_ARRAY *plugins= &s.plugins; \
+ plugin_ref *plugins_buffer= s.plugins_buffer; \
+ my_init_dynamic_array2(plugins, sizeof(plugin_ref), \
+ plugins_buffer, 8, 8); \
+ read_lock(); \
+ Observer_info_iterator iter= observer_info_iter(); \
+ Observer_info *info= iter++; \
+ for (; info; info= iter++) \
+ { \
+ plugin_ref plugin= \
+ my_plugin_lock(0, &info->plugin); \
+ if (!plugin) \
+ { \
+ /* plugin is not initialized or deleted, this is not an error */ \
+ r= 0; \
+ break; \
+ } \
+ insert_dynamic(plugins, (uchar *)&plugin); \
+ if (((Observer *)info->observer)->f \
+ && ((Observer *)info->observer)->f args) \
+ { \
+ r= 1; \
+ sql_print_error("Run function '" #f "' in plugin '%s' failed", \
+ info->plugin_int->name.str); \
+ break; \
+ } \
+ } \
+ unlock(); \
+ /*
+ Unlocking the plugins must be done after we have released the
+ Delegate lock, to avoid a possible deadlock: when this is the last
+ user of a plugin, unlocking it will try to deinitialize the plugin,
+ which in turn will try to lock the Delegate in order to remove the
+ observers.
+ */ \
+ plugin_unlock_list(0, (plugin_ref*)plugins->buffer, \
+ plugins->elements); \
+ delete_dynamic(plugins)
+
+
+int Trans_delegate::after_commit(THD *thd, bool all)
+{
+ Trans_param param;
+ param.flags= 0;
+ bool is_real_trans= (all || thd->transaction.all.ha_list == 0);
+ if (is_real_trans)
+ param.flags |= TRANS_IS_REAL_TRANS;
+
+ Trans_binlog_info *log_info=
+ my_pthread_getspecific_ptr(Trans_binlog_info*, RPL_TRANS_BINLOG_INFO);
+
+ param.log_file= log_info ? log_info->log_file : 0;
+ param.log_pos= log_info ? log_info->log_pos : 0;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_commit, thd, (&param));
+
+ /*
+ This is the end of a real transaction or autocommit statement, we
+ can free the memory allocated for binlog file and position.
+ */
+ if (is_real_trans && log_info)
+ {
+ my_pthread_setspecific_ptr(RPL_TRANS_BINLOG_INFO, NULL);
+ my_free(log_info, MYF(0));
+ }
+ return ret;
+}
+
+int Trans_delegate::after_rollback(THD *thd, bool all)
+{
+ Trans_param param;
+ param.flags= 0;
+ bool is_real_trans= (all || thd->transaction.all.ha_list == 0);
+ if (is_real_trans)
+ param.flags |= TRANS_IS_REAL_TRANS;
+
+ Trans_binlog_info *log_info=
+ my_pthread_getspecific_ptr(Trans_binlog_info*, RPL_TRANS_BINLOG_INFO);
+
+ param.log_file= log_info ? log_info->log_file : 0;
+ param.log_pos= log_info ? log_info->log_pos : 0;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_rollback, thd, (&param));
+
+ /*
+ This is the end of a real transaction or autocommit statement, we
+ can free the memory allocated for binlog file and position.
+ */
+ if (is_real_trans && log_info)
+ {
+ my_pthread_setspecific_ptr(RPL_TRANS_BINLOG_INFO, NULL);
+ my_free(log_info, MYF(0));
+ }
+ return ret;
+}
+
+int Binlog_storage_delegate::after_flush(THD *thd,
+ const char *log_file,
+ my_off_t log_pos,
+ bool synced)
+{
+ Binlog_storage_param param;
+ uint32 flags=0;
+ if (synced)
+ flags |= BINLOG_STORAGE_IS_SYNCED;
+
+ Trans_binlog_info *log_info=
+ my_pthread_getspecific_ptr(Trans_binlog_info*, RPL_TRANS_BINLOG_INFO);
+
+ if (!log_info)
+ {
+ if(!(log_info=
+ (Trans_binlog_info *)my_malloc(sizeof(Trans_binlog_info), MYF(0))))
+ return 1;
+ my_pthread_setspecific_ptr(RPL_TRANS_BINLOG_INFO, log_info);
+ }
+
+ strcpy(log_info->log_file, log_file+dirname_length(log_file));
+ log_info->log_pos = log_pos;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_flush, thd,
+ (&param, log_info->log_file, log_info->log_pos, flags));
+ return ret;
+}
+
+#ifdef HAVE_REPLICATION
+int Binlog_transmit_delegate::transmit_start(THD *thd, ushort flags,
+ const char *log_file,
+ my_off_t log_pos)
+{
+ Binlog_transmit_param param;
+ param.flags= flags;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, transmit_start, thd, (&param, log_file, log_pos));
+ return ret;
+}
+
+int Binlog_transmit_delegate::transmit_stop(THD *thd, ushort flags)
+{
+ Binlog_transmit_param param;
+ param.flags= flags;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, transmit_stop, thd, (&param));
+ return ret;
+}
+
+int Binlog_transmit_delegate::reserve_header(THD *thd, ushort flags,
+ String *packet)
+{
+ /* NOTE2ME: Maximum extra header size for each observer. I hope 32
+ bytes are enough for each Observer to reserve its extra
+ header. If this later turns out to be insufficient, we can increase it.
+ /HEZX
+ */
+#define RESERVE_HEADER_SIZE 32
+ unsigned char header[RESERVE_HEADER_SIZE];
+ ulong hlen;
+ Binlog_transmit_param param;
+ param.flags= flags;
+ param.server_id= thd->server_id;
+
+ int ret= 0;
+ read_lock();
+ Observer_info_iterator iter= observer_info_iter();
+ Observer_info *info= iter++;
+ for (; info; info= iter++)
+ {
+ plugin_ref plugin=
+ my_plugin_lock(thd, &info->plugin);
+ if (!plugin)
+ {
+ ret= 1;
+ break;
+ }
+ hlen= 0;
+ if (((Observer *)info->observer)->reserve_header
+ && ((Observer *)info->observer)->reserve_header(&param,
+ header,
+ RESERVE_HEADER_SIZE,
+ &hlen))
+ {
+ ret= 1;
+ plugin_unlock(thd, plugin);
+ break;
+ }
+ plugin_unlock(thd, plugin);
+ if (hlen == 0)
+ continue;
+ if (hlen > RESERVE_HEADER_SIZE || packet->append((char *)header, hlen))
+ {
+ ret= 1;
+ break;
+ }
+ }
+ unlock();
+ return ret;
+}
+
+int Binlog_transmit_delegate::before_send_event(THD *thd, ushort flags,
+ String *packet,
+ const char *log_file,
+ my_off_t log_pos)
+{
+ Binlog_transmit_param param;
+ param.flags= flags;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, before_send_event, thd,
+ (&param, (uchar *)packet->c_ptr(),
+ packet->length(),
+ log_file+dirname_length(log_file), log_pos));
+ return ret;
+}
+
+int Binlog_transmit_delegate::after_send_event(THD *thd, ushort flags,
+ String *packet)
+{
+ Binlog_transmit_param param;
+ param.flags= flags;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_send_event, thd,
+ (&param, packet->c_ptr(), packet->length()));
+ return ret;
+}
+
+int Binlog_transmit_delegate::after_reset_master(THD *thd, ushort flags)
+
+{
+ Binlog_transmit_param param;
+ param.flags= flags;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_reset_master, thd, (&param));
+ return ret;
+}
+
+void Binlog_relay_IO_delegate::init_param(Binlog_relay_IO_param *param,
+ Master_info *mi)
+{
+ param->mysql= mi->mysql;
+ param->user= mi->user;
+ param->host= mi->host;
+ param->port= mi->port;
+ param->master_log_name= mi->master_log_name;
+ param->master_log_pos= mi->master_log_pos;
+}
+
+int Binlog_relay_IO_delegate::thread_start(THD *thd, Master_info *mi)
+{
+ Binlog_relay_IO_param param;
+ init_param(&param, mi);
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, thread_start, thd, (&param));
+ return ret;
+}
+
+
+int Binlog_relay_IO_delegate::thread_stop(THD *thd, Master_info *mi)
+{
+
+ Binlog_relay_IO_param param;
+ init_param(&param, mi);
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, thread_stop, thd, (&param));
+ return ret;
+}
+
+int Binlog_relay_IO_delegate::before_request_transmit(THD *thd,
+ Master_info *mi,
+ ushort flags)
+{
+ Binlog_relay_IO_param param;
+ init_param(&param, mi);
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, before_request_transmit, thd, (&param, (uint32)flags));
+ return ret;
+}
+
+int Binlog_relay_IO_delegate::after_read_event(THD *thd, Master_info *mi,
+ const char *packet, ulong len,
+ const char **event_buf,
+ ulong *event_len)
+{
+ Binlog_relay_IO_param param;
+ init_param(&param, mi);
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_read_event, thd,
+ (&param, packet, len, event_buf, event_len));
+ return ret;
+}
+
+int Binlog_relay_IO_delegate::after_queue_event(THD *thd, Master_info *mi,
+ const char *event_buf,
+ ulong event_len,
+ bool synced)
+{
+ Binlog_relay_IO_param param;
+ init_param(&param, mi);
+
+ uint32 flags=0;
+ if (synced)
+ flags |= BINLOG_STORAGE_IS_SYNCED;
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_queue_event, thd,
+ (&param, event_buf, event_len, flags));
+ return ret;
+}
+
+int Binlog_relay_IO_delegate::after_reset_slave(THD *thd, Master_info *mi)
+
+{
+ Binlog_relay_IO_param param;
+ init_param(&param, mi);
+
+ int ret= 0;
+ FOREACH_OBSERVER(ret, after_reset_slave, thd, (&param));
+ return ret;
+}
+#endif /* HAVE_REPLICATION */
+
+int register_trans_observer(Trans_observer *observer, void *p)
+{
+ return transaction_delegate->add_observer(observer, (st_plugin_int *)p);
+}
+
+int unregister_trans_observer(Trans_observer *observer, void *p)
+{
+ return transaction_delegate->remove_observer(observer, (st_plugin_int *)p);
+}
+
+int register_binlog_storage_observer(Binlog_storage_observer *observer, void *p)
+{
+ return binlog_storage_delegate->add_observer(observer, (st_plugin_int *)p);
+}
+
+int unregister_binlog_storage_observer(Binlog_storage_observer *observer, void *p)
+{
+ return binlog_storage_delegate->remove_observer(observer, (st_plugin_int *)p);
+}
+
+#ifdef HAVE_REPLICATION
+int register_binlog_transmit_observer(Binlog_transmit_observer *observer, void *p)
+{
+ return binlog_transmit_delegate->add_observer(observer, (st_plugin_int *)p);
+}
+
+int unregister_binlog_transmit_observer(Binlog_transmit_observer *observer, void *p)
+{
+ return binlog_transmit_delegate->remove_observer(observer, (st_plugin_int *)p);
+}
+
+int register_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void *p)
+{
+ return binlog_relay_io_delegate->add_observer(observer, (st_plugin_int *)p);
+}
+
+int unregister_binlog_relay_io_observer(Binlog_relay_IO_observer *observer, void *p)
+{
+ return binlog_relay_io_delegate->remove_observer(observer, (st_plugin_int *)p);
+}
+#endif /* HAVE_REPLICATION */
diff --git a/sql/rpl_handler.h b/sql/rpl_handler.h
new file mode 100644
index 00000000000..4fb7b4e035b
--- /dev/null
+++ b/sql/rpl_handler.h
@@ -0,0 +1,213 @@
+/* Copyright (C) 2008 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef RPL_HANDLER_H
+#define RPL_HANDLER_H
+
+#include "mysql_priv.h"
+#include "rpl_mi.h"
+#include "rpl_rli.h"
+#include "sql_plugin.h"
+#include "replication.h"
+
+class Observer_info {
+public:
+ void *observer;
+ st_plugin_int *plugin_int;
+ plugin_ref plugin;
+
+ Observer_info(void *ob, st_plugin_int *p)
+ :observer(ob), plugin_int(p)
+ {
+ plugin= plugin_int_to_ref(plugin_int);
+ }
+};
+
+class Delegate {
+public:
+ typedef List<Observer_info> Observer_info_list;
+ typedef List_iterator<Observer_info> Observer_info_iterator;
+
+ int add_observer(void *observer, st_plugin_int *plugin)
+ {
+ int ret= FALSE;
+ if (!inited)
+ return TRUE;
+ write_lock();
+ Observer_info_iterator iter(observer_info_list);
+ Observer_info *info= iter++;
+ while (info && info->observer != observer)
+ info= iter++;
+ if (!info)
+ {
+ info= new Observer_info(observer, plugin);
+ if (!info || observer_info_list.push_back(info, &memroot))
+ ret= TRUE;
+ }
+ else
+ ret= TRUE;
+ unlock();
+ return ret;
+ }
+
+ int remove_observer(void *observer, st_plugin_int *plugin)
+ {
+ int ret= FALSE;
+ if (!inited)
+ return TRUE;
+ write_lock();
+ Observer_info_iterator iter(observer_info_list);
+ Observer_info *info= iter++;
+ while (info && info->observer != observer)
+ info= iter++;
+ if (info)
+ iter.remove();
+ else
+ ret= TRUE;
+ unlock();
+ return ret;
+ }
+
+ inline Observer_info_iterator observer_info_iter()
+ {
+ return Observer_info_iterator(observer_info_list);
+ }
+
+ inline bool is_empty()
+ {
+ return observer_info_list.is_empty();
+ }
+
+ inline int read_lock()
+ {
+ if (!inited)
+ return TRUE;
+ return rw_rdlock(&lock);
+ }
+
+ inline int write_lock()
+ {
+ if (!inited)
+ return TRUE;
+ return rw_wrlock(&lock);
+ }
+
+ inline int unlock()
+ {
+ if (!inited)
+ return TRUE;
+ return rw_unlock(&lock);
+ }
+
+ inline bool is_inited()
+ {
+ return inited;
+ }
+
+ Delegate()
+ {
+ inited= FALSE;
+ if (my_rwlock_init(&lock, NULL))
+ return;
+ init_sql_alloc(&memroot, 1024, 0);
+ inited= TRUE;
+ }
+ ~Delegate()
+ {
+ inited= FALSE;
+ rwlock_destroy(&lock);
+ free_root(&memroot, MYF(0));
+ }
+
+private:
+ Observer_info_list observer_info_list;
+ rw_lock_t lock;
+ MEM_ROOT memroot;
+ bool inited;
+};
+
+class Trans_delegate
+ :public Delegate {
+public:
+ typedef Trans_observer Observer;
+ int before_commit(THD *thd, bool all);
+ int before_rollback(THD *thd, bool all);
+ int after_commit(THD *thd, bool all);
+ int after_rollback(THD *thd, bool all);
+};
+
+class Binlog_storage_delegate
+ :public Delegate {
+public:
+ typedef Binlog_storage_observer Observer;
+ int after_flush(THD *thd, const char *log_file,
+ my_off_t log_pos, bool synced);
+};
+
+#ifdef HAVE_REPLICATION
+class Binlog_transmit_delegate
+ :public Delegate {
+public:
+ typedef Binlog_transmit_observer Observer;
+ int transmit_start(THD *thd, ushort flags,
+ const char *log_file, my_off_t log_pos);
+ int transmit_stop(THD *thd, ushort flags);
+ int reserve_header(THD *thd, ushort flags, String *packet);
+ int before_send_event(THD *thd, ushort flags,
+ String *packet, const
+ char *log_file, my_off_t log_pos );
+ int after_send_event(THD *thd, ushort flags,
+ String *packet);
+ int after_reset_master(THD *thd, ushort flags);
+};
+
+class Binlog_relay_IO_delegate
+ :public Delegate {
+public:
+ typedef Binlog_relay_IO_observer Observer;
+ int thread_start(THD *thd, Master_info *mi);
+ int thread_stop(THD *thd, Master_info *mi);
+ int before_request_transmit(THD *thd, Master_info *mi, ushort flags);
+ int after_read_event(THD *thd, Master_info *mi,
+ const char *packet, ulong len,
+ const char **event_buf, ulong *event_len);
+ int after_queue_event(THD *thd, Master_info *mi,
+ const char *event_buf, ulong event_len,
+ bool synced);
+ int after_reset_slave(THD *thd, Master_info *mi);
+private:
+ void init_param(Binlog_relay_IO_param *param, Master_info *mi);
+};
+#endif /* HAVE_REPLICATION */
+
+int delegates_init();
+void delegates_destroy();
+
+extern Trans_delegate *transaction_delegate;
+extern Binlog_storage_delegate *binlog_storage_delegate;
+#ifdef HAVE_REPLICATION
+extern Binlog_transmit_delegate *binlog_transmit_delegate;
+extern Binlog_relay_IO_delegate *binlog_relay_io_delegate;
+#endif /* HAVE_REPLICATION */
+
+/*
+ If there are no observers in the delegate, we can return 0
+ immediately.
+*/
+#define RUN_HOOK(group, hook, args) \
+ (group ##_delegate->is_empty() ? \
+ 0 : group ##_delegate->hook args)
+
+#endif /* RPL_HANDLER_H */
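
For orientation, the pattern introduced above boils down to: a plugin registers an observer with a delegate, and server code fires the hook through RUN_HOOK, which short-circuits when no observer is registered. The following standalone sketch mirrors only that control flow; the *_sketch names are illustrative stand-ins, not the server's classes (the real Delegate uses MySQL's List, rw-locks and plugin references):

// Simplified model of the Delegate / RUN_HOOK pattern from rpl_handler.h.
#include <cstdio>
#include <list>

struct Trans_observer_sketch {                 // stand-in for Trans_observer
  int (*after_commit)(void *param);
};

class Delegate_sketch {                        // stand-in for Delegate/Trans_delegate
public:
  bool is_empty() const { return observers.empty(); }
  void add_observer(Trans_observer_sketch *o) { observers.push_back(o); }
  int after_commit(void *param)
  {
    int ret= 0;
    for (Trans_observer_sketch *o : observers)
      ret|= o->after_commit(param);            // any failing observer taints the result
    return ret;
  }
private:
  std::list<Trans_observer_sketch *> observers;
};

static Delegate_sketch *transaction_delegate_sketch= new Delegate_sketch();

// Same shape as RUN_HOOK: skip the call entirely when no observer is registered.
#define RUN_HOOK_SKETCH(group, hook, args) \
  (group##_delegate_sketch->is_empty() ? 0 : group##_delegate_sketch->hook args)

static int print_after_commit(void *) { std::puts("after_commit observed"); return 0; }

int main()
{
  static Trans_observer_sketch obs= { print_after_commit };
  transaction_delegate_sketch->add_observer(&obs);
  return RUN_HOOK_SKETCH(transaction, after_commit, ((void *) 0));
}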
diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h
index 4ece092c5b8..a0c71fee099 100644
--- a/sql/rpl_injector.h
+++ b/sql/rpl_injector.h
@@ -25,9 +25,8 @@
/* Forward declarations */
class handler;
class MYSQL_BIN_LOG;
-struct st_table;
+struct TABLE;
-typedef st_table TABLE;
/*
Injector to inject rows into the MySQL server.
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 5e46837e948..e83e0ad0ba9 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -26,17 +26,21 @@
int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
const char *default_val);
+int init_floatvar_from_file(float* var, IO_CACHE* f, float default_val);
+int init_dynarray_intvar_from_file(DYNAMIC_ARRAY* arr, IO_CACHE* f);
-Master_info::Master_info()
+Master_info::Master_info(bool is_slave_recovery)
:Slave_reporting_capability("I/O"),
ssl(0), ssl_verify_server_cert(0), fd(-1), io_thd(0), inited(0),
- abort_slave(0),slave_running(0),
- slave_run_id(0)
+ rli(is_slave_recovery), abort_slave(0), slave_running(0),
+ slave_run_id(0), sync_counter(0),
+ heartbeat_period(0), received_heartbeats(0), master_id(0)
{
host[0] = 0; user[0] = 0; password[0] = 0;
ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0;
ssl_cipher[0]= 0; ssl_key[0]= 0;
+ my_init_dynamic_array(&ignore_server_ids, sizeof(::server_id), 16, 16);
bzero((char*) &file, sizeof(file));
pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST);
@@ -47,6 +51,7 @@ Master_info::Master_info()
Master_info::~Master_info()
{
+ delete_dynamic(&ignore_server_ids);
pthread_mutex_destroy(&run_lock);
pthread_mutex_destroy(&data_lock);
pthread_cond_destroy(&data_cond);
@@ -54,6 +59,43 @@ Master_info::~Master_info()
pthread_cond_destroy(&stop_cond);
}
+/**
+ A comparison function to be supplied as argument to @c sort_dynamic()
+ and @c bsearch()
+
+ @return -1 if the first argument is less than, 0 if it is equal to, 1 if it is greater
+ than the second
+*/
+int change_master_server_id_cmp(ulong *id1, ulong *id2)
+{
+ return *id1 < *id2? -1 : (*id1 > *id2? 1 : 0);
+}
+
+
+/**
+ Reports whether events generated by the server with id s_id are to be
+ ignored, as configured with
+
+ CHANGE MASTER IGNORE_SERVER_IDS= ( list of server ids )
+
+ The method is called from the I/O thread's event receiver filtering.
+
+ @param s_id the master server identifier
+
+ @retval TRUE if s_id is in the list of ignored master servers,
+ @retval FALSE otherwise.
+ */
+bool Master_info::shall_ignore_server_id(ulong s_id)
+{
+ if (likely(ignore_server_ids.elements == 1))
+ return (* (ulong*) dynamic_array_ptr(&ignore_server_ids, 0)) == s_id;
+ else
+ return bsearch((const ulong *) &s_id,
+ ignore_server_ids.buffer,
+ ignore_server_ids.elements, sizeof(ulong),
+ (int (*) (const void*, const void*)) change_master_server_id_cmp)
+ != NULL;
+}
void init_master_info_with_options(Master_info* mi)
{
@@ -84,6 +126,17 @@ void init_master_info_with_options(Master_info* mi)
strmake(mi->ssl_key, master_ssl_key, sizeof(mi->ssl_key)-1);
/* Intentionally init ssl_verify_server_cert to 0, no option available */
mi->ssl_verify_server_cert= 0;
+ /*
+ Always request heartbeats unless master_heartbeat_period is explicitly
+ set to zero. Here is the default value for the heartbeat period
+ if CHANGE MASTER did not specify it. (no data loss in conversion
+ as hb period has a max)
+ */
+ mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD,
+ (slave_net_timeout/2.0));
+ DBUG_ASSERT(mi->heartbeat_period > (float) 0.001
+ || mi->heartbeat_period == 0);
+
DBUG_VOID_RETURN;
}
@@ -93,9 +146,12 @@ enum {
/* 5.1.16 added value of master_ssl_verify_server_cert */
LINE_FOR_MASTER_SSL_VERIFY_SERVER_CERT= 15,
-
+ /* 6.0 added value of master_heartbeat_period */
+ LINE_FOR_MASTER_HEARTBEAT_PERIOD= 16,
+ /* 6.0 added value of master_ignore_server_id */
+ LINE_FOR_REPLICATE_IGNORE_SERVER_IDS= 17,
/* Number of lines currently used when saving master info file */
- LINES_IN_MASTER_INFO= LINE_FOR_MASTER_SSL_VERIFY_SERVER_CERT
+ LINES_IN_MASTER_INFO= LINE_FOR_REPLICATE_IGNORE_SERVER_IDS
};
int init_master_info(Master_info* mi, const char* master_info_fname,
@@ -197,6 +253,7 @@ file '%s')", fname);
mi->fd = fd;
int port, connect_retry, master_log_pos, lines;
int ssl= 0, ssl_verify_server_cert= 0;
+ float master_heartbeat_period= 0.0;
char *first_non_digit;
/*
@@ -281,7 +338,23 @@ file '%s')", fname);
if (lines >= LINE_FOR_MASTER_SSL_VERIFY_SERVER_CERT &&
init_intvar_from_file(&ssl_verify_server_cert, &mi->file, 0))
goto errwithmsg;
-
+ /*
+ Starting from 6.0 master_heartbeat_period might be
+ in the file
+ */
+ if (lines >= LINE_FOR_MASTER_HEARTBEAT_PERIOD &&
+ init_floatvar_from_file(&master_heartbeat_period, &mi->file, 0.0))
+ goto errwithmsg;
+ /*
+ Starting from 6.0, a list of server ids of ignorable servers might be
+ present in the file
+ */
+ if (lines >= LINE_FOR_REPLICATE_IGNORE_SERVER_IDS &&
+ init_dynarray_intvar_from_file(&mi->ignore_server_ids, &mi->file))
+ {
+ sql_print_error("Failed to initialize master info ignore_server_ids");
+ goto errwithmsg;
+ }
}
#ifndef HAVE_OPENSSL
@@ -300,6 +373,7 @@ file '%s')", fname);
mi->connect_retry= (uint) connect_retry;
mi->ssl= (my_bool) ssl;
mi->ssl_verify_server_cert= ssl_verify_server_cert;
+ mi->heartbeat_period= master_heartbeat_period;
}
DBUG_PRINT("master_info",("log_file_name: %s position: %ld",
mi->master_log_name,
@@ -310,6 +384,7 @@ file '%s')", fname);
goto err;
mi->inited = 1;
+ mi->rli.is_relay_log_recovery= FALSE;
// now change cache READ -> WRITE - must do this before flush_master_info
reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1);
if ((error=test(flush_master_info(mi, 1))))
@@ -342,6 +417,7 @@ int flush_master_info(Master_info* mi, bool flush_relay_log_cache)
{
IO_CACHE* file = &mi->file;
char lbuf[22];
+ int err= 0;
DBUG_ENTER("flush_master_info");
DBUG_PRINT("enter",("master_pos: %ld", (long) mi->master_log_pos));
@@ -358,9 +434,35 @@ int flush_master_info(Master_info* mi, bool flush_relay_log_cache)
When we come to this place in code, the relay log may or may not be initialized;
the caller is responsible for setting 'flush_relay_log_cache' accordingly.
*/
- if (flush_relay_log_cache &&
- flush_io_cache(mi->rli.relay_log.get_log_file()))
- DBUG_RETURN(2);
+ if (flush_relay_log_cache)
+ {
+ IO_CACHE *log_file= mi->rli.relay_log.get_log_file();
+ if (flush_io_cache(log_file))
+ DBUG_RETURN(2);
+ }
+
+ /*
+ Produce a line listing the total number and all the ignored server ids.
+ */
+ char* ignore_server_ids_buf;
+ {
+ ignore_server_ids_buf=
+ (char *) my_malloc((sizeof(::server_id) * 3 + 1) *
+ (1 + mi->ignore_server_ids.elements), MYF(MY_WME));
+ if (!ignore_server_ids_buf)
+ DBUG_RETURN(1);
+ for (ulong i= 0, cur_len= my_sprintf(ignore_server_ids_buf,
+ (ignore_server_ids_buf, "%u",
+ mi->ignore_server_ids.elements));
+ i < mi->ignore_server_ids.elements; i++)
+ {
+ ulong s_id;
+ get_dynamic(&mi->ignore_server_ids, (uchar*) &s_id, i);
+ cur_len +=my_sprintf(ignore_server_ids_buf + cur_len,
+ (ignore_server_ids_buf + cur_len,
+ " %lu", s_id));
+ }
+ }
/*
We flushed the relay log BEFORE the master.info file, because if we crash
@@ -378,17 +480,27 @@ int flush_master_info(Master_info* mi, bool flush_relay_log_cache)
contents of the file). But because the number of lines is stored in the first
line of the file, we don't care about this garbage.
*/
-
+ char heartbeat_buf[sizeof(mi->heartbeat_period) * 4]; // buffer to suffice always
+ my_sprintf(heartbeat_buf, (heartbeat_buf, "%.3f", mi->heartbeat_period));
my_b_seek(file, 0L);
my_b_printf(file,
- "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n",
+ "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n%d\n%s\n%s\n",
LINES_IN_MASTER_INFO,
mi->master_log_name, llstr(mi->master_log_pos, lbuf),
mi->host, mi->user,
mi->password, mi->port, mi->connect_retry,
(int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert,
- mi->ssl_cipher, mi->ssl_key, mi->ssl_verify_server_cert);
- DBUG_RETURN(-flush_io_cache(file));
+ mi->ssl_cipher, mi->ssl_key, mi->ssl_verify_server_cert,
+ heartbeat_buf, ignore_server_ids_buf);
+ my_free(ignore_server_ids_buf, MYF(0));
+ err= flush_io_cache(file);
+ if (sync_masterinfo_period && !err &&
+ ++(mi->sync_counter) >= sync_masterinfo_period)
+ {
+ err= my_sync(mi->fd, MYF(MY_WME));
+ mi->sync_counter= 0;
+ }
+ DBUG_RETURN(-err);
}
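
The sync_counter / sync_masterinfo_period logic added above is a plain "fsync every Nth flush" throttle. A minimal sketch of that idea, using hypothetical *_sketch names (the real code works on an IO_CACHE and calls my_sync()):

// Hypothetical stand-in for the fsync throttling in flush_master_info():
// flush on every call, but only fsync once every sync_period_sketch calls.
#include <cstdio>

struct Info_file_sketch {
  unsigned sync_counter;                       // flushes since the last fsync
};

static unsigned sync_period_sketch= 10;        // 0 disables fsyncing, as in the server option

static int flush_info_sketch(Info_file_sketch *info, std::FILE *f)
{
  int err= std::fflush(f) ? 1 : 0;             // always flush the buffered contents
  if (sync_period_sketch && !err &&
      ++info->sync_counter >= sync_period_sketch)
  {
    // the server calls my_sync(fd, MYF(MY_WME)) here; omitted to stay portable
    info->sync_counter= 0;                     // restart the counting window
  }
  return -err;                                 // mirrors DBUG_RETURN(-err)
}

int main()
{
  Info_file_sketch info= { 0 };
  std::fputs("master_log_pos 4\n", stdout);
  return flush_info_sketch(&info, stdout) ? 1 : 0;
}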
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index 93fb0a98198..f822a6bc1b1 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -20,6 +20,7 @@
#include "rpl_rli.h"
#include "rpl_reporting.h"
+#include "my_sys.h"
/*****************************************************************************
@@ -58,8 +59,9 @@
class Master_info : public Slave_reporting_capability
{
public:
- Master_info();
+ Master_info(bool is_slave_recovery);
~Master_info();
+ bool shall_ignore_server_id(ulong s_id);
/* the variables below are needed because we can change masters on the fly */
char master_log_name[FN_REFLEN];
@@ -100,6 +102,16 @@ class Master_info : public Slave_reporting_capability
*/
long clock_diff_with_master;
+ /*
+ Keeps track of the number of events before fsyncing.
+ The option --sync-master-info determines how many
+ events should happen before fsyncing.
+ */
+ uint sync_counter;
+ float heartbeat_period; // interface with CHANGE MASTER or master.info
+ ulonglong received_heartbeats; // counter of received heartbeat events
+ DYNAMIC_ARRAY ignore_server_ids;
+ ulong master_id;
};
void init_master_info_with_options(Master_info* mi);
@@ -109,6 +121,7 @@ int init_master_info(Master_info* mi, const char* master_info_fname,
int thread_mask);
void end_master_info(Master_info* mi);
int flush_master_info(Master_info* mi, bool flush_relay_log_cache);
+int change_master_server_id_cmp(ulong *id1, ulong *id2);
#endif /* HAVE_REPLICATION */
#endif /* RPL_MI_H */
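
Master_info::shall_ignore_server_id() above relies on ignore_server_ids being kept sorted, so a bsearch() with change_master_server_id_cmp() can answer the membership test; the single-element case is a direct comparison. A small standalone sketch of the same lookup, with std::vector standing in for DYNAMIC_ARRAY and *_sketch names being illustrative only:

// Sketch of the IGNORE_SERVER_IDS membership test.
#include <cstdlib>
#include <vector>

static int server_id_cmp_sketch(const void *a, const void *b)
{
  unsigned long id1= *(const unsigned long *) a;
  unsigned long id2= *(const unsigned long *) b;
  return id1 < id2 ? -1 : (id1 > id2 ? 1 : 0);  // same contract as change_master_server_id_cmp
}

static bool shall_ignore_server_id_sketch(const std::vector<unsigned long> &ids,
                                          unsigned long s_id)
{
  if (ids.size() == 1)                          // common case: a single ignored master
    return ids[0] == s_id;
  return std::bsearch(&s_id, ids.data(), ids.size(), sizeof(unsigned long),
                      server_id_cmp_sketch) != 0;
}

int main()
{
  std::vector<unsigned long> ids= {2, 5, 11};   // must stay sorted, as after sort_dynamic()
  return shall_ignore_server_id_sketch(ids, 5) ? 0 : 1;
}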
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index a26717d7acf..a1403d2ff71 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -28,12 +28,13 @@ int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
const char *default_val);
-
-Relay_log_info::Relay_log_info()
+Relay_log_info::Relay_log_info(bool is_slave_recovery)
:Slave_reporting_capability("SQL"),
no_storage(FALSE), replicate_same_server_id(::replicate_same_server_id),
- info_fd(-1), cur_log_fd(-1), save_temporary_tables(0),
- cur_log_old_open_count(0), group_relay_log_pos(0), event_relay_log_pos(0),
+ info_fd(-1), cur_log_fd(-1), relay_log(&sync_relaylog_period),
+ sync_counter(0), is_relay_log_recovery(is_slave_recovery),
+ save_temporary_tables(0), cur_log_old_open_count(0), group_relay_log_pos(0),
+ event_relay_log_pos(0),
#if HAVE_purify
is_fake(FALSE),
#endif
@@ -207,7 +208,7 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
{
sql_print_error("Failed to create a new relay log info file (\
file '%s', errno %d)", fname, my_errno);
- msg= current_thd->main_da.message();
+ msg= current_thd->stmt_da->message();
goto err;
}
if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
@@ -215,7 +216,7 @@ file '%s', errno %d)", fname, my_errno);
{
sql_print_error("Failed to create a cache on relay log info file '%s'",
fname);
- msg= current_thd->main_da.message();
+ msg= current_thd->stmt_da->message();
goto err;
}
@@ -282,6 +283,9 @@ Failed to open the existing relay log info file '%s' (errno %d)",
rli->group_relay_log_pos= rli->event_relay_log_pos= relay_log_pos;
rli->group_master_log_pos= master_log_pos;
+ if (rli->is_relay_log_recovery && init_recovery(rli->mi, &msg))
+ goto err;
+
if (init_relay_log_pos(rli,
rli->group_relay_log_name,
rli->group_relay_log_pos,
@@ -313,7 +317,10 @@ Failed to open the existing relay log info file '%s' (errno %d)",
*/
reinit_io_cache(&rli->info_file, WRITE_CACHE,0L,0,1);
if ((error= flush_relay_log_info(rli)))
- sql_print_error("Failed to flush relay log info file");
+ {
+ msg= "Failed to flush relay log info file";
+ goto err;
+ }
if (count_relay_log_space(rli))
{
msg="Error counting relay log space";
@@ -1212,7 +1219,6 @@ void Relay_log_info::cleanup_context(THD *thd, bool error)
*/
thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS;
- last_event_start_time= 0;
DBUG_VOID_RETURN;
}
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index 171778d9675..fd36d18adae 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -96,6 +96,19 @@ public:
LOG_INFO linfo;
IO_CACHE cache_buf,*cur_log;
+ /*
+ Keeps track of the number of transactions that commit
+ before fsyncing. The option --sync-relay-log-info determines
+ how many transactions should commit before fsyncing.
+ */
+ uint sync_counter;
+
+ /*
+ Identifies when the recovery process is going on.
+ See sql/slave.cc:init_recovery for further details.
+ */
+ bool is_relay_log_recovery;
+
/* The following variables are safe to read any time */
/* IO_CACHE of the info file - set only during init or end */
@@ -267,7 +280,7 @@ public:
char slave_patternload_file[FN_REFLEN];
size_t slave_patternload_file_size;
- Relay_log_info();
+ Relay_log_info(bool is_slave_recovery);
~Relay_log_info();
/*
@@ -336,12 +349,10 @@ public:
void clear_tables_to_lock();
/*
- Used by row-based replication to detect that it should not stop at
- this event, but give it a chance to send more events. The time
- where the last event inside a group started is stored here. If the
- variable is zero, we are not in a group (but may be in a
- transaction).
- */
+ Used to defer stopping the SQL thread to give it a chance
+ to finish up the current group of events.
+ The timestamp is set and reset in @c sql_slave_killed().
+ */
time_t last_event_start_time;
/**
diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc
index 6ef9a8623fe..b04a3120a86 100644
--- a/sql/rpl_tblmap.cc
+++ b/sql/rpl_tblmap.cc
@@ -34,10 +34,10 @@ table_mapping::table_mapping()
No "free_element" function for entries passed here, as the entries are
allocated in a MEM_ROOT (freed as a whole in the destructor), they cannot
be freed one by one.
- Note that below we don't test if hash_init() succeeded. This constructor
- is called at startup only.
+ Note that below we don't test if my_hash_init() succeeded. This
+ constructor is called at startup only.
*/
- (void) hash_init(&m_table_ids,&my_charset_bin,TABLE_ID_HASH_SIZE,
+ (void) my_hash_init(&m_table_ids,&my_charset_bin,TABLE_ID_HASH_SIZE,
offsetof(entry,table_id),sizeof(ulong),
0,0,0);
/* We don't preallocate any block, this is consistent with m_free=0 above */
@@ -49,7 +49,7 @@ table_mapping::~table_mapping()
#ifdef MYSQL_CLIENT
clear_tables();
#endif
- hash_free(&m_table_ids);
+ my_hash_free(&m_table_ids);
free_root(&m_mem_root, MYF(0));
}
@@ -115,7 +115,7 @@ int table_mapping::set_table(ulong table_id, TABLE* table)
#ifdef MYSQL_CLIENT
free_table_map_log_event(e->table);
#endif
- hash_delete(&m_table_ids,(uchar *)e);
+ my_hash_delete(&m_table_ids,(uchar *)e);
}
e->table_id= table_id;
e->table= table;
@@ -138,7 +138,7 @@ int table_mapping::remove_table(ulong table_id)
entry *e= find_entry(table_id);
if (e)
{
- hash_delete(&m_table_ids,(uchar *)e);
+ my_hash_delete(&m_table_ids,(uchar *)e);
/* we add this entry to the chain of free (free for use) entries */
e->next= m_free;
m_free= e;
@@ -156,7 +156,7 @@ void table_mapping::clear_tables()
DBUG_ENTER("table_mapping::clear_tables()");
for (uint i= 0; i < m_table_ids.records; i++)
{
- entry *e= (entry *)hash_element(&m_table_ids, i);
+ entry *e= (entry *)my_hash_element(&m_table_ids, i);
#ifdef MYSQL_CLIENT
free_table_map_log_event(e->table);
#endif
diff --git a/sql/rpl_tblmap.h b/sql/rpl_tblmap.h
index 3b5b10be580..a6ec8bcbc9b 100644
--- a/sql/rpl_tblmap.h
+++ b/sql/rpl_tblmap.h
@@ -18,8 +18,7 @@
/* Forward declarations */
#ifndef MYSQL_CLIENT
-struct st_table;
-typedef st_table TABLE;
+struct TABLE;
#else
class Table_map_log_event;
typedef Table_map_log_event TABLE;
@@ -91,9 +90,9 @@ private:
entry *find_entry(ulong table_id)
{
- return (entry *)hash_search(&m_table_ids,
- (uchar*)&table_id,
- sizeof(table_id));
+ return (entry *) my_hash_search(&m_table_ids,
+ (uchar*)&table_id,
+ sizeof(table_id));
}
int expand();
diff --git a/sql/scheduler.h b/sql/scheduler.h
index 46bbd300cbb..e7916031a27 100644
--- a/sql/scheduler.h
+++ b/sql/scheduler.h
@@ -1,3 +1,6 @@
+#ifndef SCHEDULER_INCLUDED
+#define SCHEDULER_INCLUDED
+
/* Copyright (C) 2007 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -58,3 +61,5 @@ enum pool_command_op
class thd_scheduler
{};
+
+#endif /* SCHEDULER_INCLUDED */
diff --git a/sql/set_var.cc b/sql/set_var.cc
index b80bdde9670..36597658077 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -125,8 +125,10 @@ static void fix_net_read_timeout(THD *thd, enum_var_type type);
static void fix_net_write_timeout(THD *thd, enum_var_type type);
static void fix_net_retry_count(THD *thd, enum_var_type type);
static void fix_max_join_size(THD *thd, enum_var_type type);
+#ifdef HAVE_QUERY_CACHE
static void fix_query_cache_size(THD *thd, enum_var_type type);
static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type);
+#endif
static void fix_myisam_max_sort_file_size(THD *thd, enum_var_type type);
static void fix_max_binlog_size(THD *thd, enum_var_type type);
static void fix_max_relay_log_size(THD *thd, enum_var_type type);
@@ -302,9 +304,6 @@ static sys_var_key_cache_long sys_key_cache_division_limit(&vars, "key_cache_div
static sys_var_key_cache_long sys_key_cache_age_threshold(&vars, "key_cache_age_threshold",
offsetof(KEY_CACHE,
param_age_threshold));
-static sys_var_const sys_language(&vars, "language",
- OPT_GLOBAL, SHOW_CHAR,
- (uchar*) language);
static sys_var_const sys_large_files_support(&vars, "large_files_support",
OPT_GLOBAL, SHOW_BOOL,
(uchar*) &opt_large_files);
@@ -314,6 +313,9 @@ static sys_var_const sys_large_page_size(&vars, "large_page_size",
static sys_var_const sys_large_pages(&vars, "large_pages",
OPT_GLOBAL, SHOW_MY_BOOL,
(uchar*) &opt_large_pages);
+static sys_var_const sys_language(&vars, "lc_messages_dir",
+ OPT_GLOBAL, SHOW_CHAR,
+ (uchar*) lc_messages_dir);
static sys_var_bool_ptr sys_local_infile(&vars, "local_infile",
&opt_local_infile);
#ifdef HAVE_MLOCKALL
@@ -434,7 +436,7 @@ static sys_var_thd_enum sys_myisam_stats_method(&vars, "myisam_stats_met
&myisam_stats_method_typelib,
NULL);
-#ifdef __NT__
+#ifdef _WIN32
/* purecov: begin inspected */
static sys_var_const sys_named_pipe(&vars, "named_pipe",
OPT_GLOBAL, SHOW_MY_BOOL,
@@ -493,9 +495,6 @@ static sys_var_thd_ulong sys_div_precincrement(&vars, "div_precision_increment",
&SV::div_precincrement);
static sys_var_long_ptr sys_rpl_recovery_rank(&vars, "rpl_recovery_rank",
&rpl_recovery_rank);
-static sys_var_long_ptr sys_query_cache_size(&vars, "query_cache_size",
- &query_cache_size,
- fix_query_cache_size);
static sys_var_thd_ulong sys_range_alloc_block_size(&vars, "range_alloc_block_size",
&SV::range_alloc_block_size);
@@ -557,14 +556,20 @@ sys_var_enum_const sys_thread_handling(&vars, "thread_handling",
NULL);
#ifdef HAVE_QUERY_CACHE
+static sys_var_long_ptr sys_query_cache_size(&vars, "query_cache_size",
+ &query_cache_size,
+ fix_query_cache_size);
static sys_var_long_ptr sys_query_cache_limit(&vars, "query_cache_limit",
- &query_cache.query_cache_limit);
-static sys_var_long_ptr sys_query_cache_min_res_unit(&vars, "query_cache_min_res_unit",
- &query_cache_min_res_unit,
- fix_query_cache_min_res_unit);
+ &query_cache.query_cache_limit);
+static sys_var_long_ptr
+ sys_query_cache_min_res_unit(&vars, "query_cache_min_res_unit",
+ &query_cache_min_res_unit,
+ fix_query_cache_min_res_unit);
+static int check_query_cache_type(THD *thd, set_var *var);
static sys_var_thd_enum sys_query_cache_type(&vars, "query_cache_type",
&SV::query_cache_type,
- &query_cache_type_typelib);
+ &query_cache_type_typelib, NULL,
+ check_query_cache_type);
static sys_var_thd_bool
sys_query_cache_wlock_invalidate(&vars, "query_cache_wlock_invalidate",
&SV::query_cache_wlock_invalidate);
@@ -768,7 +773,7 @@ static sys_var_thd_bit sys_unique_checks(&vars, "unique_checks", 0,
OPTION_RELAXED_UNIQUE_CHECKS,
1,
sys_var::SESSION_VARIABLE_IN_BINLOG);
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
static sys_var_thd_bit sys_profiling(&vars, "profiling", NULL,
set_option_bit,
ulonglong(OPTION_PROFILING));
@@ -792,6 +797,9 @@ sys_last_insert_id(&vars, "last_insert_id",
static sys_var_last_insert_id
sys_identity(&vars, "identity", sys_var::SESSION_VARIABLE_IN_BINLOG);
+static sys_var_thd_lc_messages
+sys_lc_messages(&vars, "lc_messages", sys_var::NOT_IN_BINLOG);
+
static sys_var_thd_lc_time_names
sys_lc_time_names(&vars, "lc_time_names", sys_var::SESSION_VARIABLE_IN_BINLOG);
@@ -870,9 +878,9 @@ static sys_var_have_plugin sys_have_ndbcluster(&vars, "have_ndbcluster", C_STRIN
static sys_var_have_variable sys_have_openssl(&vars, "have_openssl", &have_ssl);
static sys_var_have_variable sys_have_ssl(&vars, "have_ssl", &have_ssl);
static sys_var_have_plugin sys_have_partition_db(&vars, "have_partitioning", C_STRING_WITH_LEN("partition"), MYSQL_STORAGE_ENGINE_PLUGIN);
+static sys_var_have_variable sys_have_profiling(&vars, "have_profiling", &have_profiling);
static sys_var_have_variable sys_have_query_cache(&vars, "have_query_cache",
&have_query_cache);
-static sys_var_have_variable sys_have_community_features(&vars, "have_community_features", &have_community_features);
static sys_var_have_variable sys_have_rtree_keys(&vars, "have_rtree_keys", &have_rtree_keys);
static sys_var_have_variable sys_have_symlink(&vars, "have_symlink", &have_symlink);
/* Global read-only variable describing server license */
@@ -913,8 +921,10 @@ bool sys_var_str::check(THD *thd, set_var *var)
return 0;
if ((res=(*check_func)(thd, var)) < 0)
- my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0),
- name, var->value->str_value.ptr());
+ {
+ ErrConvString err(&var->value->str_value);
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, err.ptr());
+ }
return res;
}
@@ -1126,10 +1136,9 @@ static void fix_net_retry_count(THD *thd __attribute__((unused)),
{}
#endif /* HAVE_REPLICATION */
-
+#ifdef HAVE_QUERY_CACHE
static void fix_query_cache_size(THD *thd, enum_var_type type)
{
-#ifdef HAVE_QUERY_CACHE
ulong new_cache_size= query_cache.resize(query_cache_size);
/*
@@ -1143,11 +1152,35 @@ static void fix_query_cache_size(THD *thd, enum_var_type type)
query_cache_size, new_cache_size);
query_cache_size= new_cache_size;
-#endif
}
-#ifdef HAVE_QUERY_CACHE
+/**
+ Check trigger invoked before the query_cache_type variable is updated.
+ @param thd Thread handler
+ @param var Pointer to the new variable status
+
+ @return Status code
+ @retval 1 Failure
+ @retval 0 Success
+*/
+
+static int check_query_cache_type(THD *thd, set_var *var)
+{
+ /*
+ Don't allow changes of the query_cache_type if the query cache
+ is disabled.
+ */
+ if (query_cache.is_disabled())
+ {
+ my_error(ER_QUERY_CACHE_DISABLED,MYF(0));
+ return 1;
+ }
+
+ return 0;
+}
+
+
static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type)
{
query_cache_min_res_unit=
@@ -1541,6 +1574,23 @@ static bool get_unsigned(THD *thd, set_var *var, ulonglong user_max,
}
+bool sys_var_uint_ptr::check(THD *thd, set_var *var)
+{
+ var->save_result.ulong_value= (ulong) var->value->val_uint();
+ return 0;
+}
+
+bool sys_var_uint_ptr::update(THD *thd, set_var *var)
+{
+ *value= (uint) var->save_result.ulong_value;
+ return 0;
+}
+
+void sys_var_uint_ptr::set_default(THD *thd, enum_var_type type)
+{
+ *value= (uint) option_limits->def_value;
+}
+
sys_var_long_ptr::
sys_var_long_ptr(sys_var_chain *chain, const char *name_arg, ulong *value_ptr_arg,
sys_after_update_func after_update_arg)
@@ -1802,9 +1852,15 @@ bool sys_var::check_enum(THD *thd, set_var *var, const TYPELIB *enum_names)
if (!(res=var->value->val_str(&str)) ||
((long) (var->save_result.ulong_value=
(ulong) find_type(enum_names, res->ptr(),
- res->length(),1)-1)) < 0)
+ res->length(), FALSE) - 1)) < 0)
{
- value= res ? res->c_ptr() : "NULL";
+ if (res)
+ {
+ ErrConvString err(res);
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, err.ptr());
+ return 1;
+ }
+ value= "NULL";
goto err;
}
}
@@ -1857,8 +1913,9 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names)
&not_used));
if (error_len)
{
- strmake(buff, error, min(sizeof(buff) - 1, error_len));
- goto err;
+ ErrConvString err(error, error_len, res->charset());
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, err.ptr());
+ return 1;
}
}
else
@@ -2006,7 +2063,8 @@ bool sys_var_thd_date_time_format::check(THD *thd, set_var *var)
if (!(format= date_time_format_make(date_time_type,
res->ptr(), res->length())))
{
- my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, res->c_ptr());
+ ErrConvString err(res);
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, err.ptr());
return 1;
}
@@ -2110,7 +2168,8 @@ bool sys_var_collation::check(THD *thd, set_var *var)
}
if (!(tmp=get_charset_by_name(res->c_ptr(),MYF(0))))
{
- my_error(ER_UNKNOWN_COLLATION, MYF(0), res->c_ptr());
+ ErrConvString err(res);
+ my_error(ER_UNKNOWN_COLLATION, MYF(0), err.ptr());
return 1;
}
}
@@ -2150,7 +2209,8 @@ bool sys_var_character_set::check(THD *thd, set_var *var)
else if (!(tmp=get_charset_by_csname(res->c_ptr(),MY_CS_PRIMARY,MYF(0))) &&
!(tmp=get_old_charset_by_name(res->c_ptr())))
{
- my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), res->c_ptr());
+ ErrConvString err(res);
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), err.ptr());
return 1;
}
}
@@ -2541,9 +2601,20 @@ bool update_sys_var_str_path(THD *thd, sys_var_str *var_str,
{
MYSQL_QUERY_LOG *file_log;
char buff[FN_REFLEN];
- char *res= 0, *old_value=(char *)(var ? var->value->str_value.ptr() : 0);
+ char *res= 0, *old_value= 0;
bool result= 0;
- uint str_length= (var ? var->value->str_value.length() : 0);
+ uint str_length= 0;
+
+ if (var)
+ {
+ String str(buff, sizeof(buff), system_charset_info), *newval;
+
+ newval= var->value->val_str(&str);
+ old_value= newval->c_ptr_safe();
+ str_length= strlen(old_value);
+ }
+
+
switch (log_type) {
case QUERY_LOG_SLOW:
@@ -2912,7 +2983,7 @@ bool sys_var_thd_ulong_session_readonly::check(THD *thd, set_var *var)
}
-bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var)
+static MY_LOCALE *check_locale(THD *thd, const char *name, set_var *var)
{
MY_LOCALE *locale_match;
@@ -2922,29 +2993,38 @@ bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var)
{
char buf[20];
int10_to_str((int) var->value->val_int(), buf, -10);
- my_printf_error(ER_UNKNOWN_ERROR, "Unknown locale: '%s'", MYF(0), buf);
- return 1;
+ my_printf_error(ER_UNKNOWN_LOCALE, ER(ER_UNKNOWN_LOCALE), MYF(0), buf);
+ return 0;
}
}
else // STRING_RESULT
{
char buff[6];
- String str(buff, sizeof(buff), &my_charset_latin1), *res;
+ String str(buff, sizeof(buff), system_charset_info), *res;
if (!(res=var->value->val_str(&str)))
{
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL");
- return 1;
+ return 0;
}
const char *locale_str= res->c_ptr();
if (!(locale_match= my_locale_by_name(locale_str)))
{
- my_printf_error(ER_UNKNOWN_ERROR,
- "Unknown locale: '%s'", MYF(0), locale_str);
- return 1;
+ my_printf_error(ER_UNKNOWN_LOCALE, ER(ER_UNKNOWN_LOCALE),
+ MYF(0), locale_str);
+ return 0;
}
}
- var->save_result.locale_value= locale_match;
+ return var->save_result.locale_value= locale_match;
+}
+
+
+bool sys_var_thd_lc::check(THD *thd, set_var *var)
+{
+ MY_LOCALE *locale_match;
+
+ if (!(locale_match= check_locale(thd, name, var)))
+ return 1;
return 0;
}
@@ -2976,6 +3056,56 @@ void sys_var_thd_lc_time_names::set_default(THD *thd, enum_var_type type)
thd->variables.lc_time_names= global_system_variables.lc_time_names;
}
+
+bool sys_var_thd_lc_messages::update(THD *thd, set_var *var)
+{
+ MY_LOCALE *locale= var->save_result.locale_value;
+
+ if (!locale->errmsgs->errmsgs)
+ {
+ pthread_mutex_lock(&LOCK_error_messages);
+ if (!locale->errmsgs->errmsgs &&
+ read_texts(ERRMSG_FILE, locale->errmsgs->language,
+ &locale->errmsgs->errmsgs,
+ ER_ERROR_LAST - ER_ERROR_FIRST + 1))
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_UNKNOWN_ERROR,
+ "Can't process error message file for locale '%s'",
+ locale->name);
+ pthread_mutex_unlock(&LOCK_error_messages);
+ return 0;
+ }
+ pthread_mutex_unlock(&LOCK_error_messages);
+ }
+
+ if (var->type == OPT_GLOBAL)
+ global_system_variables.lc_messages= locale;
+ else
+ thd->variables.lc_messages= locale;
+
+ return 0;
+}
+
+
+uchar *sys_var_thd_lc_messages::value_ptr(THD *thd, enum_var_type type,
+ LEX_STRING *base)
+{
+ return type == OPT_GLOBAL ?
+ (uchar *) global_system_variables.lc_messages->name :
+ (uchar *) thd->variables.lc_messages->name;
+}
+
+
+void sys_var_thd_lc_messages::set_default(THD *thd, enum_var_type type)
+{
+ if (type == OPT_GLOBAL)
+ global_system_variables.lc_messages= my_default_lc_messages;
+ else
+ thd->variables.lc_messages= global_system_variables.lc_messages;
+}
+
+
/*
Handling of microseconds given as seconds.part_seconds
@@ -3061,6 +3191,15 @@ static bool set_option_autocommit(THD *thd, set_var *var)
ulonglong org_options= thd->options;
+ /*
+ If we are setting AUTOCOMMIT=1 and it was not already 1, then we
+ need to commit any outstanding transactions.
+ */
+ if (var->save_result.ulong_value != 0 &&
+ (thd->options & OPTION_NOT_AUTOCOMMIT) &&
+ ha_commit(thd))
+ return 1;
+
if (var->save_result.ulong_value != 0)
thd->options&= ~((sys_var_thd_bit*) var->var)->bit_flag;
else
@@ -3074,8 +3213,6 @@ static bool set_option_autocommit(THD *thd, set_var *var)
thd->options&= ~(ulonglong) (OPTION_BEGIN | OPTION_KEEP_LOG);
thd->transaction.all.modified_non_trans_table= FALSE;
thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
- if (ha_commit(thd))
- return 1;
}
else
{
@@ -3139,17 +3276,13 @@ static int check_pseudo_thread_id(THD *thd, set_var *var)
static uchar *get_warning_count(THD *thd)
{
- thd->sys_var_tmp.long_value=
- (thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] +
- thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR] +
- thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]);
+ thd->sys_var_tmp.long_value= thd->warning_info->warn_count();
return (uchar*) &thd->sys_var_tmp.long_value;
}
static uchar *get_error_count(THD *thd)
{
- thd->sys_var_tmp.long_value=
- thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR];
+ thd->sys_var_tmp.long_value= thd->warning_info->error_count();
return (uchar*) &thd->sys_var_tmp.long_value;
}
@@ -3259,7 +3392,7 @@ int mysql_add_sys_var_chain(sys_var *first, struct my_option *long_options)
error:
for (; first != var; first= first->next)
- hash_delete(&system_variable_hash, (uchar*) first);
+ my_hash_delete(&system_variable_hash, (uchar*) first);
return 1;
}
@@ -3283,7 +3416,7 @@ int mysql_del_sys_var_chain(sys_var *first)
/* A write lock should be held on LOCK_system_variables_hash */
for (sys_var *var= first; var; var= var->next)
- result|= hash_delete(&system_variable_hash, (uchar*) var);
+ result|= my_hash_delete(&system_variable_hash, (uchar*) var);
return result;
}
@@ -3320,7 +3453,7 @@ SHOW_VAR* enumerate_sys_vars(THD *thd, bool sorted)
for (i= 0; i < count; i++)
{
- sys_var *var= (sys_var*) hash_element(&system_variable_hash, i);
+ sys_var *var= (sys_var*) my_hash_element(&system_variable_hash, i);
show->name= var->name;
show->value= (char*) var;
show->type= SHOW_SYS;
@@ -3357,8 +3490,8 @@ int set_var_init()
for (sys_var *var=vars.first; var; var= var->next, count++) ;
- if (hash_init(&system_variable_hash, system_charset_info, count, 0,
- 0, (hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
+ if (my_hash_init(&system_variable_hash, system_charset_info, count, 0,
+ 0, (my_hash_get_key) get_sys_var_length, 0, HASH_UNIQUE))
goto error;
vars.last->next= NULL;
@@ -3383,7 +3516,7 @@ error:
void set_var_free()
{
- hash_free(&system_variable_hash);
+ my_hash_free(&system_variable_hash);
}
@@ -3409,7 +3542,7 @@ sys_var *intern_find_sys_var(const char *str, uint length, bool no_error)
This function is only called from the sql_plugin.cc.
A lock on LOCK_system_variable_hash should be held
*/
- var= (sys_var*) hash_search(&system_variable_hash,
+ var= (sys_var*) my_hash_search(&system_variable_hash,
(uchar*) str, length ? length : strlen(str));
if (!(var || no_error))
my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str);
@@ -3496,6 +3629,16 @@ bool not_all_support_one_shot(List<set_var_base> *var_list)
Functions to handle SET mysql_internal_variable=const_expr
*****************************************************************************/
+/**
+ Verify that the supplied value is correct.
+
+ @param thd Thread handler
+
+ @return status code
+ @retval -1 Failure
+ @retval 0 Success
+*/
+
int set_var::check(THD *thd)
{
if (var->is_readonly())
@@ -3923,7 +4066,8 @@ ulong fix_sql_mode(ulong sql_mode)
if (sql_mode & MODE_TRADITIONAL)
sql_mode|= (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES |
MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_NO_AUTO_CREATE_USER);
+ MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_NO_AUTO_CREATE_USER |
+ MODE_NO_ENGINE_SUBSTITUTION);
return sql_mode;
}
@@ -4001,8 +4145,9 @@ bool sys_var_thd_optimizer_switch::check(THD *thd, set_var *var)
&error, &error_len, &not_used);
if (error_len)
{
- strmake(buff, error, min(sizeof(buff) - 1, error_len));
- goto err;
+ ErrConvString err(error, error_len, res->charset());
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, err.ptr());
+ return TRUE;
}
return FALSE;
err:
diff --git a/sql/set_var.h b/sql/set_var.h
index fa747107870..c08097521d2 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -1,3 +1,6 @@
+#ifndef SET_VAR_INCLUDED
+#define SET_VAR_INCLUDED
+
/* Copyright (C) 2002-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -172,6 +175,27 @@ public:
{ return (uchar*) value; }
};
+/**
+ Unsigned int system variable class
+ */
+class sys_var_uint_ptr :public sys_var
+{
+public:
+ sys_var_uint_ptr(sys_var_chain *chain, const char *name_arg,
+ uint *value_ptr_arg,
+ sys_after_update_func after_update_arg= NULL)
+ :sys_var(name_arg, after_update_arg),
+ value(value_ptr_arg)
+ { chain_sys_var(chain); }
+ bool check(THD *thd, set_var *var);
+ bool update(THD *thd, set_var *var);
+ void set_default(THD *thd, enum_var_type type);
+ SHOW_TYPE show_type() { return SHOW_INT; }
+ uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
+ { return (uchar*) value; }
+private:
+ uint *value;
+};
/*
A global ulong variable that is protected by LOCK_global_system_variables
@@ -519,10 +543,16 @@ public:
{ chain_sys_var(chain); }
bool check(THD *thd, set_var *var)
{
- int ret= 0;
- if (check_func)
- ret= (*check_func)(thd, var);
- return ret ? ret : check_enum(thd, var, enum_names);
+ /*
+ check_enum fails if the supplied character representation was wrong,
+ or if the integer value was wrong or missing.
+ */
+ if (check_enum(thd, var, enum_names))
+ return TRUE;
+ else if ((check_func && (*check_func)(thd, var)))
+ return TRUE;
+ else
+ return FALSE;
}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
@@ -1216,11 +1246,12 @@ public:
};
-class sys_var_thd_lc_time_names :public sys_var_thd
+
+class sys_var_thd_lc: public sys_var_thd
{
public:
- sys_var_thd_lc_time_names(sys_var_chain *chain, const char *name_arg,
- Binlog_status_enum binlog_status_arg= NOT_IN_BINLOG)
+ sys_var_thd_lc(sys_var_chain *chain, const char *name_arg,
+ Binlog_status_enum binlog_status_arg= NOT_IN_BINLOG)
: sys_var_thd(name_arg, NULL, binlog_status_arg)
{
#if MYSQL_VERSION_ID < 50000
@@ -1235,11 +1266,35 @@ public:
return ((type != STRING_RESULT) && (type != INT_RESULT));
}
bool check_default(enum_var_type type) { return 0; }
+};
+
+
+class sys_var_thd_lc_time_names :public sys_var_thd_lc
+{
+public:
+ sys_var_thd_lc_time_names(sys_var_chain *chain_arg, const char *name_arg,
+ Binlog_status_enum binlog_status_arg= NOT_IN_BINLOG)
+ : sys_var_thd_lc(chain_arg, name_arg, binlog_status_arg)
+ {}
bool update(THD *thd, set_var *var);
uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
- virtual void set_default(THD *thd, enum_var_type type);
+ void set_default(THD *thd, enum_var_type type);
+};
+
+
+class sys_var_thd_lc_messages :public sys_var_thd_lc
+{
+public:
+ sys_var_thd_lc_messages(sys_var_chain *chain_arg, const char *name_arg,
+ Binlog_status_enum binlog_status_arg= NOT_IN_BINLOG)
+ : sys_var_thd_lc(chain_arg, name_arg, binlog_status_arg)
+ {}
+ bool update(THD *thd, set_var *var);
+ uchar *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+ void set_default(THD *thd, enum_var_type type);
};
+
#ifdef HAVE_EVENT_SCHEDULER
class sys_var_event_scheduler :public sys_var_long_ptr
{
@@ -1322,8 +1377,8 @@ public:
{
Item_field *item= (Item_field*) value_arg;
if (!(value=new Item_string(item->field_name,
- (uint) strlen(item->field_name),
- item->collation.collation)))
+ (uint) strlen(item->field_name),
+ system_charset_info)))
value=value_arg; /* Give error message later */
}
else
@@ -1465,3 +1520,5 @@ void free_key_cache(const char *name, KEY_CACHE *key_cache);
bool process_key_caches(process_key_cache_t func);
void delete_elements(I_List<NAMED_LIST> *list,
void (*free_element)(const char*, uchar*));
+
+#endif /* SET_VAR_INCLUDED */
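
The new sys_var_uint_ptr class follows the usual sys_var split: check() validates the supplied value and stashes it in save_result, update() copies it into the backing uint, and set_default() restores the option's default. A compact sketch of that contract, with simplified stand-ins for the server types (note the real check() simply casts val_uint(); the extra range test below only marks where validation belongs):

// Simplified model of the check()/update()/set_default() contract.
#include <cstdio>

struct set_var_sketch {
  long long requested;          // value supplied in SET ... = <n>
  unsigned long saved;          // filled by check(), consumed by update()
};

class sys_var_uint_ptr_sketch {
public:
  sys_var_uint_ptr_sketch(unsigned *value_ptr, unsigned def_value_arg)
    : value(value_ptr), def_value(def_value_arg) {}
  bool check(set_var_sketch *var)
  {
    if (var->requested < 0)     // reject values the backing uint cannot hold
      return true;
    var->saved= (unsigned long) var->requested;
    return false;
  }
  bool update(set_var_sketch *var)
  {
    *value= (unsigned) var->saved;
    return false;
  }
  void set_default() { *value= def_value; }
private:
  unsigned *value;
  unsigned def_value;
};

int main()
{
  unsigned period= 0;                           // hypothetical backing variable
  sys_var_uint_ptr_sketch var(&period, 10);
  set_var_sketch request= { 25, 0 };
  if (!var.check(&request))
    var.update(&request);
  std::printf("period= %u\n", period);
  return 0;
}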
diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am
index 68b393e619f..06b349d5de2 100644
--- a/sql/share/Makefile.am
+++ b/sql/share/Makefile.am
@@ -15,7 +15,7 @@
## Process this file with automake to create Makefile.in
-EXTRA_DIST= errmsg.txt
+EXTRA_DIST= errmsg-utf8.txt
dist-hook:
for dir in charsets @AVAILABLE_LANGUAGES@; do \
@@ -31,7 +31,7 @@ all-local: english/errmsg.sys
# Use the english errmsg.sys as a flag that all errmsg.sys needs to be
# created. Normally these are created by extra/Makefile
-english/errmsg.sys: errmsg.txt
+english/errmsg.sys: errmsg-utf8.txt
rm -f $(top_builddir)/include/mysqld_error.h
(cd $(top_builddir)/extra && $(MAKE))
@@ -43,8 +43,8 @@ install-data-local:
$(DESTDIR)$(pkgdatadir)/$$lang/errmsg.sys; \
done
$(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/charsets
- $(INSTALL_DATA) $(srcdir)/errmsg.txt \
- $(DESTDIR)$(pkgdatadir)/errmsg.txt; \
+ $(INSTALL_DATA) $(srcdir)/errmsg-utf8.txt \
+ $(DESTDIR)$(pkgdatadir)/errmsg-utf8.txt; \
$(INSTALL_DATA) $(srcdir)/charsets/README $(DESTDIR)$(pkgdatadir)/charsets/README
$(INSTALL_DATA) $(srcdir)/charsets/*.xml $(DESTDIR)$(pkgdatadir)/charsets
diff --git a/sql/share/errmsg-cnv.sh b/sql/share/errmsg-cnv.sh
new file mode 100644
index 00000000000..ac6243c9a37
--- /dev/null
+++ b/sql/share/errmsg-cnv.sh
@@ -0,0 +1,61 @@
+#
+# This shell script converts errmsg.txt
+# from a mixed-charset format
+# to utf8 format
+# and writes the result to errmsg-utf8.txt
+#
+
+
+cat errmsg.txt | while IFS= ; read -r a
+do
+cs=""
+
+var="${a#"${a%%[![:space:]]*}"}"
+
+case $var in
+cze*|hun*|pol*|rum*|slo*)
+ cs=latin2
+ ;;
+dan*|nla*|eng*|fre*|ger*|ita*|nor*|por*|spa*|swe*)
+ cs=latin1
+ ;;
+est*)
+ cs=latin7
+ ;;
+greek*)
+ cs=windows-1253
+ ;;
+jpn*)
+ cs=euc-jp
+ ;;
+jps*)
+ cs=shift-jis
+ ;;
+kor*)
+ cs=euc-kr
+ ;;
+serbian*)
+ cs=windows-1250
+ ;;
+rus*)
+ cs=koi8-r
+ ;;
+ukr*)
+ cs=koi8-u
+ ;;
+*)
+ echo $a
+esac
+
+if [ "x$cs" != "x" ]
+then
+ b=`echo $a | iconv -f $cs -t utf-8` ; rc=$?
+ if [ "$rc" == "0" ]
+ then
+ echo "$b"
+ else
+ echo "# This message failed to convert from $cs, skipped"
+ fi
+fi
+done > errmsg-utf8.txt
+
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
new file mode 100644
index 00000000000..4260efdeb56
--- /dev/null
+++ b/sql/share/errmsg-utf8.txt
@@ -0,0 +1,6262 @@
+languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, japanese-sjis=jps sjis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u;
+
+default-language eng
+
+start-error-number 1000
+
+ER_HASHCHK
+ eng "hashchk"
+ER_NISAMCHK
+ eng "isamchk"
+ER_NO
+ cze "NE"
+ dan "NEJ"
+ nla "NEE"
+ eng "NO"
+ est "EI"
+ fre "NON"
+ ger "Nein"
+ greek "ΟΧΙ"
+ hun "NEM"
+ kor "아니오"
+ nor "NEI"
+ norwegian-ny "NEI"
+ pol "NIE"
+ por "NÃO"
+ rum "NU"
+ rus "ÐЕТ"
+ serbian "NE"
+ slo "NIE"
+ ukr "ÐІ"
+ER_YES
+ cze "ANO"
+ dan "JA"
+ nla "JA"
+ eng "YES"
+ est "JAH"
+ fre "OUI"
+ ger "Ja"
+ greek "ÎΑΙ"
+ hun "IGEN"
+ ita "SI"
+ kor "예"
+ nor "JA"
+ norwegian-ny "JA"
+ pol "TAK"
+ por "SIM"
+ rum "DA"
+ rus "ДÐ"
+ serbian "DA"
+ slo "Ãno"
+ spa "SI"
+ ukr "ТÐК"
+ER_CANT_CREATE_FILE
+ cze "Nemohu vytvo-Břit soubor '%-.200s' (chybový kód: %d)"
+ dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %d)"
+ nla "Kan file '%-.200s' niet aanmaken (Errcode: %d)"
+ eng "Can't create file '%-.200s' (errno: %d)"
+ est "Ei suuda luua faili '%-.200s' (veakood: %d)"
+ fre "Ne peut créer le fichier '%-.200s' (Errcode: %d)"
+ ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %d)"
+ greek "ΑδÏνατη η δημιουÏγία του αÏχείου '%-.200s' (κωδικός λάθους: %d)"
+ hun "A '%-.200s' file nem hozhato letre (hibakod: %d)"
+ ita "Impossibile creare il file '%-.200s' (errno: %d)"
+ jpn "'%-.200s' ファイルãŒä½œã‚Œã¾ã›ã‚“ (errno: %d)"
+ kor "í™”ì¼ '%-.200s'를 만들지 못했습니다. (ì—러번호: %d)"
+ nor "Kan ikke opprette fila '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %d)"
+ pol "Nie można stworzyć pliku '%-.200s' (Kod błędu: %d)"
+ por "Não pode criar o arquivo '%-.200s' (erro no. %d)"
+ rum "Nu pot sa creez fisierul '%-.200s' (Eroare: %d)"
+ rus "Ðевозможно Ñоздать файл '%-.200s' (ошибка: %d)"
+ serbian "Ne mogu da kreiram file '%-.200s' (errno: %d)"
+ slo "Nemôžem vytvoriť súbor '%-.200s' (chybový kód: %d)"
+ spa "No puedo crear archivo '%-.200s' (Error: %d)"
+ swe "Kan inte skapa filen '%-.200s' (Felkod: %d)"
+ ukr "Ðе можу Ñтворити файл '%-.200s' (помилка: %d)"
+ER_CANT_CREATE_TABLE
+ cze "Nemohu vytvo-Břit tabulku '%-.200s' (chybový kód: %d)"
+ dan "Kan ikke oprette tabellen '%-.200s' (Fejlkode: %d)"
+ nla "Kan tabel '%-.200s' niet aanmaken (Errcode: %d)"
+ eng "Can't create table '%-.200s' (errno: %d)"
+ jps "'%-.200s' テーブルãŒä½œã‚Œã¾ã›ã‚“.(errno: %d)",
+ est "Ei suuda luua tabelit '%-.200s' (veakood: %d)"
+ fre "Ne peut créer la table '%-.200s' (Errcode: %d)"
+ ger "Kann Tabelle '%-.200s' nicht erzeugen (Fehler: %d)"
+ greek "ΑδÏνατη η δημιουÏγία του πίνακα '%-.200s' (κωδικός λάθους: %d)"
+ hun "A '%-.200s' tabla nem hozhato letre (hibakod: %d)"
+ ita "Impossibile creare la tabella '%-.200s' (errno: %d)"
+ jpn "'%-.200s' テーブルãŒä½œã‚Œã¾ã›ã‚“.(errno: %d)"
+ kor "í…Œì´ë¸” '%-.200s'를 만들지 못했습니다. (ì—러번호: %d)"
+ nor "Kan ikke opprette tabellen '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje opprette tabellen '%-.200s' (Feilkode: %d)"
+ pol "Nie można stworzyć tabeli '%-.200s' (Kod błędu: %d)"
+ por "Não pode criar a tabela '%-.200s' (erro no. %d)"
+ rum "Nu pot sa creez tabla '%-.200s' (Eroare: %d)"
+ rus "Ðевозможно Ñоздать таблицу '%-.200s' (ошибка: %d)"
+ serbian "Ne mogu da kreiram tabelu '%-.200s' (errno: %d)"
+ slo "Nemôžem vytvoriť tabuľku '%-.200s' (chybový kód: %d)"
+ spa "No puedo crear tabla '%-.200s' (Error: %d)"
+ swe "Kan inte skapa tabellen '%-.200s' (Felkod: %d)"
+ ukr "Ðе можу Ñтворити таблицю '%-.200s' (помилка: %d)"
+ER_CANT_CREATE_DB
+ cze "Nemohu vytvo-Břit databázi '%-.192s' (chybový kód: %d)"
+ dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %d)"
+ nla "Kan database '%-.192s' niet aanmaken (Errcode: %d)"
+ eng "Can't create database '%-.192s' (errno: %d)"
+ jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %d)",
+ est "Ei suuda luua andmebaasi '%-.192s' (veakood: %d)"
+ fre "Ne peut créer la base '%-.192s' (Erreur %d)"
+ ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %d)"
+ greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομένων '%-.192s' (κωδικός λάθους: %d)"
+ hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %d)"
+ ita "Impossibile creare il database '%-.192s' (errno: %d)"
+ jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %d)"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 만들지 못했습니다.. (ì—러번호: %d)"
+ nor "Kan ikke opprette databasen '%-.192s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje opprette databasen '%-.192s' (Feilkode: %d)"
+ pol "Nie można stworzyć bazy danych '%-.192s' (Kod błędu: %d)"
+ por "Não pode criar o banco de dados '%-.192s' (erro no. %d)"
+ rum "Nu pot sa creez baza de date '%-.192s' (Eroare: %d)"
+ rus "Ðевозможно Ñоздать базу данных '%-.192s' (ошибка: %d)"
+ serbian "Ne mogu da kreiram bazu '%-.192s' (errno: %d)"
+ slo "Nemôžem vytvoriť databázu '%-.192s' (chybový kód: %d)"
+ spa "No puedo crear base de datos '%-.192s' (Error: %d)"
+ swe "Kan inte skapa databasen '%-.192s' (Felkod: %d)"
+ ukr "Ðе можу Ñтворити базу данних '%-.192s' (помилка: %d)"
+ER_DB_CREATE_EXISTS
+ cze "Nemohu vytvo-Břit databázi '%-.192s'; databáze již existuje"
+ dan "Kan ikke oprette databasen '%-.192s'; databasen eksisterer"
+ nla "Kan database '%-.192s' niet aanmaken; database bestaat reeds"
+ eng "Can't create database '%-.192s'; database exists"
+ jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“.æ—¢ã«ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒå­˜åœ¨ã—ã¾ã™",
+ est "Ei suuda luua andmebaasi '%-.192s': andmebaas juba eksisteerib"
+ fre "Ne peut créer la base '%-.192s'; elle existe déjà"
+ ger "Kann Datenbank '%-.192s' nicht erzeugen. Datenbank existiert bereits"
+ greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομένων '%-.192s'; Η βάση δεδομένων υπάÏχει ήδη"
+ hun "Az '%-.192s' adatbazis nem hozhato letre Az adatbazis mar letezik"
+ ita "Impossibile creare il database '%-.192s'; il database esiste"
+ jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“.æ—¢ã«ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒå­˜åœ¨ã—ã¾ã™"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 만들지 못했습니다.. ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 존재함"
+ nor "Kan ikke opprette databasen '%-.192s'; databasen eksisterer"
+ norwegian-ny "Kan ikkje opprette databasen '%-.192s'; databasen eksisterer"
+ pol "Nie można stworzyć bazy danych '%-.192s'; baza danych już istnieje"
+ por "Não pode criar o banco de dados '%-.192s'; este banco de dados já existe"
+ rum "Nu pot sa creez baza de date '%-.192s'; baza de date exista deja"
+ rus "Ðевозможно Ñоздать базу данных '%-.192s'. База данных уже ÑущеÑтвует"
+ serbian "Ne mogu da kreiram bazu '%-.192s'; baza već postoji."
+ slo "Nemôžem vytvoriť databázu '%-.192s'; databáza existuje"
+ spa "No puedo crear base de datos '%-.192s'; la base de datos ya existe"
+ swe "Databasen '%-.192s' existerar redan"
+ ukr "Ðе можу Ñтворити базу данних '%-.192s'. База данних Ñ–Ñнує"
+ER_DB_DROP_EXISTS
+ cze "Nemohu zru-Bšit databázi '%-.192s', databáze neexistuje"
+ dan "Kan ikke slette (droppe) '%-.192s'; databasen eksisterer ikke"
+ nla "Kan database '%-.192s' niet verwijderen; database bestaat niet"
+ eng "Can't drop database '%-.192s'; database doesn't exist"
+ jps "'%-.192s' データベースを破棄ã§ãã¾ã›ã‚“. ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒãªã„ã®ã§ã™.",
+ est "Ei suuda kustutada andmebaasi '%-.192s': andmebaasi ei eksisteeri"
+ fre "Ne peut effacer la base '%-.192s'; elle n'existe pas"
+ ger "Kann Datenbank '%-.192s' nicht löschen; Datenbank nicht vorhanden"
+ greek "ΑδÏνατη η διαγÏαφή της βάσης δεδομένων '%-.192s'. Η βάση δεδομένων δεν υπάÏχει"
+ hun "A(z) '%-.192s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik"
+ ita "Impossibile cancellare '%-.192s'; il database non esiste"
+ jpn "'%-.192s' データベースを破棄ã§ãã¾ã›ã‚“. ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒãªã„ã®ã§ã™."
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 제거하지 못했습니다. ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 존재하지 ì•ŠìŒ "
+ nor "Kan ikke fjerne (drop) '%-.192s'; databasen eksisterer ikke"
+ norwegian-ny "Kan ikkje fjerne (drop) '%-.192s'; databasen eksisterer ikkje"
+ pol "Nie można usun?ć bazy danych '%-.192s'; baza danych nie istnieje"
+ por "Não pode eliminar o banco de dados '%-.192s'; este banco de dados não existe"
+ rum "Nu pot sa drop baza de date '%-.192s'; baza da date este inexistenta"
+ rus "Ðевозможно удалить базу данных '%-.192s'. Такой базы данных нет"
+ serbian "Ne mogu da izbrišem bazu '%-.192s'; baza ne postoji."
+ slo "Nemôžem zmazať databázu '%-.192s'; databáza neexistuje"
+ spa "No puedo eliminar base de datos '%-.192s'; la base de datos no existe"
+ swe "Kan inte radera databasen '%-.192s'; databasen finns inte"
+ ukr "Ðе можу видалити базу данних '%-.192s'. База данних не Ñ–Ñнує"
+ER_DB_DROP_DELETE
+ cze "Chyba p-Bři rušení databáze (nemohu vymazat '%-.192s', chyba %d)"
+ dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.192s', Fejlkode %d)"
+ nla "Fout bij verwijderen database (kan '%-.192s' niet verwijderen, Errcode: %d)"
+ eng "Error dropping database (can't delete '%-.192s', errno: %d)"
+ jps "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %d)",
+ est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.192s', veakood: %d)"
+ fre "Ne peut effacer la base '%-.192s' (erreur %d)"
+ ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %d)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή '%-.192s', κωδικός λάθους: %d)"
+ hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %d)"
+ ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %d)"
+ jpn "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %d)"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러('%-.192s'를 삭제할 수 ì—†ì니다, ì—러번호: %d)"
+ nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %d)"
+ norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %d)"
+ pol "Bł?d podczas usuwania bazy danych (nie można usun?ć '%-.192s', bł?d %d)"
+ por "Erro ao eliminar banco de dados (não pode eliminar '%-.192s' - erro no. %d)"
+ rum "Eroare dropuind baza de date (nu pot sa sterg '%-.192s', Eroare: %d)"
+ rus "Ошибка при удалении базы данных (невозможно удалить '%-.192s', ошибка: %d)"
+ serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.192s', errno: %d)"
+ slo "Chyba pri mazaní databázy (nemôžem zmazať '%-.192s', chybový kód: %d)"
+ spa "Error eliminando la base de datos(no puedo borrar '%-.192s', error %d)"
+ swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. Felkod: %d)"
+ ukr "Ðе можу видалити базу данних (Ðе можу видалити '%-.192s', помилка: %d)"
+ER_DB_DROP_RMDIR
+ cze "Chyba p-Bři rušení databáze (nemohu vymazat adresář '%-.192s', chyba %d)"
+ dan "Fejl ved sletting af database (kan ikke slette folderen '%-.192s', Fejlkode %d)"
+ nla "Fout bij verwijderen database (kan rmdir '%-.192s' niet uitvoeren, Errcode: %d)"
+ eng "Error dropping database (can't rmdir '%-.192s', errno: %d)"
+ jps "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %d)",
+ est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.192s', veakood: %d)"
+ fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %d)"
+ ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %d)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή του φακέλλου '%-.192s', κωδικός λάθους: %d)"
+ hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %d)"
+ ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %d)"
+ jpn "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %d)"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러(rmdir '%-.192s'를 í•  수 ì—†ì니다, ì—러번호: %d)"
+ nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %d)"
+ norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %d)"
+ pol "Bł?d podczas usuwania bazy danych (nie można wykonać rmdir '%-.192s', bł?d %d)"
+ por "Erro ao eliminar banco de dados (não pode remover diretório '%-.192s' - erro no. %d)"
+ rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.192s', Eroare: %d)"
+ rus "Ðевозможно удалить базу данных (невозможно удалить каталог '%-.192s', ошибка: %d)"
+ serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.192s', errno: %d)"
+ slo "Chyba pri mazaní databázy (nemôžem vymazať adresár '%-.192s', chybový kód: %d)"
+ spa "Error eliminando la base de datos (No puedo borrar directorio '%-.192s', error %d)"
+ swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. Felkod: %d)"
+ ukr "Ðе можу видалити базу данних (Ðе можу видалити теку '%-.192s', помилка: %d)"
+ER_CANT_DELETE_FILE
+ cze "Chyba p-Bři výmazu '%-.192s' (chybový kód: %d)"
+ dan "Fejl ved sletning af '%-.192s' (Fejlkode: %d)"
+ nla "Fout bij het verwijderen van '%-.192s' (Errcode: %d)"
+ eng "Error on delete of '%-.192s' (errno: %d)"
+ jps "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %d)",
+ est "Viga '%-.192s' kustutamisel (veakood: %d)"
+ fre "Erreur en effaçant '%-.192s' (Errcode: %d)"
+ ger "Fehler beim Löschen von '%-.192s' (Fehler: %d)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή '%-.192s' (κωδικός λάθους: %d)"
+ hun "Torlesi hiba: '%-.192s' (hibakod: %d)"
+ ita "Errore durante la cancellazione di '%-.192s' (errno: %d)"
+ jpn "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %d)"
+ kor "'%-.192s' ì‚­ì œ 중 ì—러 (ì—러번호: %d)"
+ nor "Feil ved sletting av '%-.192s' (Feilkode: %d)"
+ norwegian-ny "Feil ved sletting av '%-.192s' (Feilkode: %d)"
+ pol "Bł?d podczas usuwania '%-.192s' (Kod błędu: %d)"
+ por "Erro na remoção de '%-.192s' (erro no. %d)"
+ rum "Eroare incercind sa delete '%-.192s' (Eroare: %d)"
+ rus "Ошибка при удалении '%-.192s' (ошибка: %d)"
+ serbian "Greška pri brisanju '%-.192s' (errno: %d)"
+ slo "Chyba pri mazaní '%-.192s' (chybový kód: %d)"
+ spa "Error en el borrado de '%-.192s' (Error: %d)"
+ swe "Kan inte radera filen '%-.192s' (Felkod: %d)"
+ ukr "Ðе можу видалити '%-.192s' (помилка: %d)"
+ER_CANT_FIND_SYSTEM_REC
+ cze "Nemohu -BÄíst záznam v systémové tabulce"
+ dan "Kan ikke læse posten i systemfolderen"
+ nla "Kan record niet lezen in de systeem tabel"
+ eng "Can't read record in system table"
+ jps "system table ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’読む事ãŒã§ãã¾ã›ã‚“ã§ã—ãŸ",
+ est "Ei suuda lugeda kirjet süsteemsest tabelist"
+ fre "Ne peut lire un enregistrement de la table 'system'"
+ ger "Datensatz in der Systemtabelle nicht lesbar"
+ greek "ΑδÏνατη η ανάγνωση εγγÏαφής από πίνακα του συστήματος"
+ hun "Nem olvashato rekord a rendszertablaban"
+ ita "Impossibile leggere il record dalla tabella di sistema"
+ jpn "system table ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’読む事ãŒã§ãã¾ã›ã‚“ã§ã—ãŸ"
+ kor "system í…Œì´ë¸”ì—ì„œ 레코드를 ì½ì„ 수 없습니다."
+ nor "Kan ikke lese posten i systemkatalogen"
+ norwegian-ny "Kan ikkje lese posten i systemkatalogen"
+ pol "Nie można odczytać rekordu z tabeli systemowej"
+ por "Não pode ler um registro numa tabela do sistema"
+ rum "Nu pot sa citesc cimpurile in tabla de system (system table)"
+ rus "Ðевозможно прочитать запиÑÑŒ в ÑиÑтемной таблице"
+ serbian "Ne mogu da proÄitam slog iz sistemske tabele"
+ slo "Nemôžem ÄítaÅ¥ záznam v systémovej tabuľke"
+ spa "No puedo leer el registro en la tabla del sistema"
+ swe "Hittar inte posten i systemregistret"
+ ukr "Ðе можу зчитати Ð·Ð°Ð¿Ð¸Ñ Ð· ÑиÑтемної таблиці"
+ER_CANT_GET_STAT
+ cze "Nemohu z-Bískat stav '%-.200s' (chybový kód: %d)"
+ dan "Kan ikke læse status af '%-.200s' (Fejlkode: %d)"
+ nla "Kan de status niet krijgen van '%-.200s' (Errcode: %d)"
+ eng "Can't get status of '%-.200s' (errno: %d)"
+ jps "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %d)",
+ est "Ei suuda lugeda '%-.200s' olekut (veakood: %d)"
+ fre "Ne peut obtenir le status de '%-.200s' (Errcode: %d)"
+ ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %d)"
+ greek "ΑδÏνατη η λήψη πληÏοφοÏιών για την κατάσταση του '%-.200s' (κωδικός λάθους: %d)"
+ hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %d)"
+ ita "Impossibile leggere lo stato di '%-.200s' (errno: %d)"
+ jpn "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %d)"
+ kor "'%-.200s'ì˜ ìƒíƒœë¥¼ 얻지 못했습니다. (ì—러번호: %d)"
+ nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %d)"
+ pol "Nie można otrzymać statusu '%-.200s' (Kod błędu: %d)"
+ por "Não pode obter o status de '%-.200s' (erro no. %d)"
+ rum "Nu pot sa obtin statusul lui '%-.200s' (Eroare: %d)"
+ rus "Ðевозможно получить ÑтатуÑную информацию о '%-.200s' (ошибка: %d)"
+ serbian "Ne mogu da dobijem stanje file-a '%-.200s' (errno: %d)"
+ slo "Nemôžem zistiť stav '%-.200s' (chybový kód: %d)"
+ spa "No puedo obtener el estado de '%-.200s' (Error: %d)"
+ swe "Kan inte läsa filinformationen (stat) från '%-.200s' (Felkod: %d)"
+ ukr "Ðе можу отримати ÑÑ‚Ð°Ñ‚ÑƒÑ '%-.200s' (помилка: %d)"
+ER_CANT_GET_WD
+ cze "Chyba p-Bři zjišťování pracovní adresář (chybový kód: %d)"
+ dan "Kan ikke læse aktive folder (Fejlkode: %d)"
+ nla "Kan de werkdirectory niet krijgen (Errcode: %d)"
+ eng "Can't get working directory (errno: %d)"
+ jps "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %d)",
+ est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)"
+ fre "Ne peut obtenir le répertoire de travail (Errcode: %d)"
+ ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)"
+ greek "Ο φάκελλος εÏγασίας δεν βÏέθηκε (κωδικός λάθους: %d)"
+ hun "A munkakonyvtar nem allapithato meg (hibakod: %d)"
+ ita "Impossibile leggere la directory di lavoro (errno: %d)"
+ jpn "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %d)"
+ kor "수행 디렉토리를 찾지 못했습니다. (ì—러번호: %d)"
+ nor "Kan ikke lese aktiv katalog(Feilkode: %d)"
+ norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)"
+ pol "Nie można rozpoznać aktualnego katalogu (Kod błędu: %d)"
+ por "Não pode obter o diretório corrente (erro no. %d)"
+ rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)"
+ rus "Ðевозможно определить рабочий каталог (ошибка: %d)"
+ serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)"
+ slo "Nemôžem zistiť pracovný adresár (chybový kód: %d)"
+ spa "No puedo acceder al directorio (Error: %d)"
+ swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)"
+ ukr "Ðе можу визначити робочу теку (помилка: %d)"
+ER_CANT_LOCK
+ cze "Nemohu uzamknout soubor (chybov-Bý kód: %d)"
+ dan "Kan ikke låse fil (Fejlkode: %d)"
+ nla "Kan de file niet blokeren (Errcode: %d)"
+ eng "Can't lock file (errno: %d)"
+ jps "ファイルをロックã§ãã¾ã›ã‚“ (errno: %d)",
+ est "Ei suuda lukustada faili (veakood: %d)"
+ fre "Ne peut verrouiller le fichier (Errcode: %d)"
+ ger "Datei kann nicht gesperrt werden (Fehler: %d)"
+ greek "Το αÏχείο δεν μποÏεί να κλειδωθεί (κωδικός λάθους: %d)"
+ hun "A file nem zarolhato. (hibakod: %d)"
+ ita "Impossibile il locking il file (errno: %d)"
+ jpn "ファイルをロックã§ãã¾ã›ã‚“ (errno: %d)"
+ kor "í™”ì¼ì„ 잠그지(lock) 못했습니다. (ì—러번호: %d)"
+ nor "Kan ikke låse fila (Feilkode: %d)"
+ norwegian-ny "Kan ikkje låse fila (Feilkode: %d)"
+ pol "Nie można zablokować pliku (Kod błędu: %d)"
+ por "Não pode travar o arquivo (erro no. %d)"
+ rum "Nu pot sa lock fisierul (Eroare: %d)"
+ rus "Ðевозможно поÑтавить блокировку на файле (ошибка: %d)"
+ serbian "Ne mogu da zakljuÄam file (errno: %d)"
+ slo "Nemôžem zamknúť súbor (chybový kód: %d)"
+ spa "No puedo bloquear archivo: (Error: %d)"
+ swe "Kan inte låsa filen. (Felkod: %d)"
+ ukr "Ðе можу заблокувати файл (помилка: %d)"
+ER_CANT_OPEN_FILE
+ cze "Nemohu otev-Břít soubor '%-.200s' (chybový kód: %d)"
+ dan "Kan ikke åbne fil: '%-.200s' (Fejlkode: %d)"
+ nla "Kan de file '%-.200s' niet openen (Errcode: %d)"
+ eng "Can't open file: '%-.200s' (errno: %d)"
+ jps "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)",
+ est "Ei suuda avada faili '%-.200s' (veakood: %d)"
+ fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %d)"
+ ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %d)"
+ greek "Δεν είναι δυνατό να ανοιχτεί το αÏχείο: '%-.200s' (κωδικός λάθους: %d)"
+ hun "A '%-.200s' file nem nyithato meg (hibakod: %d)"
+ ita "Impossibile aprire il file: '%-.200s' (errno: %d)"
+ jpn "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)"
+ kor "í™”ì¼ì„ 열지 못했습니다.: '%-.200s' (ì—러번호: %d)"
+ nor "Kan ikke åpne fila: '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje åpne fila: '%-.200s' (Feilkode: %d)"
+ pol "Nie można otworzyć pliku: '%-.200s' (Kod błędu: %d)"
+ por "Não pode abrir o arquivo '%-.200s' (erro no. %d)"
+ rum "Nu pot sa deschid fisierul: '%-.200s' (Eroare: %d)"
+ rus "Ðевозможно открыть файл: '%-.200s' (ошибка: %d)"
+ serbian "Ne mogu da otvorim file: '%-.200s' (errno: %d)"
+ slo "Nemôžem otvoriť súbor: '%-.200s' (chybový kód: %d)"
+ spa "No puedo abrir archivo: '%-.200s' (Error: %d)"
+ swe "Kan inte använda '%-.200s' (Felkod: %d)"
+ ukr "Ðе можу відкрити файл: '%-.200s' (помилка: %d)"
+ER_FILE_NOT_FOUND
+ cze "Nemohu naj-Bít soubor '%-.200s' (chybový kód: %d)"
+ dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %d)"
+ nla "Kan de file: '%-.200s' niet vinden (Errcode: %d)"
+ eng "Can't find file: '%-.200s' (errno: %d)"
+ jps "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %d)",
+ est "Ei suuda leida faili '%-.200s' (veakood: %d)"
+ fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %d)"
+ ger "Kann Datei '%-.200s' nicht finden (Fehler: %d)"
+ greek "Δεν βÏέθηκε το αÏχείο: '%-.200s' (κωδικός λάθους: %d)"
+ hun "A(z) '%-.200s' file nem talalhato (hibakod: %d)"
+ ita "Impossibile trovare il file: '%-.200s' (errno: %d)"
+ jpn "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %d)"
+ kor "í™”ì¼ì„ 찾지 못했습니다.: '%-.200s' (ì—러번호: %d)"
+ nor "Kan ikke finne fila: '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %d)"
+ pol "Nie można znaleĽć pliku: '%-.200s' (Kod błędu: %d)"
+ por "Não pode encontrar o arquivo '%-.200s' (erro no. %d)"
+ rum "Nu pot sa gasesc fisierul: '%-.200s' (Eroare: %d)"
+ rus "Ðевозможно найти файл: '%-.200s' (ошибка: %d)"
+ serbian "Ne mogu da pronađem file: '%-.200s' (errno: %d)"
+ slo "Nemôžem nájsť súbor: '%-.200s' (chybový kód: %d)"
+ spa "No puedo encontrar archivo: '%-.200s' (Error: %d)"
+ swe "Hittar inte filen '%-.200s' (Felkod: %d)"
+ ukr "Ðе можу знайти файл: '%-.200s' (помилка: %d)"
+ER_CANT_READ_DIR
+ cze "Nemohu -BÄíst adresář '%-.192s' (chybový kód: %d)"
+ dan "Kan ikke læse folder '%-.192s' (Fejlkode: %d)"
+ nla "Kan de directory niet lezen van '%-.192s' (Errcode: %d)"
+ eng "Can't read dir of '%-.192s' (errno: %d)"
+ jps "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %d)",
+ est "Ei suuda lugeda kataloogi '%-.192s' (veakood: %d)"
+ fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %d)"
+ ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %d)"
+ greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %d)"
+ hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %d)"
+ ita "Impossibile leggere la directory di '%-.192s' (errno: %d)"
+ jpn "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %d)"
+ kor "'%-.192s'디렉토리를 ì½ì§€ 못했습니다. (ì—러번호: %d)"
+ nor "Kan ikke lese katalogen '%-.192s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje lese katalogen '%-.192s' (Feilkode: %d)"
+ pol "Nie można odczytać katalogu '%-.192s' (Kod błędu: %d)"
+ por "Não pode ler o diretório de '%-.192s' (erro no. %d)"
+ rum "Nu pot sa citesc directorul '%-.192s' (Eroare: %d)"
+ rus "Ðевозможно прочитать каталог '%-.192s' (ошибка: %d)"
+ serbian "Ne mogu da proÄitam direktorijum '%-.192s' (errno: %d)"
+ slo "Nemôžem ÄítaÅ¥ adresár '%-.192s' (chybový kód: %d)"
+ spa "No puedo leer el directorio de '%-.192s' (Error: %d)"
+ swe "Kan inte läsa från bibliotek '%-.192s' (Felkod: %d)"
+ ukr "Ðе можу прочитати теку '%-.192s' (помилка: %d)"
+ER_CANT_SET_WD
+ cze "Nemohu zm-Běnit adresář na '%-.192s' (chybový kód: %d)"
+ dan "Kan ikke skifte folder til '%-.192s' (Fejlkode: %d)"
+ nla "Kan de directory niet veranderen naar '%-.192s' (Errcode: %d)"
+ eng "Can't change dir to '%-.192s' (errno: %d)"
+ jps "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %d)",
+ est "Ei suuda siseneda kataloogi '%-.192s' (veakood: %d)"
+ fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %d)"
+ ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %d)"
+ greek "ΑδÏνατη η αλλαγή του Ï„Ïέχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %d)"
+ hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %d)"
+ ita "Impossibile cambiare la directory in '%-.192s' (errno: %d)"
+ jpn "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %d)"
+ kor "'%-.192s'디렉토리로 ì´ë™í•  수 없었습니다. (ì—러번호: %d)"
+ nor "Kan ikke skifte katalog til '%-.192s' (Feilkode: %d)"
+ norwegian-ny "Kan ikkje skifte katalog til '%-.192s' (Feilkode: %d)"
+ pol "Nie można zmienić katalogu na '%-.192s' (Kod błędu: %d)"
+ por "Não pode mudar para o diretório '%-.192s' (erro no. %d)"
+ rum "Nu pot sa schimb directorul '%-.192s' (Eroare: %d)"
+ rus "Ðевозможно перейти в каталог '%-.192s' (ошибка: %d)"
+ serbian "Ne mogu da promenim direktorijum na '%-.192s' (errno: %d)"
+ slo "Nemôžem vojsť do adresára '%-.192s' (chybový kód: %d)"
+ spa "No puedo cambiar al directorio de '%-.192s' (Error: %d)"
+ swe "Kan inte byta till '%-.192s' (Felkod: %d)"
+ ukr "Ðе можу перейти у теку '%-.192s' (помилка: %d)"
+ER_CHECKREAD
+ cze "Z-Báznam byl zmÄ›nÄ›n od posledního Ätení v tabulce '%-.192s'"
+ dan "Posten er ændret siden sidste læsning '%-.192s'"
+ nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.192s'"
+ eng "Record has changed since last read in table '%-.192s'"
+ est "Kirje tabelis '%-.192s' on muutunud viimasest lugemisest saadik"
+ fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.192s'"
+ ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.192s' geändert"
+ greek "Η εγγÏαφή έχει αλλάξει από την τελευταία φοÏά που ανασÏÏθηκε από τον πίνακα '%-.192s'"
+ hun "A(z) '%-.192s' tablaban talalhato rekord megvaltozott az utolso olvasas ota"
+ ita "Il record e` cambiato dall'ultima lettura della tabella '%-.192s'"
+ kor "í…Œì´ë¸” '%-.192s'ì—ì„œ 마지막으로 ì½ì€ 후 Recordê°€ 변경ë˜ì—ˆìŠµë‹ˆë‹¤."
+ nor "Posten har blitt endret siden den ble lest '%-.192s'"
+ norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.192s'"
+ pol "Rekord został zmieniony od ostaniego odczytania z tabeli '%-.192s'"
+ por "Registro alterado desde a última leitura da tabela '%-.192s'"
+ rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.192s'"
+ rus "ЗапиÑÑŒ изменилаÑÑŒ Ñ Ð¼Ð¾Ð¼ÐµÐ½Ñ‚Ð° поÑледней выборки в таблице '%-.192s'"
+ serbian "Slog je promenjen od zadnjeg Äitanja tabele '%-.192s'"
+ slo "Záznam bol zmenený od posledného Äítania v tabuľke '%-.192s'"
+ spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.192s'"
+ swe "Posten har förändrats sedan den lästes i register '%-.192s'"
+ ukr "Ð—Ð°Ð¿Ð¸Ñ Ð±ÑƒÐ»Ð¾ змінено з чаÑу оÑтаннього Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð· таблиці '%-.192s'"
+ER_DISK_FULL
+ cze "Disk je pln-Bý (%s), Äekám na uvolnÄ›ní nÄ›jakého místa ..."
+ dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..."
+ nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..."
+ eng "Disk full (%s); waiting for someone to free some space..."
+ jps "Disk full (%s). 誰ã‹ãŒä½•ã‹ã‚’減らã™ã¾ã§ã¾ã£ã¦ãã ã•ã„...",
+ est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..."
+ fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..."
+ ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ..."
+ greek "Δεν υπάÏχει χώÏος στο δίσκο (%s). ΠαÏακαλώ, πεÏιμένετε να ελευθεÏωθεί χώÏος..."
+ hun "A lemez megtelt (%s)."
+ ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..."
+ jpn "Disk full (%s). 誰ã‹ãŒä½•ã‹ã‚’減らã™ã¾ã§ã¾ã£ã¦ãã ã•ã„..."
+ kor "Disk full (%s). 다른 ì‚¬ëžŒì´ ì§€ìš¸ë•Œê¹Œì§€ 기다립니다..."
+ nor "Ikke mer diskplass (%s). Venter på å få frigjort plass..."
+ norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass..."
+ pol "Dysk pełny (%s). Oczekiwanie na zwolnienie miejsca..."
+ por "Disco cheio (%s). Aguardando alguém liberar algum espaço..."
+ rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..."
+ rus "ДиÑк заполнен. (%s). Ожидаем, пока кто-то не уберет поÑле ÑÐµÐ±Ñ Ð¼ÑƒÑор..."
+ serbian "Disk je pun (%s). Čekam nekoga da dođe i oslobodi nešto mesta..."
+ slo "Disk je plný (%s), Äakám na uvoľnenie miesta..."
+ spa "Disco lleno (%s). Esperando para que se libere algo de espacio..."
+ swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..."
+ ukr "ДиÑк заповнений (%s). Вичикую, доки звільнитьÑÑ Ñ‚Ñ€Ð¾Ñ…Ð¸ міÑцÑ..."
+ER_DUP_KEY 23000
+ cze "Nemohu zapsat, zdvojen-Bý klÃ­Ä v tabulce '%-.192s'"
+ dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.192s'"
+ nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.192s'"
+ eng "Can't write; duplicate key in table '%-.192s'"
+ jps "table '%-.192s' ã« key ãŒé‡è¤‡ã—ã¦ã„ã¦æ›¸ãã“ã‚ã¾ã›ã‚“",
+ est "Ei saa kirjutada, korduv võti tabelis '%-.192s'"
+ fre "Ecriture impossible, doublon dans une clé de la table '%-.192s'"
+ ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.192s'"
+ greek "Δεν είναι δυνατή η καταχώÏηση, η τιμή υπάÏχει ήδη στον πίνακα '%-.192s'"
+ hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban."
+ ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'"
+ jpn "table '%-.192s' ã« key ãŒé‡è¤‡ã—ã¦ã„ã¦æ›¸ãã“ã‚ã¾ã›ã‚“"
+ kor "기ë¡í•  수 ì—†ì니다., í…Œì´ë¸” '%-.192s'ì—ì„œ 중복 키"
+ nor "Kan ikke skrive, flere like nøkler i tabellen '%-.192s'"
+ norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.192s'"
+ pol "Nie można zapisać, powtórzone klucze w tabeli '%-.192s'"
+ por "Não pode gravar. Chave duplicada na tabela '%-.192s'"
+ rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.192s'"
+ rus "Ðевозможно произвеÑти запиÑÑŒ, дублирующийÑÑ ÐºÐ»ÑŽÑ‡ в таблице '%-.192s'"
+ serbian "Ne mogu da piÅ¡em poÅ¡to postoji duplirani kljuÄ u tabeli '%-.192s'"
+ slo "Nemôžem zapísaÅ¥, duplikát kľúÄa v tabuľke '%-.192s'"
+ spa "No puedo escribir, clave duplicada en la tabla '%-.192s'"
+ swe "Kan inte skriva, dubbel söknyckel i register '%-.192s'"
+ ukr "Ðе можу запиÑати, дублюючийÑÑ ÐºÐ»ÑŽÑ‡ в таблиці '%-.192s'"
+ER_ERROR_ON_CLOSE
+ cze "Chyba p-Bři zavírání '%-.192s' (chybový kód: %d)"
+ dan "Fejl ved lukning af '%-.192s' (Fejlkode: %d)"
+ nla "Fout bij het sluiten van '%-.192s' (Errcode: %d)"
+ eng "Error on close of '%-.192s' (errno: %d)"
+ est "Viga faili '%-.192s' sulgemisel (veakood: %d)"
+ fre "Erreur a la fermeture de '%-.192s' (Errcode: %d)"
+ ger "Fehler beim Schließen von '%-.192s' (Fehler: %d)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %d)"
+ hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %d)"
+ ita "Errore durante la chiusura di '%-.192s' (errno: %d)"
+ kor "'%-.192s'닫는 중 ì—러 (ì—러번호: %d)"
+ nor "Feil ved lukking av '%-.192s' (Feilkode: %d)"
+ norwegian-ny "Feil ved lukking av '%-.192s' (Feilkode: %d)"
+ pol "Bł?d podczas zamykania '%-.192s' (Kod błędu: %d)"
+ por "Erro ao fechar '%-.192s' (erro no. %d)"
+ rum "Eroare inchizind '%-.192s' (errno: %d)"
+ rus "Ошибка при закрытии '%-.192s' (ошибка: %d)"
+ serbian "Greška pri zatvaranju '%-.192s' (errno: %d)"
+ slo "Chyba pri zatváraní '%-.192s' (chybový kód: %d)"
+ spa "Error en el cierre de '%-.192s' (Error: %d)"
+ swe "Fick fel vid stängning av '%-.192s' (Felkod: %d)"
+ ukr "Ðе можу закрити '%-.192s' (помилка: %d)"
+ER_ERROR_ON_READ
+ cze "Chyba p-BÅ™i Ätení souboru '%-.200s' (chybový kód: %d)"
+ dan "Fejl ved læsning af '%-.200s' (Fejlkode: %d)"
+ nla "Fout bij het lezen van file '%-.200s' (Errcode: %d)"
+ eng "Error reading file '%-.200s' (errno: %d)"
+ jps "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %d)",
+ est "Viga faili '%-.200s' lugemisel (veakood: %d)"
+ fre "Erreur en lecture du fichier '%-.200s' (Errcode: %d)"
+ ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %d)"
+ greek "ΠÏόβλημα κατά την ανάγνωση του αÏχείου '%-.200s' (κωδικός λάθους: %d)"
+ hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %d)"
+ ita "Errore durante la lettura del file '%-.200s' (errno: %d)"
+ jpn "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %d)"
+ kor "'%-.200s'í™”ì¼ ì½ê¸° ì—러 (ì—러번호: %d)"
+ nor "Feil ved lesing av '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %d)"
+ pol "Bł?d podczas odczytu pliku '%-.200s' (Kod błędu: %d)"
+ por "Erro ao ler arquivo '%-.200s' (erro no. %d)"
+ rum "Eroare citind fisierul '%-.200s' (errno: %d)"
+ rus "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° '%-.200s' (ошибка: %d)"
+ serbian "GreÅ¡ka pri Äitanju file-a '%-.200s' (errno: %d)"
+ slo "Chyba pri Äítaní súboru '%-.200s' (chybový kód: %d)"
+ spa "Error leyendo el fichero '%-.200s' (Error: %d)"
+ swe "Fick fel vid läsning av '%-.200s' (Felkod %d)"
+ ukr "Ðе можу прочитати файл '%-.200s' (помилка: %d)"
+ER_ERROR_ON_RENAME
+ cze "Chyba p-Bři přejmenování '%-.210s' na '%-.210s' (chybový kód: %d)"
+ dan "Fejl ved omdøbning af '%-.210s' til '%-.210s' (Fejlkode: %d)"
+ nla "Fout bij het hernoemen van '%-.210s' naar '%-.210s' (Errcode: %d)"
+ eng "Error on rename of '%-.210s' to '%-.210s' (errno: %d)"
+ jps "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %d)",
+ est "Viga faili '%-.210s' ümbernimetamisel '%-.210s'-ks (veakood: %d)"
+ fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %d)"
+ ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %d)"
+ greek "ΠÏόβλημα κατά την μετονομασία του αÏχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %d)"
+ hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. (hibakod: %d)"
+ ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %d)"
+ jpn "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %d)"
+ kor "'%-.210s'를 '%-.210s'ë¡œ ì´ë¦„ 변경중 ì—러 (ì—러번호: %d)"
+ nor "Feil ved omdøping av '%-.210s' til '%-.210s' (Feilkode: %d)"
+ norwegian-ny "Feil ved omdøyping av '%-.210s' til '%-.210s' (Feilkode: %d)"
+ pol "Bł?d podczas zmieniania nazwy '%-.210s' na '%-.210s' (Kod błędu: %d)"
+ por "Erro ao renomear '%-.210s' para '%-.210s' (erro no. %d)"
+ rum "Eroare incercind sa renumesc '%-.210s' in '%-.210s' (errno: %d)"
+ rus "Ошибка при переименовании '%-.210s' в '%-.210s' (ошибка: %d)"
+ serbian "Greška pri promeni imena '%-.210s' na '%-.210s' (errno: %d)"
+ slo "Chyba pri premenovávaní '%-.210s' na '%-.210s' (chybový kód: %d)"
+ spa "Error en el renombrado de '%-.210s' a '%-.210s' (Error: %d)"
+ swe "Kan inte byta namn från '%-.210s' till '%-.210s' (Felkod: %d)"
+ ukr "Ðе можу перейменувати '%-.210s' у '%-.210s' (помилка: %d)"
+ER_ERROR_ON_WRITE
+ cze "Chyba p-Bři zápisu do souboru '%-.200s' (chybový kód: %d)"
+ dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %d)"
+ nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %d)"
+ eng "Error writing file '%-.200s' (errno: %d)"
+ jps "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)",
+ est "Viga faili '%-.200s' kirjutamisel (veakood: %d)"
+ fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %d)"
+ ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %d)"
+ greek "ΠÏόβλημα κατά την αποθήκευση του αÏχείου '%-.200s' (κωδικός λάθους: %d)"
+ hun "Hiba a '%-.200s' file irasakor. (hibakod: %d)"
+ ita "Errore durante la scrittura del file '%-.200s' (errno: %d)"
+ jpn "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)"
+ kor "'%-.200s'í™”ì¼ ê¸°ë¡ ì¤‘ ì—러 (ì—러번호: %d)"
+ nor "Feil ved skriving av fila '%-.200s' (Feilkode: %d)"
+ norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %d)"
+ pol "Bł?d podczas zapisywania pliku '%-.200s' (Kod błędu: %d)"
+ por "Erro ao gravar arquivo '%-.200s' (erro no. %d)"
+ rum "Eroare scriind fisierul '%-.200s' (errno: %d)"
+ rus "Ошибка запиÑи в файл '%-.200s' (ошибка: %d)"
+ serbian "Greška pri upisu '%-.200s' (errno: %d)"
+ slo "Chyba pri zápise do súboru '%-.200s' (chybový kód: %d)"
+ spa "Error escribiendo el archivo '%-.200s' (Error: %d)"
+ swe "Fick fel vid skrivning till '%-.200s' (Felkod %d)"
+ ukr "Ðе можу запиÑати файл '%-.200s' (помилка: %d)"
+ER_FILE_USED
+ cze "'%-.192s' je zam-BÄen proti zmÄ›nám"
+ dan "'%-.192s' er låst mod opdateringer"
+ nla "'%-.192s' is geblokeerd tegen veranderingen"
+ eng "'%-.192s' is locked against change"
+ jps "'%-.192s' ã¯ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™",
+ est "'%-.192s' on lukustatud muudatuste vastu"
+ fre "'%-.192s' est verrouillé contre les modifications"
+ ger "'%-.192s' ist für Änderungen gesperrt"
+ greek "'%-.192s' δεν επιτÏέπονται αλλαγές"
+ hun "'%-.192s' a valtoztatas ellen zarolva"
+ ita "'%-.192s' e` soggetto a lock contro i cambiamenti"
+ jpn "'%-.192s' ã¯ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™"
+ kor "'%-.192s'ê°€ 변경할 수 ì—†ë„ë¡ ìž ê²¨ìžˆì니다."
+ nor "'%-.192s' er låst mot oppdateringer"
+ norwegian-ny "'%-.192s' er låst mot oppdateringar"
+ pol "'%-.192s' jest zablokowany na wypadek zmian"
+ por "'%-.192s' está com travamento contra alterações"
+ rum "'%-.192s' este blocat pentry schimbari (loccked against change)"
+ rus "'%-.192s' заблокирован Ð´Ð»Ñ Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ð¹"
+ serbian "'%-.192s' je zakljuÄan za upis"
+ slo "'%-.192s' je zamknutý proti zmenám"
+ spa "'%-.192s' esta bloqueado contra cambios"
+ swe "'%-.192s' är låst mot användning"
+ ukr "'%-.192s' заблокований на внеÑÐµÐ½Ð½Ñ Ð·Ð¼Ñ–Ð½"
+ER_FILSORT_ABORT
+ cze "T-Břídění přerušeno"
+ dan "Sortering afbrudt"
+ nla "Sorteren afgebroken"
+ eng "Sort aborted"
+ jps "Sort 中断",
+ est "Sorteerimine katkestatud"
+ fre "Tri alphabétique abandonné"
+ ger "Sortiervorgang abgebrochen"
+ greek "Η διαδικασία ταξινόμισης ακυÏώθηκε"
+ hun "Sikertelen rendezes"
+ ita "Operazione di ordinamento abbandonata"
+ jpn "Sort 中断"
+ kor "소트가 중단ë˜ì—ˆìŠµë‹ˆë‹¤."
+ nor "Sortering avbrutt"
+ norwegian-ny "Sortering avbrote"
+ pol "Sortowanie przerwane"
+ por "Ordenação abortada"
+ rum "Sortare intrerupta"
+ rus "Сортировка прервана"
+ serbian "Sortiranje je prekinuto"
+ slo "Triedenie prerušené"
+ spa "Ordeancion cancelada"
+ swe "Sorteringen avbruten"
+ ukr "Ð¡Ð¾Ñ€Ñ‚ÑƒÐ²Ð°Ð½Ð½Ñ Ð¿ÐµÑ€ÐµÑ€Ð²Ð°Ð½Ð¾"
+ER_FORM_NOT_FOUND
+ cze "Pohled '%-.192s' pro '%-.192s' neexistuje"
+ dan "View '%-.192s' eksisterer ikke for '%-.192s'"
+ nla "View '%-.192s' bestaat niet voor '%-.192s'"
+ eng "View '%-.192s' doesn't exist for '%-.192s'"
+ jps "View '%-.192s' ㌠'%-.192s' ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "Vaade '%-.192s' ei eksisteeri '%-.192s' jaoks"
+ fre "La vue (View) '%-.192s' n'existe pas pour '%-.192s'"
+ ger "View '%-.192s' existiert für '%-.192s' nicht"
+ greek "Το View '%-.192s' δεν υπάÏχει για '%-.192s'"
+ hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz"
+ ita "La view '%-.192s' non esiste per '%-.192s'"
+ jpn "View '%-.192s' ㌠'%-.192s' ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "ë·° '%-.192s'ê°€ '%-.192s'ì—서는 존재하지 ì•Šì니다."
+ nor "View '%-.192s' eksisterer ikke for '%-.192s'"
+ norwegian-ny "View '%-.192s' eksisterar ikkje for '%-.192s'"
+ pol "Widok '%-.192s' nie istnieje dla '%-.192s'"
+ por "Visão '%-.192s' não existe para '%-.192s'"
+ rum "View '%-.192s' nu exista pentru '%-.192s'"
+ rus "ПредÑтавление '%-.192s' не ÑущеÑтвует Ð´Ð»Ñ '%-.192s'"
+ serbian "View '%-.192s' ne postoji za '%-.192s'"
+ slo "Pohľad '%-.192s' neexistuje pre '%-.192s'"
+ spa "La vista '%-.192s' no existe para '%-.192s'"
+ swe "Formulär '%-.192s' finns inte i '%-.192s'"
+ ukr "ВиглÑд '%-.192s' не Ñ–Ñнує Ð´Ð»Ñ '%-.192s'"
+ER_GET_ERRNO
+ cze "Obsluha tabulky vr-Bátila chybu %d"
+ dan "Modtog fejl %d fra tabel håndteringen"
+ nla "Fout %d van tabel handler"
+ eng "Got error %d from storage engine"
+ est "Tabeli handler tagastas vea %d"
+ fre "Reçu l'erreur %d du handler de la table"
+ ger "Fehler %d (Speicher-Engine)"
+ greek "Ελήφθη μήνυμα λάθους %d από τον χειÏιστή πίνακα (table handler)"
+ hun "%d hibajelzes a tablakezelotol"
+ ita "Rilevato l'errore %d dal gestore delle tabelle"
+ jpn "Got error %d from table handler"
+ kor "í…Œì´ë¸” handlerì—ì„œ %d ì—러가 ë°œìƒ í•˜ì˜€ìŠµë‹ˆë‹¤."
+ nor "Mottok feil %d fra tabell håndterer"
+ norwegian-ny "Mottok feil %d fra tabell handterar"
+ pol "Otrzymano bł?d %d z obsługi tabeli"
+ por "Obteve erro %d no manipulador de tabelas"
+ rum "Eroarea %d obtinuta din handlerul tabelei"
+ rus "Получена ошибка %d от обработчика таблиц"
+ serbian "Handler tabela je vratio grešku %d"
+ slo "Obsluha tabuľky vrátila chybu %d"
+ spa "Error %d desde el manejador de la tabla"
+ swe "Fick felkod %d från databashanteraren"
+ ukr "Отримано помилку %d від деÑкриптора таблиці"
+ER_ILLEGAL_HA
+ cze "Obsluha tabulky '%-.192s' nem-Bá tento parametr"
+ dan "Denne mulighed eksisterer ikke for tabeltypen '%-.192s'"
+ nla "Tabel handler voor '%-.192s' heeft deze optie niet"
+ eng "Table storage engine for '%-.192s' doesn't have this option"
+ est "Tabeli '%-.192s' handler ei toeta antud operatsiooni"
+ fre "Le handler de la table '%-.192s' n'a pas cette option"
+ ger "Diese Option gibt es nicht (Speicher-Engine für '%-.192s')"
+ greek "Ο χειÏιστής πίνακα (table handler) για '%-.192s' δεν διαθέτει αυτή την επιλογή"
+ hun "A(z) '%-.192s' tablakezelonek nincs ilyen opcioja"
+ ita "Il gestore delle tabelle per '%-.192s' non ha questa opzione"
+ jpn "Table handler for '%-.192s' doesn't have this option"
+ kor "'%-.192s'ì˜ í…Œì´ë¸” handler는 ì´ëŸ¬í•œ ì˜µì…˜ì„ ì œê³µí•˜ì§€ ì•Šì니다."
+ nor "Tabell håndtereren for '%-.192s' har ikke denne muligheten"
+ norwegian-ny "Tabell håndteraren for '%-.192s' har ikkje denne moglegheita"
+ pol "Obsługa tabeli '%-.192s' nie posiada tej opcji"
+ por "Manipulador de tabela para '%-.192s' não tem esta opção"
+ rum "Handlerul tabelei pentru '%-.192s' nu are aceasta optiune"
+ rus "Обработчик таблицы '%-.192s' не поддерживает Ñту возможноÑÑ‚ÑŒ"
+ serbian "Handler tabela za '%-.192s' nema ovu opciju"
+ slo "Obsluha tabuľky '%-.192s' nemá tento parameter"
+ spa "El manejador de la tabla de '%-.192s' no tiene esta opcion"
+ swe "Tabellhanteraren for tabell '%-.192s' stödjer ej detta"
+ ukr "ДеÑкриптор таблиці '%-.192s' не має цієї влаÑтивоÑÑ‚Ñ–"
+ER_KEY_NOT_FOUND
+ cze "Nemohu naj-Bít záznam v '%-.192s'"
+ dan "Kan ikke finde posten i '%-.192s'"
+ nla "Kan record niet vinden in '%-.192s'"
+ eng "Can't find record in '%-.192s'"
+ jps "'%-.192s'ã®ãªã‹ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ä»˜ã‹ã‚Šã¾ã›ã‚“",
+ est "Ei suuda leida kirjet '%-.192s'-s"
+ fre "Ne peut trouver l'enregistrement dans '%-.192s'"
+ ger "Kann Datensatz in '%-.192s' nicht finden"
+ greek "ΑδÏνατη η ανεÏÏεση εγγÏαφής στο '%-.192s'"
+ hun "Nem talalhato a rekord '%-.192s'-ben"
+ ita "Impossibile trovare il record in '%-.192s'"
+ jpn "'%-.192s'ã®ãªã‹ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ä»˜ã‹ã‚Šã¾ã›ã‚“"
+ kor "'%-.192s'ì—ì„œ 레코드를 ì°¾ì„ ìˆ˜ ì—†ì니다."
+ nor "Kan ikke finne posten i '%-.192s'"
+ norwegian-ny "Kan ikkje finne posten i '%-.192s'"
+ pol "Nie można znaleĽć rekordu w '%-.192s'"
+ por "Não pode encontrar registro em '%-.192s'"
+ rum "Nu pot sa gasesc recordul in '%-.192s'"
+ rus "Ðевозможно найти запиÑÑŒ в '%-.192s'"
+ serbian "Ne mogu da pronađem slog u '%-.192s'"
+ slo "Nemôžem nájsť záznam v '%-.192s'"
+ spa "No puedo encontrar el registro en '%-.192s'"
+ swe "Hittar inte posten '%-.192s'"
+ ukr "Ðе можу запиÑати у '%-.192s'"
+ER_NOT_FORM_FILE
+ cze "Nespr-Bávná informace v souboru '%-.200s'"
+ dan "Forkert indhold i: '%-.200s'"
+ nla "Verkeerde info in file: '%-.200s'"
+ eng "Incorrect information in file: '%-.200s'"
+ jps "ファイル '%-.200s' ã® info ãŒé–“é•ã£ã¦ã„るよã†ã§ã™",
+ est "Vigane informatsioon failis '%-.200s'"
+ fre "Information erronnée dans le fichier: '%-.200s'"
+ ger "Falsche Information in Datei '%-.200s'"
+ greek "Λάθος πληÏοφοÏίες στο αÏχείο: '%-.200s'"
+ hun "Ervenytelen info a file-ban: '%-.200s'"
+ ita "Informazione errata nel file: '%-.200s'"
+ jpn "ファイル '%-.200s' ã® info ãŒé–“é•ã£ã¦ã„るよã†ã§ã™"
+ kor "í™”ì¼ì˜ 부정확한 ì •ë³´: '%-.200s'"
+ nor "Feil informasjon i filen: '%-.200s'"
+ norwegian-ny "Feil informasjon i fila: '%-.200s'"
+ pol "Niewła?ciwa informacja w pliku: '%-.200s'"
+ por "Informação incorreta no arquivo '%-.200s'"
+ rum "Informatie incorecta in fisierul: '%-.200s'"
+ rus "ÐÐµÐºÐ¾Ñ€Ñ€ÐµÐºÑ‚Ð½Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð² файле '%-.200s'"
+ serbian "Pogrešna informacija u file-u: '%-.200s'"
+ slo "Nesprávna informácia v súbore: '%-.200s'"
+ spa "Informacion erronea en el archivo: '%-.200s'"
+ swe "Felaktig fil: '%-.200s'"
+ ukr "Хибна Ñ–Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ñ–Ñ Ñƒ файлі: '%-.200s'"
+ER_NOT_KEYFILE
+ cze "Nespr-Bávný klÃ­Ä pro tabulku '%-.200s'; pokuste se ho opravit"
+ dan "Fejl i indeksfilen til tabellen '%-.200s'; prøv at reparere den"
+ nla "Verkeerde zoeksleutel file voor tabel: '%-.200s'; probeer het te repareren"
+ eng "Incorrect key file for table '%-.200s'; try to repair it"
+ jps "'%-.200s' テーブル㮠key file ãŒé–“é•ã£ã¦ã„るよã†ã§ã™. 修復をã—ã¦ãã ã•ã„",
+ est "Tabeli '%-.200s' võtmefail on vigane; proovi seda parandada"
+ fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer"
+ ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren"
+ greek "Λάθος αÏχείο ταξινόμισης (key file) για τον πίνακα: '%-.200s'; ΠαÏακαλώ, διοÏθώστε το!"
+ hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!"
+ ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo"
+ jpn "'%-.200s' テーブル㮠key file ãŒé–“é•ã£ã¦ã„るよã†ã§ã™. 修復をã—ã¦ãã ã•ã„"
+ kor "'%-.200s' í…Œì´ë¸”ì˜ ë¶€ì •í™•í•œ 키 존재. 수정하시오!"
+ nor "Tabellen '%-.200s' har feil i nøkkelfilen; forsøk å reparer den"
+ norwegian-ny "Tabellen '%-.200s' har feil i nykkelfila; prøv å reparere den"
+ pol "Niewła?ciwy plik kluczy dla tabeli: '%-.200s'; spróbuj go naprawić"
+ por "Arquivo de índice incorreto para tabela '%-.200s'; tente repará-lo"
+ rum "Cheia fisierului incorecta pentru tabela: '%-.200s'; incearca s-o repari"
+ rus "Ðекорректный индекÑный файл Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ‹: '%-.200s'. Попробуйте воÑÑтановить его"
+ serbian "Pogrešan key file za tabelu: '%-.200s'; probajte da ga ispravite"
+ slo "Nesprávny kÄ¾ÃºÄ pre tabuľku '%-.200s'; pokúste sa ho opraviÅ¥"
+ spa "Clave de archivo erronea para la tabla: '%-.200s'; intente repararlo"
+ swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation"
+ ukr "Хибний файл ключей Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ–: '%-.200s'; Спробуйте його відновити"
+ER_OLD_KEYFILE
+ cze "Star-Bý klíÄový soubor pro '%-.192s'; opravte ho."
+ dan "Gammel indeksfil for tabellen '%-.192s'; reparer den"
+ nla "Oude zoeksleutel file voor tabel '%-.192s'; repareer het!"
+ eng "Old key file for table '%-.192s'; repair it!"
+ jps "'%-.192s' テーブルã¯å¤ã„å½¢å¼ã® key file ã®ã‚ˆã†ã§ã™; 修復をã—ã¦ãã ã•ã„",
+ est "Tabeli '%-.192s' võtmefail on aegunud; paranda see!"
+ fre "Vieux fichier d'index pour la table '%-.192s'; réparez le!"
+ ger "Alte Index-Datei für Tabelle '%-.192s'. Bitte reparieren"
+ greek "Παλαιό αÏχείο ταξινόμισης (key file) για τον πίνακα '%-.192s'; ΠαÏακαλώ, διοÏθώστε το!"
+ hun "Regi kulcsfile a '%-.192s'tablahoz; probalja kijavitani!"
+ ita "File chiave vecchio per la tabella '%-.192s'; riparalo!"
+ jpn "'%-.192s' テーブルã¯å¤ã„å½¢å¼ã® key file ã®ã‚ˆã†ã§ã™; 修復をã—ã¦ãã ã•ã„"
+ kor "'%-.192s' í…Œì´ë¸”ì˜ ì´ì „ë²„ì ¼ì˜ í‚¤ 존재. 수정하시오!"
+ nor "Gammel nøkkelfil for tabellen '%-.192s'; reparer den!"
+ norwegian-ny "Gammel nykkelfil for tabellen '%-.192s'; reparer den!"
+ pol "Plik kluczy dla tabeli '%-.192s' jest starego typu; napraw go!"
+ por "Arquivo de índice desatualizado para tabela '%-.192s'; repare-o!"
+ rum "Cheia fisierului e veche pentru tabela '%-.192s'; repar-o!"
+ rus "Старый индекÑный файл Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ‹ '%-.192s'; отремонтируйте его!"
+ serbian "Zastareo key file za tabelu '%-.192s'; ispravite ga"
+ slo "Starý kľúÄový súbor pre '%-.192s'; opravte ho!"
+ spa "Clave de archivo antigua para la tabla '%-.192s'; reparelo!"
+ swe "Gammal nyckelfil '%-.192s'; reparera registret"
+ ukr "Старий файл ключей Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.192s'; Відновіть його!"
+ER_OPEN_AS_READONLY
+ cze "'%-.192s' je jen pro -BÄtení"
+ dan "'%-.192s' er skrivebeskyttet"
+ nla "'%-.192s' is alleen leesbaar"
+ eng "Table '%-.192s' is read only"
+ jps "'%-.192s' ã¯èª­ã¿è¾¼ã¿å°‚用ã§ã™",
+ est "Tabel '%-.192s' on ainult lugemiseks"
+ fre "'%-.192s' est en lecture seulement"
+ ger "Tabelle '%-.192s' ist nur lesbar"
+ greek "'%-.192s' επιτÏέπεται μόνο η ανάγνωση"
+ hun "'%-.192s' irasvedett"
+ ita "'%-.192s' e` di sola lettura"
+ jpn "'%-.192s' ã¯èª­ã¿è¾¼ã¿å°‚用ã§ã™"
+ kor "í…Œì´ë¸” '%-.192s'는 ì½ê¸°ì „ìš© 입니다."
+ nor "'%-.192s' er skrivebeskyttet"
+ norwegian-ny "'%-.192s' er skrivetryggja"
+ pol "'%-.192s' jest tylko do odczytu"
+ por "Tabela '%-.192s' é somente para leitura"
+ rum "Tabela '%-.192s' e read-only"
+ rus "Таблица '%-.192s' предназначена только Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ"
+ serbian "Tabelu '%-.192s' je dozvoljeno samo Äitati"
+ slo "'%-.192s' is ÄítaÅ¥ only"
+ spa "'%-.192s' es de solo lectura"
+ swe "'%-.192s' är skyddad mot förändring"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' тільки Ð´Ð»Ñ Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ"
+ER_OUTOFMEMORY HY001 S1001
+ cze "M-Bálo paměti. Přestartujte daemona a zkuste znovu (je potřeba %d bytů)"
+ dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)"
+ nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)"
+ eng "Out of memory; restart server and try again (needed %d bytes)"
+ jps "Out of memory. デーモンをリスタートã—ã¦ã¿ã¦ãã ã•ã„ (%d bytes å¿…è¦)",
+ est "Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)"
+ fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)"
+ ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten"
+ greek "Δεν υπάÏχει διαθέσιμη μνήμη. ΠÏοσπαθήστε πάλι, επανεκινώντας τη διαδικασία (demon) (χÏειάζονται %d bytes)"
+ hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)"
+ ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)"
+ jpn "Out of memory. デーモンをリスタートã—ã¦ã¿ã¦ãã ã•ã„ (%d bytes å¿…è¦)"
+ kor "Out of memory. ë°ëª¬ì„ 재 실행 후 다시 시작하시오 (needed %d bytes)"
+ nor "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)"
+ norwegian-ny "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)"
+ pol "Zbyt mało pamięci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)"
+ por "Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)"
+ rum "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)"
+ rus "ÐедоÑтаточно памÑти. ПерезапуÑтите Ñервер и попробуйте еще раз (нужно %d байт)"
+ serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)"
+ slo "Málo pamäti. Reštartujte daemona a skúste znova (je potrebných %d bytov)"
+ spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)"
+ swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)"
+ ukr "Брак пам'ÑÑ‚Ñ–. РеÑтартуйте Ñервер та Ñпробуйте знову (потрібно %d байтів)"
+ER_OUT_OF_SORTMEMORY HY001 S1001
+ cze "M-Bálo paměti pro třídění. Zvyšte velikost třídícího bufferu"
+ dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren"
+ nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size"
+ eng "Out of sort memory; increase server sort buffer size"
+ jps "Out of sort memory. sort buffer size ãŒè¶³ã‚Šãªã„よã†ã§ã™.",
+ est "Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit"
+ fre "Manque de mémoire pour le tri. Augmentez-la."
+ ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden"
+ greek "Δεν υπάÏχει διαθέσιμη μνήμη για ταξινόμιση. Αυξήστε το sort buffer size για τη διαδικασία (demon)"
+ hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet"
+ ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone"
+ jpn "Out of sort memory. sort buffer size ãŒè¶³ã‚Šãªã„よã†ã§ã™."
+ kor "Out of sort memory. daemon sort bufferì˜ í¬ê¸°ë¥¼ ì¦ê°€ì‹œí‚¤ì„¸ìš”"
+ nor "Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten"
+ norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten"
+ pol "Zbyt mało pamięci dla sortowania. Zwiększ wielko?ć bufora demona dla sortowania"
+ por "Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação"
+ rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)"
+ rus "ÐедоÑтаточно памÑти Ð´Ð»Ñ Ñортировки. Увеличьте размер буфера Ñортировки на Ñервере"
+ serbian "Nema memorije za sortiranje. Povećajte veliÄinu sort buffer-a MySQL server-u"
+ slo "Málo pamäti pre triedenie, zvýšte veľkosť triediaceho bufferu"
+ spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion"
+ swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna"
+ ukr "Брак пам'ÑÑ‚Ñ– Ð´Ð»Ñ ÑортуваннÑ. Треба збільшити розмір буфера ÑÐ¾Ñ€Ñ‚ÑƒÐ²Ð°Ð½Ð½Ñ Ñƒ Ñервера"
+ER_UNEXPECTED_EOF
+ cze "Neo-BÄekávaný konec souboru pÅ™i Ätení '%-.192s' (chybový kód: %d)"
+ dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.192s' (Fejlkode: %d)"
+ nla "Onverwachte eof gevonden tijdens het lezen van file '%-.192s' (Errcode: %d)"
+ eng "Unexpected EOF found when reading file '%-.192s' (errno: %d)"
+ jps "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %d)",
+ est "Ootamatu faililõpumärgend faili '%-.192s' lugemisel (veakood: %d)"
+ fre "Fin de fichier inattendue en lisant '%-.192s' (Errcode: %d)"
+ ger "Unerwartetes Ende beim Lesen der Datei '%-.192s' (Fehler: %d)"
+ greek "Κατά τη διάÏκεια της ανάγνωσης, βÏέθηκε απÏοσδόκητα το τέλος του αÏχείου '%-.192s' (κωδικός λάθους: %d)"
+ hun "Varatlan filevege-jel a '%-.192s'olvasasakor. (hibakod: %d)"
+ ita "Fine del file inaspettata durante la lettura del file '%-.192s' (errno: %d)"
+ jpn "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %d)"
+ kor "'%-.192s' í™”ì¼ì„ ì½ëŠ” ë„중 ìž˜ëª»ëœ eofì„ ë°œê²¬ (ì—러번호: %d)"
+ nor "Uventet slutt på fil (eof) ved lesing av filen '%-.192s' (Feilkode: %d)"
+ norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.192s' (Feilkode: %d)"
+ pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.192s' (Kod błędu: %d)"
+ por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.192s' (erro no. %d)"
+ rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.192s' (errno: %d)"
+ rus "Ðеожиданный конец файла '%-.192s' (ошибка: %d)"
+ serbian "NeoÄekivani kraj pri Äitanju file-a '%-.192s' (errno: %d)"
+ slo "NeoÄakávaný koniec súboru pri Äítaní '%-.192s' (chybový kód: %d)"
+ spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.192s' (Error: %d)"
+ swe "Oväntat filslut vid läsning från '%-.192s' (Felkod: %d)"
+ ukr "Хибний кінець файлу '%-.192s' (помилка: %d)"
+ER_CON_COUNT_ERROR 08004
+ cze "P-Bříliš mnoho spojení"
+ dan "For mange forbindelser (connections)"
+ nla "Te veel verbindingen"
+ eng "Too many connections"
+ jps "接続ãŒå¤šã™ãŽã¾ã™",
+ est "Liiga palju samaaegseid ühendusi"
+ fre "Trop de connexions"
+ ger "Zu viele Verbindungen"
+ greek "ΥπάÏχουν πολλές συνδέσεις..."
+ hun "Tul sok kapcsolat"
+ ita "Troppe connessioni"
+ jpn "接続ãŒå¤šã™ãŽã¾ã™"
+ kor "너무 ë§Žì€ ì—°ê²°... max_connectionì„ ì¦ê°€ 시키시오..."
+ nor "For mange tilkoblinger (connections)"
+ norwegian-ny "For mange tilkoplingar (connections)"
+ pol "Zbyt wiele poł?czeń"
+ por "Excesso de conexões"
+ rum "Prea multe conectiuni"
+ rus "Слишком много Ñоединений"
+ serbian "Previše konekcija"
+ slo "Príliš mnoho spojení"
+ spa "Demasiadas conexiones"
+ swe "För många anslutningar"
+ ukr "Забагато з'єднань"
+ER_OUT_OF_RESOURCES
+ cze "M-Bálo prostoru/paměti pro thread"
+ dan "Udgået for tråde/hukommelse"
+ nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen"
+ eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space"
+ jps "Out of memory; mysqld ã‹ãã®ä»–ã®ãƒ—ロセスãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’å…¨ã¦ä½¿ã£ã¦ã„ã‚‹ã‹ç¢ºèªã—ã¦ãã ã•ã„. メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit' を設定ã—㦠mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é™ç•Œé‡ã‚’多ãã™ã‚‹ã‹ã€swap space を増やã—ã¦ã¿ã¦ãã ã•ã„",
+ est "Mälu sai otsa. Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine"
+ fre "Manque de 'threads'/mémoire"
+ ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten"
+ greek "ΠÏόβλημα με τη διαθέσιμη μνήμη (Out of thread space/memory)"
+ hun "Elfogyott a thread-memoria"
+ ita "Fine dello spazio/memoria per i thread"
+ jpn "Out of memory; mysqld ã‹ãã®ä»–ã®ãƒ—ロセスãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’å…¨ã¦ä½¿ã£ã¦ã„ã‚‹ã‹ç¢ºèªã—ã¦ãã ã•ã„. メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit' を設定ã—㦠mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é™ç•Œé‡ã‚’多ãã™ã‚‹ã‹ã€swap space を増やã—ã¦ã¿ã¦ãã ã•ã„"
+# This message failed to convert from euc-kr, skipped
+ nor "Tomt for tråd plass/minne"
+ norwegian-ny "Tomt for tråd plass/minne"
+ pol "Zbyt mało miejsca/pamięci dla w?tku"
+ por "Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'"
+ rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)"
+ rus "ÐедоÑтаточно памÑти; удоÑтоверьтеÑÑŒ, что mysqld или какой-либо другой процеÑÑ Ð½Ðµ занимает вÑÑŽ доÑтупную памÑÑ‚ÑŒ. ЕÑли нет, то вы можете иÑпользовать ulimit, чтобы выделить Ð´Ð»Ñ mysqld больше памÑти, или увеличить объем файла подкачки"
+ serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)"
+ slo "Málo miesta-pamäti pre vlákno"
+ spa "Memoria/espacio de tranpaso insuficiente"
+ swe "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap"
+ ukr "Брак пам'ÑÑ‚Ñ–; Перевірте чи mysqld або ÑкіÑÑŒ інші процеÑи викориÑтовують уÑÑŽ доÑтупну пам'ÑÑ‚ÑŒ. Як ні, то ви можете ÑкориÑтатиÑÑ 'ulimit', аби дозволити mysqld викориÑтовувати більше пам'ÑÑ‚Ñ– або ви можете додати більше міÑÑ†Ñ Ð¿Ñ–Ð´ Ñвап"
+ER_BAD_HOST_ERROR 08S01
+ cze "Nemohu zjistit jm-Béno stroje pro Vaši adresu"
+ dan "Kan ikke få værtsnavn for din adresse"
+ nla "Kan de hostname niet krijgen van uw adres"
+ eng "Can't get hostname for your address"
+ jps "ãã® address ã® hostname ãŒå¼•ã‘ã¾ã›ã‚“.",
+ est "Ei suuda lahendada IP aadressi masina nimeks"
+ fre "Ne peut obtenir de hostname pour votre adresse"
+ ger "Kann Hostnamen für diese Adresse nicht erhalten"
+ greek "Δεν έγινε γνωστό το hostname για την address σας"
+ hun "A gepnev nem allapithato meg a cimbol"
+ ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)"
+ jpn "ãã® address ã® hostname ãŒå¼•ã‘ã¾ã›ã‚“."
+ kor "ë‹¹ì‹ ì˜ ì»´í“¨í„°ì˜ í˜¸ìŠ¤íŠ¸ì´ë¦„ì„ ì–»ì„ ìˆ˜ ì—†ì니다."
+ nor "Kan ikke få tak i vertsnavn for din adresse"
+ norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse"
+ pol "Nie można otrzymać nazwy hosta dla twojego adresu"
+ por "Não pode obter nome do 'host' para seu endereço"
+ rum "Nu pot sa obtin hostname-ul adresei tale"
+ rus "Ðевозможно получить Ð¸Ð¼Ñ Ñ…Ð¾Ñта Ð´Ð»Ñ Ð²Ð°ÑˆÐµÐ³Ð¾ адреÑа"
+ serbian "Ne mogu da dobijem ime host-a za vašu IP adresu"
+ slo "Nemôžem zistiť meno hostiteľa pre vašu adresu"
+ spa "No puedo obtener el nombre de maquina de tu direccion"
+ swe "Kan inte hitta 'hostname' för din adress"
+ ukr "Ðе можу визначити ім'Ñ Ñ…Ð¾Ñту Ð´Ð»Ñ Ð²Ð°ÑˆÐ¾Ñ— адреÑи"
+ER_HANDSHAKE_ERROR 08S01
+ cze "Chyba p-Bři ustavování spojení"
+ dan "Forkert håndtryk (handshake)"
+ nla "Verkeerde handshake"
+ eng "Bad handshake"
+ est "Väär handshake"
+ fre "Mauvais 'handshake'"
+ ger "Ungültiger Handshake"
+ greek "Η αναγνώÏιση (handshake) δεν έγινε σωστά"
+ hun "A kapcsolatfelvetel nem sikerult (Bad handshake)"
+ ita "Negoziazione impossibile"
+ nor "Feil håndtrykk (handshake)"
+ norwegian-ny "Feil handtrykk (handshake)"
+ pol "ZÅ‚y uchwyt(handshake)"
+ por "Negociação de acesso falhou"
+ rum "Prost inceput de conectie (bad handshake)"
+ rus "Ðекорректное приветÑтвие"
+ serbian "LoÅ¡ poÄetak komunikacije (handshake)"
+ slo "Chyba pri nadväzovaní spojenia"
+ spa "Protocolo erroneo"
+ swe "Fel vid initiering av kommunikationen med klienten"
+ ukr "Ðевірна уÑтановка зв'Ñзку"
+ER_DBACCESS_DENIED_ERROR 42000
+ cze "P-Břístup pro uživatele '%-.48s'@'%-.64s' k databázi '%-.192s' není povolen"
+ dan "Adgang nægtet bruger: '%-.48s'@'%-.64s' til databasen '%-.192s'"
+ nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s' naar database '%-.192s'"
+ eng "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'"
+ jps "ユーザー '%-.48s'@'%-.64s' ã® '%-.192s' データベースã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ã‚’æ‹’å¦ã—ã¾ã™",
+ est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s' andmebaasile '%-.192s'"
+ fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s'. Base '%-.192s'"
+ ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.192s'"
+ greek "Δεν επιτέÏεται η Ï€Ïόσβαση στο χÏήστη: '%-.48s'@'%-.64s' στη βάση δεδομένων '%-.192s'"
+ hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.192s' adabazishoz."
+ ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s' al database '%-.192s'"
+ jpn "ユーザー '%-.48s'@'%-.64s' ã® '%-.192s' データベースã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ã‚’æ‹’å¦ã—ã¾ã™"
+ kor "'%-.48s'@'%-.64s' 사용ìžëŠ” '%-.192s' ë°ì´íƒ€ë² ì´ìŠ¤ì— ì ‘ê·¼ì´ ê±°ë¶€ ë˜ì—ˆìŠµë‹ˆë‹¤."
+ nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s' til databasen '%-.192s' nektet"
+ norwegian-ny "Tilgang ikkje tillate for brukar: '%-.48s'@'%-.64s' til databasen '%-.192s' nekta"
+ por "Acesso negado para o usuário '%-.48s'@'%-.64s' ao banco de dados '%-.192s'"
+ rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s' la baza de date '%-.192s'"
+ rus "Ð”Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%-.48s'@'%-.64s' доÑтуп к базе данных '%-.192s' закрыт"
+ serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s' za bazu '%-.192s'"
+ slo "Zakázaný prístup pre užívateľa: '%-.48s'@'%-.64s' k databázi '%-.192s'"
+ spa "Acceso negado para usuario: '%-.48s'@'%-.64s' para la base de datos '%-.192s'"
+ swe "Användare '%-.48s'@'%-.64s' är ej berättigad att använda databasen %-.192s"
+ ukr "ДоÑтуп заборонено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача: '%-.48s'@'%-.64s' до бази данних '%-.192s'"
+ER_ACCESS_DENIED_ERROR 28000
+ cze "P-Břístup pro uživatele '%-.48s'@'%-.64s' (s heslem %s)"
+ dan "Adgang nægtet bruger: '%-.48s'@'%-.64s' (Bruger adgangskode: %s)"
+ nla "Toegang geweigerd voor gebruiker: '%-.48s'@'%-.64s' (Wachtwoord gebruikt: %s)"
+ eng "Access denied for user '%-.48s'@'%-.64s' (using password: %s)"
+ jps "ユーザー '%-.48s'@'%-.64s' ã‚’æ‹’å¦ã—ã¾ã™.uUsing password: %s)",
+ est "Ligipääs keelatud kasutajale '%-.48s'@'%-.64s' (kasutab parooli: %s)"
+ fre "Accès refusé pour l'utilisateur: '%-.48s'@'@%-.64s' (mot de passe: %s)"
+ ger "Benutzer '%-.48s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %s)"
+ greek "Δεν επιτέÏεται η Ï€Ïόσβαση στο χÏήστη: '%-.48s'@'%-.64s' (χÏήση password: %s)"
+ hun "A(z) '%-.48s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)"
+ ita "Accesso non consentito per l'utente: '%-.48s'@'%-.64s' (Password: %s)"
+ jpn "ユーザー '%-.48s'@'%-.64s' ã‚’æ‹’å¦ã—ã¾ã™.uUsing password: %s)"
+ kor "'%-.48s'@'%-.64s' 사용ìžëŠ” ì ‘ê·¼ì´ ê±°ë¶€ ë˜ì—ˆìŠµë‹ˆë‹¤. (using password: %s)"
+ nor "Tilgang nektet for bruker: '%-.48s'@'%-.64s' (Bruker passord: %s)"
+ norwegian-ny "Tilgang ikke tillate for brukar: '%-.48s'@'%-.64s' (Brukar passord: %s)"
+ por "Acesso negado para o usuário '%-.48s'@'%-.64s' (senha usada: %s)"
+ rum "Acces interzis pentru utilizatorul: '%-.48s'@'%-.64s' (Folosind parola: %s)"
+ rus "ДоÑтуп закрыт Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%-.48s'@'%-.64s' (был иÑпользован пароль: %s)"
+ serbian "Pristup je zabranjen korisniku '%-.48s'@'%-.64s' (koristi lozinku: '%s')"
+ slo "Zakázaný prístup pre užívateľa: '%-.48s'@'%-.64s' (použitie hesla: %s)"
+ spa "Acceso negado para usuario: '%-.48s'@'%-.64s' (Usando clave: %s)"
+ swe "Användare '%-.48s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)"
+ ukr "ДоÑтуп заборонено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача: '%-.48s'@'%-.64s' (ВикориÑтано пароль: %s)"
+ER_NO_DB_ERROR 3D000
+ cze "Nebyla vybr-Bána žádná databáze"
+ dan "Ingen database valgt"
+ nla "Geen database geselecteerd"
+ eng "No database selected"
+ jps "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“.",
+ est "Andmebaasi ei ole valitud"
+ fre "Aucune base n'a été sélectionnée"
+ ger "Keine Datenbank ausgewählt"
+ greek "Δεν επιλέχθηκε βάση δεδομένων"
+ hun "Nincs kivalasztott adatbazis"
+ ita "Nessun database selezionato"
+ jpn "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“."
+ kor "ì„ íƒëœ ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 없습니다."
+ nor "Ingen database valgt"
+ norwegian-ny "Ingen database vald"
+ pol "Nie wybrano żadnej bazy danych"
+ por "Nenhum banco de dados foi selecionado"
+ rum "Nici o baza de data nu a fost selectata inca"
+ rus "База данных не выбрана"
+ serbian "Ni jedna baza nije selektovana"
+ slo "Nebola vybraná databáza"
+ spa "Base de datos no seleccionada"
+ swe "Ingen databas i användning"
+ ukr "Базу данних не вибрано"
+ER_UNKNOWN_COM_ERROR 08S01
+ cze "Nezn-Bámý příkaz"
+ dan "Ukendt kommando"
+ nla "Onbekend commando"
+ eng "Unknown command"
+ jps "ãã®ã‚³ãƒžãƒ³ãƒ‰ã¯ä½•ï¼Ÿ",
+ est "Tundmatu käsk"
+ fre "Commande inconnue"
+ ger "Unbekannter Befehl"
+ greek "Αγνωστη εντολή"
+ hun "Ervenytelen parancs"
+ ita "Comando sconosciuto"
+ jpn "ãã®ã‚³ãƒžãƒ³ãƒ‰ã¯ä½•ï¼Ÿ"
+ kor "명령어가 뭔지 모르겠어요..."
+ nor "Ukjent kommando"
+ norwegian-ny "Ukjent kommando"
+ pol "Nieznana komenda"
+ por "Comando desconhecido"
+ rum "Comanda invalida"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ð° коммуникационного протокола"
+ serbian "Nepoznata komanda"
+ slo "Neznámy príkaz"
+ spa "Comando desconocido"
+ swe "Okänt commando"
+ ukr "Ðевідома команда"
+ER_BAD_NULL_ERROR 23000
+ cze "Sloupec '%-.192s' nem-Bůže být null"
+ dan "Kolonne '%-.192s' kan ikke være NULL"
+ nla "Kolom '%-.192s' kan niet null zijn"
+ eng "Column '%-.192s' cannot be null"
+ jps "Column '%-.192s' 㯠null ã«ã¯ã§ããªã„ã®ã§ã™",
+ est "Tulp '%-.192s' ei saa omada nullväärtust"
+ fre "Le champ '%-.192s' ne peut être vide (null)"
+ ger "Feld '%-.192s' darf nicht NULL sein"
+ greek "Το πεδίο '%-.192s' δεν μποÏεί να είναι κενό (null)"
+ hun "A(z) '%-.192s' oszlop erteke nem lehet nulla"
+ ita "La colonna '%-.192s' non puo` essere nulla"
+ jpn "Column '%-.192s' 㯠null ã«ã¯ã§ããªã„ã®ã§ã™"
+ kor "칼럼 '%-.192s'는 ë„(Null)ì´ ë˜ë©´ 안ë©ë‹ˆë‹¤. "
+ nor "Kolonne '%-.192s' kan ikke vere null"
+ norwegian-ny "Kolonne '%-.192s' kan ikkje vere null"
+ pol "Kolumna '%-.192s' nie może być null"
+ por "Coluna '%-.192s' não pode ser vazia"
+ rum "Coloana '%-.192s' nu poate sa fie null"
+ rus "Столбец '%-.192s' не может принимать величину NULL"
+ serbian "Kolona '%-.192s' ne može biti NULL"
+ slo "Pole '%-.192s' nemôže byť null"
+ spa "La columna '%-.192s' no puede ser nula"
+ swe "Kolumn '%-.192s' får inte vara NULL"
+ ukr "Стовбець '%-.192s' не може бути нульовим"
+ER_BAD_DB_ERROR 42000
+ cze "Nezn-Bámá databáze '%-.192s'"
+ dan "Ukendt database '%-.192s'"
+ nla "Onbekende database '%-.192s'"
+ eng "Unknown database '%-.192s'"
+ jps "'%-.192s' ãªã‚“ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã¯çŸ¥ã‚Šã¾ã›ã‚“.",
+ est "Tundmatu andmebaas '%-.192s'"
+ fre "Base '%-.192s' inconnue"
+ ger "Unbekannte Datenbank '%-.192s'"
+ greek "Αγνωστη βάση δεδομένων '%-.192s'"
+ hun "Ervenytelen adatbazis: '%-.192s'"
+ ita "Database '%-.192s' sconosciuto"
+ jpn "'%-.192s' ãªã‚“ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã¯çŸ¥ã‚Šã¾ã›ã‚“."
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'는 알수 ì—†ìŒ"
+ nor "Ukjent database '%-.192s'"
+ norwegian-ny "Ukjent database '%-.192s'"
+ pol "Nieznana baza danych '%-.192s'"
+ por "Banco de dados '%-.192s' desconhecido"
+ rum "Baza de data invalida '%-.192s'"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð±Ð°Ð·Ð° данных '%-.192s'"
+ serbian "Nepoznata baza '%-.192s'"
+ slo "Neznáma databáza '%-.192s'"
+ spa "Base de datos desconocida '%-.192s'"
+ swe "Okänd databas: '%-.192s'"
+ ukr "Ðевідома база данних '%-.192s'"
+ER_TABLE_EXISTS_ERROR 42S01
+ cze "Tabulka '%-.192s' ji-Bž existuje"
+ dan "Tabellen '%-.192s' findes allerede"
+ nla "Tabel '%-.192s' bestaat al"
+ eng "Table '%-.192s' already exists"
+ jps "Table '%-.192s' ã¯æ—¢ã«ã‚ã‚Šã¾ã™",
+ est "Tabel '%-.192s' juba eksisteerib"
+ fre "La table '%-.192s' existe déjà"
+ ger "Tabelle '%-.192s' bereits vorhanden"
+ greek "Ο πίνακας '%-.192s' υπάÏχει ήδη"
+ hun "A(z) '%-.192s' tabla mar letezik"
+ ita "La tabella '%-.192s' esiste gia`"
+ jpn "Table '%-.192s' ã¯æ—¢ã«ã‚ã‚Šã¾ã™"
+ kor "í…Œì´ë¸” '%-.192s'는 ì´ë¯¸ 존재함"
+ nor "Tabellen '%-.192s' eksisterer allerede"
+ norwegian-ny "Tabellen '%-.192s' eksisterar allereide"
+ pol "Tabela '%-.192s' już istnieje"
+ por "Tabela '%-.192s' já existe"
+ rum "Tabela '%-.192s' exista deja"
+ rus "Таблица '%-.192s' уже ÑущеÑтвует"
+ serbian "Tabela '%-.192s' već postoji"
+ slo "Tabuľka '%-.192s' už existuje"
+ spa "La tabla '%-.192s' ya existe"
+ swe "Tabellen '%-.192s' finns redan"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' вже Ñ–Ñнує"
+ER_BAD_TABLE_ERROR 42S02
+ cze "Nezn-Bámá tabulka '%-.100s'"
+ dan "Ukendt tabel '%-.100s'"
+ nla "Onbekende tabel '%-.100s'"
+ eng "Unknown table '%-.100s'"
+ jps "table '%-.100s' ã¯ã‚ã‚Šã¾ã›ã‚“.",
+ est "Tundmatu tabel '%-.100s'"
+ fre "Table '%-.100s' inconnue"
+ ger "Unbekannte Tabelle '%-.100s'"
+ greek "Αγνωστος πίνακας '%-.100s'"
+ hun "Ervenytelen tabla: '%-.100s'"
+ ita "Tabella '%-.100s' sconosciuta"
+ jpn "table '%-.100s' ã¯ã‚ã‚Šã¾ã›ã‚“."
+ kor "í…Œì´ë¸” '%-.100s'는 알수 ì—†ìŒ"
+ nor "Ukjent tabell '%-.100s'"
+ norwegian-ny "Ukjent tabell '%-.100s'"
+ pol "Nieznana tabela '%-.100s'"
+ por "Tabela '%-.100s' desconhecida"
+ rum "Tabela '%-.100s' este invalida"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ð° '%-.100s'"
+ serbian "Nepoznata tabela '%-.100s'"
+ slo "Neznáma tabuľka '%-.100s'"
+ spa "Tabla '%-.100s' desconocida"
+ swe "Okänd tabell '%-.100s'"
+ ukr "Ðевідома Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.100s'"
+ER_NON_UNIQ_ERROR 23000
+ cze "Sloupec '%-.192s' v %-.192s nen-Bí zcela jasný"
+ dan "Felt: '%-.192s' i tabel %-.192s er ikke entydigt"
+ nla "Kolom: '%-.192s' in %-.192s is niet eenduidig"
+ eng "Column '%-.192s' in %-.192s is ambiguous"
+ est "Väli '%-.192s' %-.192s-s ei ole ühene"
+ fre "Champ: '%-.192s' dans %-.192s est ambigu"
+ ger "Feld '%-.192s' in %-.192s ist nicht eindeutig"
+ greek "Το πεδίο: '%-.192s' σε %-.192s δεν έχει καθοÏιστεί"
+ hun "A(z) '%-.192s' oszlop %-.192s-ben ketertelmu"
+ ita "Colonna: '%-.192s' di %-.192s e` ambigua"
+ jpn "Column: '%-.192s' in %-.192s is ambiguous"
+ kor "칼럼: '%-.192s' in '%-.192s' ì´ ëª¨í˜¸í•¨"
+ nor "Felt: '%-.192s' i tabell %-.192s er ikke entydig"
+ norwegian-ny "Kolonne: '%-.192s' i tabell %-.192s er ikkje eintydig"
+ pol "Kolumna: '%-.192s' w %-.192s jest dwuznaczna"
+ por "Coluna '%-.192s' em '%-.192s' é ambígua"
+ rum "Coloana: '%-.192s' in %-.192s este ambigua"
+ rus "Столбец '%-.192s' в %-.192s задан неоднозначно"
+ serbian "Kolona '%-.192s' u %-.192s nije jedinstvena u kontekstu"
+ slo "Pole: '%-.192s' v %-.192s je nejasné"
+ spa "La columna: '%-.192s' en %-.192s es ambigua"
+ swe "Kolumn '%-.192s' i %-.192s är inte unik"
+ ukr "Стовбець '%-.192s' у %-.192s визначений неоднозначно"
+ER_SERVER_SHUTDOWN 08S01
+ cze "Prob-Bíhá ukonÄování práce serveru"
+ dan "Database nedlukning er i gang"
+ nla "Bezig met het stoppen van de server"
+ eng "Server shutdown in progress"
+ jps "Server を shutdown 中...",
+ est "Serveri seiskamine käib"
+ fre "Arrêt du serveur en cours"
+ ger "Der Server wird heruntergefahren"
+ greek "ΕναÏξη διαδικασίας αποσÏνδεσης του εξυπηÏετητή (server shutdown)"
+ hun "A szerver leallitasa folyamatban"
+ ita "Shutdown del server in corso"
+ jpn "Server を shutdown 中..."
+ kor "Server가 셧다운 중입니다."
+ nor "Database nedkobling er i gang"
+ norwegian-ny "Tenar nedkopling er i gang"
+ pol "Trwa kończenie działania serwera"
+ por "'Shutdown' do servidor em andamento"
+ rum "Terminarea serverului este in desfasurare"
+ rus "Сервер находится в процессе остановки"
+ serbian "Gašenje servera je u toku"
+ slo "Prebieha ukončovanie práce servera"
+ spa "Desconexion de servidor en proceso"
+ swe "Servern går nu ned"
+ ukr "Завершується работа сервера"
+ER_BAD_FIELD_ERROR 42S22 S0022
+ cze "Nezn-Bámý sloupec '%-.192s' v %-.192s"
+ dan "Ukendt kolonne '%-.192s' i tabel %-.192s"
+ nla "Onbekende kolom '%-.192s' in %-.192s"
+ eng "Unknown column '%-.192s' in '%-.192s'"
+ jps "'%-.192s' column 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“.",
+ est "Tundmatu tulp '%-.192s' '%-.192s'-s"
+ fre "Champ '%-.192s' inconnu dans %-.192s"
+ ger "Unbekanntes Tabellenfeld '%-.192s' in %-.192s"
+ greek "Αγνωστο πεδίο '%-.192s' σε '%-.192s'"
+ hun "A(z) '%-.192s' oszlop ervenytelen '%-.192s'-ben"
+ ita "Colonna sconosciuta '%-.192s' in '%-.192s'"
+ jpn "'%-.192s' column 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“."
+ kor "Unknown 칼럼 '%-.192s' in '%-.192s'"
+ nor "Ukjent kolonne '%-.192s' i tabell %-.192s"
+ norwegian-ny "Ukjent felt '%-.192s' i tabell %-.192s"
+ pol "Nieznana kolumna '%-.192s' w %-.192s"
+ por "Coluna '%-.192s' desconhecida em '%-.192s'"
+ rum "Coloana invalida '%-.192s' in '%-.192s'"
+ rus "Неизвестный столбец '%-.192s' в '%-.192s'"
+ serbian "Nepoznata kolona '%-.192s' u '%-.192s'"
+ slo "Neznáme pole '%-.192s' v '%-.192s'"
+ spa "La columna '%-.192s' en %-.192s es desconocida"
+ swe "Okänd kolumn '%-.192s' i %-.192s"
+ ukr "Невідомий стовбець '%-.192s' у '%-.192s'"
+ER_WRONG_FIELD_WITH_GROUP 42000 S1009
+ cze "Pou-Bžité '%-.192s' nebylo v group by"
+ dan "Brugte '%-.192s' som ikke var i group by"
+ nla "Opdracht gebruikt '%-.192s' dat niet in de GROUP BY voorkomt"
+ eng "'%-.192s' isn't in GROUP BY"
+ jps "'%-.192s' isn't in GROUP BY",
+ est "'%-.192s' puudub GROUP BY klauslis"
+ fre "'%-.192s' n'est pas dans 'group by'"
+ ger "'%-.192s' ist nicht in GROUP BY vorhanden"
+ greek "ΧÏησιμοποιήθηκε '%-.192s' που δεν υπήÏχε στο group by"
+ hun "Used '%-.192s' with wasn't in group by"
+ ita "Usato '%-.192s' che non e` nel GROUP BY"
+ kor "'%-.192s'ì€ GROUP BYì†ì— ì—†ìŒ"
+ nor "Brukte '%-.192s' som ikke var i group by"
+ norwegian-ny "Brukte '%-.192s' som ikkje var i group by"
+ pol "Użyto '%-.192s' bez umieszczenia w group by"
+ por "'%-.192s' não está em 'GROUP BY'"
+ rum "'%-.192s' nu exista in clauza GROUP BY"
+ rus "'%-.192s' не присутствует в GROUP BY"
+ serbian "Entitet '%-.192s' nije naveden u komandi 'GROUP BY'"
+ slo "Použité '%-.192s' nebolo v 'group by'"
+ spa "Usado '%-.192s' el cual no esta group by"
+ swe "'%-.192s' finns inte i GROUP BY"
+ ukr "'%-.192s' не є у GROUP BY"
+ER_WRONG_GROUP_FIELD 42000 S1009
+ cze "Nemohu pou-Bžít group na '%-.192s'"
+ dan "Kan ikke gruppere på '%-.192s'"
+ nla "Kan '%-.192s' niet groeperen"
+ eng "Can't group on '%-.192s'"
+ est "Ei saa grupeerida '%-.192s' järgi"
+ fre "Ne peut regrouper '%-.192s'"
+ ger "Gruppierung über '%-.192s' nicht möglich"
+ greek "ΑδÏνατη η ομαδοποίηση (group on) '%-.192s'"
+ hun "A group nem hasznalhato: '%-.192s'"
+ ita "Impossibile raggruppare per '%-.192s'"
+ kor "'%-.192s'를 그룹할 수 ì—†ìŒ"
+ nor "Kan ikke gruppere på '%-.192s'"
+ norwegian-ny "Kan ikkje gruppere på '%-.192s'"
+ pol "Nie można grupować po '%-.192s'"
+ por "Não pode agrupar em '%-.192s'"
+ rum "Nu pot sa grupez pe (group on) '%-.192s'"
+ rus "Невозможно произвести группировку по '%-.192s'"
+ serbian "Ne mogu da grupišem po '%-.192s'"
+ slo "Nemôžem použiť 'group' na '%-.192s'"
+ spa "No puedo agrupar por '%-.192s'"
+ swe "Kan inte använda GROUP BY med '%-.192s'"
+ ukr "Не можу групувати по '%-.192s'"
+ER_WRONG_SUM_SELECT 42000 S1009
+ cze "P-Bříkaz obsahuje zároveň funkci sum a sloupce"
+ dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk"
+ nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht"
+ eng "Statement has sum functions and columns in same statement"
+ est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid"
+ fre "Vous demandez la fonction sum() et des champs dans la même commande"
+ ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt"
+ greek "Η διατÏπωση πεÏιέχει sum functions και columns στην ίδια διατÏπωση"
+ ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY"
+ kor "Statement ê°€ sumê¸°ëŠ¥ì„ ë™ìž‘중ì´ê³  ì¹¼ëŸ¼ë„ ë™ì¼í•œ statement입니다."
+ nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk"
+ norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk"
+ pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu"
+ por "Cláusula contém funções de soma e colunas juntas"
+ rum "Comanda are functii suma si coloane in aceeasi comanda"
+ rus "Выражение содержит групповые функции и столбцы, но не включает GROUP BY. А как вы умудрились получить это сообщение об ошибке?"
+ serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme"
+ slo "Príkaz obsahuje zároveň funkciu 'sum' a poľa"
+ spa "El estamento tiene funciones de suma y columnas en el mismo estamento"
+ swe "Kommandot har både sum functions och enkla funktioner"
+ ukr "У виразі використано підсумовуючі функції поряд з іменами стовбців"
+ER_WRONG_VALUE_COUNT 21S01
+ cze "Po-BÄet sloupců neodpovídá zadané hodnotÄ›"
+ dan "Kolonne tæller stemmer ikke med antallet af værdier"
+ nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes"
+ eng "Column count doesn't match value count"
+ est "Tulpade arv erineb väärtuste arvust"
+ ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte"
+ greek "Το Column count δεν ταιÏιάζει με το value count"
+ hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel"
+ ita "Il numero delle colonne non e` uguale al numero dei valori"
+ kor "ì¹¼ëŸ¼ì˜ ì¹´ìš´íŠ¸ê°€ ê°’ì˜ ì¹´ìš´íŠ¸ì™€ ì¼ì¹˜í•˜ì§€ 않습니다."
+ nor "Felt telling stemmer verdi telling"
+ norwegian-ny "Kolonne telling stemmer verdi telling"
+ pol "Liczba kolumn nie odpowiada liczbie warto?ci"
+ por "Contagem de colunas não confere com a contagem de valores"
+ rum "Numarul de coloane nu este acelasi cu numarul valoarei"
+ rus "Количество столбцов не совпадает с количеством значений"
+ serbian "Broj kolona ne odgovara broju vrednosti"
+ slo "Počet polí nezodpovedá zadanej hodnote"
+ spa "La columna con count no tiene valores para contar"
+ swe "Antalet kolumner motsvarar inte antalet värden"
+ ukr "Кількість стовбців не співпадає з кількістю значень"
+ER_TOO_LONG_IDENT 42000 S1009
+ cze "Jm-Béno identifikátoru '%-.100s' je příliš dlouhé"
+ dan "Navnet '%-.100s' er for langt"
+ nla "Naam voor herkenning '%-.100s' is te lang"
+ eng "Identifier name '%-.100s' is too long"
+ jps "Identifier name '%-.100s' ã¯é•·ã™ãŽã¾ã™",
+ est "Identifikaatori '%-.100s' nimi on liiga pikk"
+ fre "Le nom de l'identificateur '%-.100s' est trop long"
+ ger "Name des Bezeichners '%-.100s' ist zu lang"
+ greek "Το identifier name '%-.100s' είναι Ï€Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿"
+ hun "A(z) '%-.100s' azonositonev tul hosszu."
+ ita "Il nome dell'identificatore '%-.100s' e` troppo lungo"
+ jpn "Identifier name '%-.100s' ã¯é•·ã™ãŽã¾ã™"
+ kor "Identifier '%-.100s'는 너무 길군요."
+ nor "Identifikator '%-.100s' er for lang"
+ norwegian-ny "Identifikator '%-.100s' er for lang"
+ pol "Nazwa identyfikatora '%-.100s' jest zbyt długa"
+ por "Nome identificador '%-.100s' é longo demais"
+ rum "Numele indentificatorului '%-.100s' este prea lung"
+ rus "Слишком длинный идентификатор '%-.100s'"
+ serbian "Ime '%-.100s' je predugačko"
+ slo "Meno identifikátora '%-.100s' je príliš dlhé"
+ spa "El nombre del identificador '%-.100s' es demasiado grande"
+ swe "Kolumnnamn '%-.100s' är för långt"
+ ukr "Ім'я ідентифікатора '%-.100s' задовге"
+ER_DUP_FIELDNAME 42S21 S1009
+ cze "Zdvojen-Bé jméno sloupce '%-.192s'"
+ dan "Feltnavnet '%-.192s' findes allerede"
+ nla "Dubbele kolom naam '%-.192s'"
+ eng "Duplicate column name '%-.192s'"
+ jps "'%-.192s' ã¨ã„ㆠcolumn åã¯é‡è¤‡ã—ã¦ã¾ã™",
+ est "Kattuv tulba nimi '%-.192s'"
+ fre "Nom du champ '%-.192s' déjà utilisé"
+ ger "Doppelter Spaltenname: '%-.192s'"
+ greek "Επανάληψη column name '%-.192s'"
+ hun "Duplikalt oszlopazonosito: '%-.192s'"
+ ita "Nome colonna duplicato '%-.192s'"
+ jpn "'%-.192s' ã¨ã„ㆠcolumn åã¯é‡è¤‡ã—ã¦ã¾ã™"
+ kor "ì¤‘ë³µëœ ì¹¼ëŸ¼ ì´ë¦„: '%-.192s'"
+ nor "Feltnavnet '%-.192s' eksisterte fra før"
+ norwegian-ny "Feltnamnet '%-.192s' eksisterte frå før"
+ pol "Powtórzona nazwa kolumny '%-.192s'"
+ por "Nome da coluna '%-.192s' duplicado"
+ rum "Numele coloanei '%-.192s' e duplicat"
+ rus "Дублирующееся имя столбца '%-.192s'"
+ serbian "Duplirano ime kolone '%-.192s'"
+ slo "Opakované meno poľa '%-.192s'"
+ spa "Nombre de columna duplicado '%-.192s'"
+ swe "Kolumnnamn '%-.192s finns flera gånger"
+ ukr "Дублююче ім'я стовбця '%-.192s'"
+ER_DUP_KEYNAME 42000 S1009
+ cze "Zdvojen-Bé jméno klíÄe '%-.192s'"
+ dan "Indeksnavnet '%-.192s' findes allerede"
+ nla "Dubbele zoeksleutel naam '%-.192s'"
+ eng "Duplicate key name '%-.192s'"
+ jps "'%-.192s' ã¨ã„ㆠkey ã®åå‰ã¯é‡è¤‡ã—ã¦ã„ã¾ã™",
+ est "Kattuv võtme nimi '%-.192s'"
+ fre "Nom de clef '%-.192s' déjà utilisé"
+ ger "Doppelter Name für Schlüssel vorhanden: '%-.192s'"
+ greek "Επανάληψη key name '%-.192s'"
+ hun "Duplikalt kulcsazonosito: '%-.192s'"
+ ita "Nome chiave duplicato '%-.192s'"
+ jpn "'%-.192s' ã¨ã„ㆠkey ã®åå‰ã¯é‡è¤‡ã—ã¦ã„ã¾ã™"
+ kor "ì¤‘ë³µëœ í‚¤ ì´ë¦„ : '%-.192s'"
+ nor "Nøkkelnavnet '%-.192s' eksisterte fra før"
+ norwegian-ny "Nøkkelnamnet '%-.192s' eksisterte frå før"
+ pol "Powtórzony nazwa klucza '%-.192s'"
+ por "Nome da chave '%-.192s' duplicado"
+ rum "Numele cheiei '%-.192s' e duplicat"
+ rus "Дублирующееся имя ключа '%-.192s'"
+ serbian "Duplirano ime ključa '%-.192s'"
+ slo "Opakované meno kľúča '%-.192s'"
+ spa "Nombre de clave duplicado '%-.192s'"
+ swe "Nyckelnamn '%-.192s' finns flera gånger"
+ ukr "Дублююче ім'я ключа '%-.192s'"
+# When using this error code, please use ER(ER_DUP_ENTRY_WITH_KEY_NAME)
+# for the message string. See, for example, code in handler.cc.
+ER_DUP_ENTRY 23000 S1009
+ cze "Zdvojen-Bý klÃ­Ä '%-.192s' (Äíslo klíÄe %d)"
+ dan "Ens værdier '%-.192s' for indeks %d"
+ nla "Dubbele ingang '%-.192s' voor zoeksleutel %d"
+ eng "Duplicate entry '%-.192s' for key %d"
+ jps "'%-.192s' 㯠key %d ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™",
+ est "Kattuv väärtus '%-.192s' võtmele %d"
+ fre "Duplicata du champ '%-.192s' pour la clef %d"
+ ger "Doppelter Eintrag '%-.192s' für Schlüssel %d"
+ greek "Διπλή εγγÏαφή '%-.192s' για το κλειδί %d"
+ hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint."
+ ita "Valore duplicato '%-.192s' per la chiave %d"
+ jpn "'%-.192s' 㯠key %d ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™"
+ kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’ '%-.192s': key %d"
+ nor "Like verdier '%-.192s' for nøkkel %d"
+ norwegian-ny "Like verdiar '%-.192s' for nykkel %d"
+ pol "Powtórzone wyst?pienie '%-.192s' dla klucza %d"
+ por "Entrada '%-.192s' duplicada para a chave %d"
+ rum "Cimpul '%-.192s' e duplicat pentru cheia %d"
+ rus "Дублирующаяся запись '%-.192s' по ключу %d"
+ serbian "Dupliran unos '%-.192s' za ključ '%d'"
+ slo "Opakovaný kľúč '%-.192s' (číslo kľúča %d)"
+ spa "Entrada duplicada '%-.192s' para la clave %d"
+ swe "Dubbel nyckel '%-.192s' för nyckel %d"
+ ukr "Дублюючий запис '%-.192s' для ключа %d"
+ER_WRONG_FIELD_SPEC 42000 S1009
+ cze "Chybn-Bá specifikace sloupce '%-.192s'"
+ dan "Forkert kolonnespecifikaton for felt '%-.192s'"
+ nla "Verkeerde kolom specificatie voor kolom '%-.192s'"
+ eng "Incorrect column specifier for column '%-.192s'"
+ est "Vigane tulba kirjeldus tulbale '%-.192s'"
+ fre "Mauvais paramètre de champ pour le champ '%-.192s'"
+ ger "Falsche Spezifikation für Feld '%-.192s'"
+ greek "Εσφαλμένο column specifier για το πεδίο '%-.192s'"
+ hun "Rossz oszlopazonosito: '%-.192s'"
+ ita "Specifica errata per la colonna '%-.192s'"
+ kor "칼럼 '%-.192s'ì˜ ë¶€ì •í™•í•œ 칼럼 ì •ì˜ìž"
+ nor "Feil kolonne spesifikator for felt '%-.192s'"
+ norwegian-ny "Feil kolonne spesifikator for kolonne '%-.192s'"
+ pol "Błędna specyfikacja kolumny dla kolumny '%-.192s'"
+ por "Especificador de coluna incorreto para a coluna '%-.192s'"
+ rum "Specificandul coloanei '%-.192s' este incorect"
+ rus "Некорректный определитель столбца для столбца '%-.192s'"
+ serbian "Pogrešan naziv kolone za kolonu '%-.192s'"
+ slo "Chyba v špecifikácii poľa '%-.192s'"
+ spa "Especificador de columna erroneo para la columna '%-.192s'"
+ swe "Felaktigt kolumntyp för kolumn '%-.192s'"
+ ukr "Невірний специфікатор стовбця '%-.192s'"
+ER_PARSE_ERROR 42000 s1009
+ cze "%s bl-Bízko '%-.80s' na řádku %d"
+ dan "%s nær '%-.80s' på linje %d"
+ nla "%s bij '%-.80s' in regel %d"
+ eng "%s near '%-.80s' at line %d"
+ jps "%s : '%-.80s' 付近 : %d 行目",
+ est "%s '%-.80s' ligidal real %d"
+ fre "%s près de '%-.80s' à la ligne %d"
+ ger "%s bei '%-.80s' in Zeile %d"
+ greek "%s πλησίον '%-.80s' στη γÏαμμή %d"
+ hun "A %s a '%-.80s'-hez kozeli a %d sorban"
+ ita "%s vicino a '%-.80s' linea %d"
+ jpn "%s : '%-.80s' 付近 : %d 行目"
+ kor "'%s' ì—러 ê°™ì니다. ('%-.80s' 명령어 ë¼ì¸ %d)"
+ nor "%s nær '%-.80s' på linje %d"
+ norwegian-ny "%s attmed '%-.80s' på line %d"
+ pol "%s obok '%-.80s' w linii %d"
+ por "%s próximo a '%-.80s' na linha %d"
+ rum "%s linga '%-.80s' pe linia %d"
+ rus "%s около '%-.80s' на строке %d"
+ serbian "'%s' u iskazu '%-.80s' na liniji %d"
+ slo "%s blízko '%-.80s' na riadku %d"
+ spa "%s cerca '%-.80s' en la linea %d"
+ swe "%s nära '%-.80s' på rad %d"
+ ukr "%s біля '%-.80s' в строці %d"
+ER_EMPTY_QUERY 42000
+ cze "V-Býsledek dotazu je prázdný"
+ dan "Forespørgsel var tom"
+ nla "Query was leeg"
+ eng "Query was empty"
+ jps "Query ãŒç©ºã§ã™.",
+ est "Tühi päring"
+ fre "Query est vide"
+ ger "Leere Abfrage"
+ greek "Το εÏώτημα (query) που θέσατε ήταν κενό"
+ hun "Ures lekerdezes."
+ ita "La query e` vuota"
+ jpn "Query ãŒç©ºã§ã™."
+ kor "쿼리결과가 없습니다."
+ nor "Forespørsel var tom"
+ norwegian-ny "Førespurnad var tom"
+ pol "Zapytanie było puste"
+ por "Consulta (query) estava vazia"
+ rum "Query-ul a fost gol"
+ rus "Запрос оказался пустым"
+ serbian "Upit je bio prazan"
+ slo "Výsledok požiadavky bol prázdny"
+ spa "La query estaba vacia"
+ swe "Frågan var tom"
+ ukr "Пустий запит"
+ER_NONUNIQ_TABLE 42000 S1009
+ cze "Nejednozna-BÄná tabulka/alias: '%-.192s'"
+ dan "Tabellen/aliaset: '%-.192s' er ikke unikt"
+ nla "Niet unieke waarde tabel/alias: '%-.192s'"
+ eng "Not unique table/alias: '%-.192s'"
+ jps "'%-.192s' ã¯ä¸€æ„ã® table/alias åã§ã¯ã‚ã‚Šã¾ã›ã‚“",
+ est "Ei ole unikaalne tabel/alias '%-.192s'"
+ fre "Table/alias: '%-.192s' non unique"
+ ger "Tabellenname/Alias '%-.192s' nicht eindeutig"
+ greek "ΑδÏνατη η ανεÏÏεση unique table/alias: '%-.192s'"
+ hun "Nem egyedi tabla/alias: '%-.192s'"
+ ita "Tabella/alias non unico: '%-.192s'"
+ jpn "'%-.192s' ã¯ä¸€æ„ã® table/alias åã§ã¯ã‚ã‚Šã¾ã›ã‚“"
+ kor "Unique 하지 ì•Šì€ í…Œì´ë¸”/alias: '%-.192s'"
+ nor "Ikke unikt tabell/alias: '%-.192s'"
+ norwegian-ny "Ikkje unikt tabell/alias: '%-.192s'"
+ pol "Tabela/alias nie s? unikalne: '%-.192s'"
+ por "Tabela/alias '%-.192s' não única"
+ rum "Tabela/alias: '%-.192s' nu este unic"
+ rus "Повторяющаяся таблица/псевдоним '%-.192s'"
+ serbian "Tabela ili alias nisu bili jedinstveni: '%-.192s'"
+ slo "Nie jednoznačná tabuľka/alias: '%-.192s'"
+ spa "Tabla/alias: '%-.192s' es no unica"
+ swe "Icke unikt tabell/alias: '%-.192s'"
+ ukr "Неунікальна таблиця/псевдонім: '%-.192s'"
+ER_INVALID_DEFAULT 42000 S1009
+ cze "Chybn-Bá defaultní hodnota pro '%-.192s'"
+ dan "Ugyldig standardværdi for '%-.192s'"
+ nla "Foutieve standaard waarde voor '%-.192s'"
+ eng "Invalid default value for '%-.192s'"
+ est "Vigane vaikeväärtus '%-.192s' jaoks"
+ fre "Valeur par défaut invalide pour '%-.192s'"
+ ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.192s'"
+ greek "Εσφαλμένη Ï€ÏοκαθοÏισμένη τιμή (default value) για '%-.192s'"
+ hun "Ervenytelen ertek: '%-.192s'"
+ ita "Valore di default non valido per '%-.192s'"
+ kor "'%-.192s'ì˜ ìœ íš¨í•˜ì§€ 못한 ë””í´íŠ¸ ê°’ì„ ì‚¬ìš©í•˜ì…¨ìŠµë‹ˆë‹¤."
+ nor "Ugyldig standardverdi for '%-.192s'"
+ norwegian-ny "Ugyldig standardverdi for '%-.192s'"
+ pol "Niewła?ciwa warto?ć domy?lna dla '%-.192s'"
+ por "Valor padrão (default) inválido para '%-.192s'"
+ rum "Valoarea de default este invalida pentru '%-.192s'"
+ rus "Некорректное значение по умолчанию для '%-.192s'"
+ serbian "Loša default vrednost za '%-.192s'"
+ slo "Chybná implicitná hodnota pre '%-.192s'"
+ spa "Valor por defecto invalido para '%-.192s'"
+ swe "Ogiltigt DEFAULT värde för '%-.192s'"
+ ukr "Невірне значення по замовчуванню для '%-.192s'"
+ER_MULTIPLE_PRI_KEY 42000 S1009
+ cze "Definov-Báno více primárních klíÄů"
+ dan "Flere primærnøgler specificeret"
+ nla "Meerdere primaire zoeksleutels gedefinieerd"
+ eng "Multiple primary key defined"
+ jps "複数㮠primary key ãŒå®šç¾©ã•ã‚Œã¾ã—ãŸ",
+ est "Mitut primaarset võtit ei saa olla"
+ fre "Plusieurs clefs primaires définies"
+ ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert"
+ greek "ΠεÏισσότεÏα από ένα primary key οÏίστηκαν"
+ hun "Tobbszoros elsodleges kulcs definialas."
+ ita "Definite piu` chiave primarie"
+ jpn "複数㮠primary key ãŒå®šç¾©ã•ã‚Œã¾ã—ãŸ"
+ kor "Multiple primary keyê°€ ì •ì˜ë˜ì–´ 있슴"
+ nor "Fleire primærnøkle spesifisert"
+ norwegian-ny "Fleire primærnyklar spesifisert"
+ pol "Zdefiniowano wiele kluczy podstawowych"
+ por "Definida mais de uma chave primária"
+ rum "Chei primare definite de mai multe ori"
+ rus "Указано несколько первичных ключей"
+ serbian "Definisani višestruki primarni ključevi"
+ slo "Zadefinovaných viac primárnych kľúčov"
+ spa "Multiples claves primarias definidas"
+ swe "Flera PRIMARY KEY använda"
+ ukr "Первинного ключа визначено неодноразово"
+ER_TOO_MANY_KEYS 42000 S1009
+ cze "Zad-Báno příliÅ¡ mnoho klíÄů, je povoleno nejvíce %d klíÄů"
+ dan "For mange nøgler specificeret. Kun %d nøgler må bruges"
+ nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan"
+ eng "Too many keys specified; max %d keys allowed"
+ jps "key ã®æŒ‡å®šãŒå¤šã™ãŽã¾ã™. key ã¯æœ€å¤§ %d ã¾ã§ã§ã™",
+ est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit"
+ fre "Trop de clefs sont définies. Maximum de %d clefs alloué"
+ ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt"
+ greek "ΠάÏα πολλά key οÏίσθηκαν. Το Ï€Î¿Î»Ï %d επιτÏέπονται"
+ hun "Tul sok kulcs. Maximum %d kulcs engedelyezett."
+ ita "Troppe chiavi. Sono ammesse max %d chiavi"
+ jpn "key ã®æŒ‡å®šãŒå¤šã™ãŽã¾ã™. key ã¯æœ€å¤§ %d ã¾ã§ã§ã™"
+ kor "너무 ë§Žì€ í‚¤ê°€ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %dì˜ í‚¤ê°€ 가능함"
+ nor "For mange nøkler spesifisert. Maks %d nøkler tillatt"
+ norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt"
+ pol "Okre?lono zbyt wiele kluczy. Dostępnych jest maksymalnie %d kluczy"
+ por "Especificadas chaves demais. O máximo permitido são %d chaves"
+ rum "Prea multe chei. Numarul de chei maxim este %d"
+ rus "Указано слишком много ключей. Разрешается указывать не более %d ключей"
+ serbian "Navedeno je previše ključeva. Maksimum %d ključeva je dozvoljeno"
+ slo "Zadaných ríliš veľa kľúčov. Najviac %d kľúčov je povolených"
+ spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas"
+ swe "För många nycklar använda. Man får ha högst %d nycklar"
+ ukr "Забагато ключів зазначено. Дозволено не більше %d ключів"
+ER_TOO_MANY_KEY_PARTS 42000 S1009
+ cze "Zad-Báno příliÅ¡ mnoho Äást klíÄů, je povoleno nejvíce %d Äástí"
+ dan "For mange nøgledele specificeret. Kun %d dele må bruges"
+ nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan"
+ eng "Too many key parts specified; max %d parts allowed"
+ est "Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa"
+ fre "Trop de parties specifiées dans la clef. Maximum de %d parties"
+ ger "Zu viele Teilschlüssel definiert. Maximal %d Teilschlüssel erlaubt"
+ greek "ΠάÏα πολλά key parts οÏίσθηκαν. Το Ï€Î¿Î»Ï %d επιτÏέπονται"
+ hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett"
+ ita "Troppe parti di chiave specificate. Sono ammesse max %d parti"
+ kor "너무 ë§Žì€ í‚¤ 부분(parts)ë“¤ì´ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %d ë¶€ë¶„ì´ ê°€ëŠ¥í•¨"
+ nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt"
+ norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt"
+ pol "Okre?lono zbyt wiele czę?ci klucza. Dostępnych jest maksymalnie %d czę?ci"
+ por "Especificadas partes de chave demais. O máximo permitido são %d partes"
+ rum "Prea multe chei. Numarul de chei maxim este %d"
+ rus "Указано слишком много частей составного ключа. Разрешается указывать не более %d частей"
+ serbian "Navedeno je previše delova ključa. Maksimum %d delova je dozvoljeno"
+ slo "Zadaných ríliš veľa častí kľúčov. Je povolených najviac %d častí"
+ spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas"
+ swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar"
+ ukr "Забагато частин ключа зазначено. Дозволено не більше %d частин"
+ER_TOO_LONG_KEY 42000 S1009
+ cze "Zadan-Bý klÃ­Ä byl příliÅ¡ dlouhý, nejvÄ›tší délka klíÄe je %d"
+ dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d"
+ nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d"
+ eng "Specified key was too long; max key length is %d bytes"
+ jps "key ãŒé•·ã™ãŽã¾ã™. key ã®é•·ã•ã¯æœ€å¤§ %d ã§ã™",
+ est "Võti on liiga pikk. Maksimaalne võtmepikkus on %d"
+ fre "La clé est trop longue. Longueur maximale: %d"
+ ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d"
+ greek "Το κλειδί που οÏίσθηκε είναι Ï€Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿. Το μέγιστο μήκος είναι %d"
+ hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d"
+ ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d"
+ jpn "key ãŒé•·ã™ãŽã¾ã™. key ã®é•·ã•ã¯æœ€å¤§ %d ã§ã™"
+ kor "ì •ì˜ëœ 키가 너무 ê¹ë‹ˆë‹¤. 최대 í‚¤ì˜ ê¸¸ì´ëŠ” %d입니다."
+ nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d"
+ norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d"
+ pol "Zdefinowany klucz jest zbyt długi. Maksymaln? długo?ci? klucza jest %d"
+ por "Chave especificada longa demais. O comprimento de chave máximo permitido é %d"
+ rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d"
+ rus "Указан слишком длинный ключ. Максимальная длина ключа составляет %d байт"
+ serbian "Navedeni ključ je predug. Maksimalna dužina ključa je %d"
+ slo "Zadaný kľúč je príliš dlhý, najväčšia dĺžka kľúča je %d"
+ spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d"
+ swe "För lång nyckel. Högsta tillåtna nyckellängd är %d"
+ ukr "Зазначений ключ задовгий. Найбільша довжина ключа %d байтів"
+ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009
+ cze "Kl-BíÄový sloupec '%-.192s' v tabulce neexistuje"
+ dan "Nøglefeltet '%-.192s' eksisterer ikke i tabellen"
+ nla "Zoeksleutel kolom '%-.192s' bestaat niet in tabel"
+ eng "Key column '%-.192s' doesn't exist in table"
+ jps "Key column '%-.192s' ãŒãƒ†ãƒ¼ãƒ–ルã«ã‚ã‚Šã¾ã›ã‚“.",
+ est "Võtme tulp '%-.192s' puudub tabelis"
+ fre "La clé '%-.192s' n'existe pas dans la table"
+ ger "In der Tabelle gibt es kein Schlüsselfeld '%-.192s'"
+ greek "Το πεδίο κλειδί '%-.192s' δεν υπάÏχει στον πίνακα"
+ hun "A(z) '%-.192s'kulcsoszlop nem letezik a tablaban"
+ ita "La colonna chiave '%-.192s' non esiste nella tabella"
+ jpn "Key column '%-.192s' ãŒãƒ†ãƒ¼ãƒ–ルã«ã‚ã‚Šã¾ã›ã‚“."
+ kor "Key 칼럼 '%-.192s'는 í…Œì´ë¸”ì— ì¡´ìž¬í•˜ì§€ 않습니다."
+ nor "Nøkkel felt '%-.192s' eksiterer ikke i tabellen"
+ norwegian-ny "Nykkel kolonne '%-.192s' eksiterar ikkje i tabellen"
+ pol "Kolumna '%-.192s' zdefiniowana w kluczu nie istnieje w tabeli"
+ por "Coluna chave '%-.192s' não existe na tabela"
+ rum "Coloana cheie '%-.192s' nu exista in tabela"
+ rus "Ключевой столбец '%-.192s' в таблице не существует"
+ serbian "Ključna kolona '%-.192s' ne postoji u tabeli"
+ slo "Kľúčový stĺpec '%-.192s' v tabuľke neexistuje"
+ spa "La columna clave '%-.192s' no existe en la tabla"
+ swe "Nyckelkolumn '%-.192s' finns inte"
+ ukr "Ключовий стовбець '%-.192s' не існує у таблиці"
+ER_BLOB_USED_AS_KEY 42000 S1009
+ cze "Blob sloupec '%-.192s' nem-Bůže být použit jako klíÄ"
+ dan "BLOB feltet '%-.192s' kan ikke bruges ved specifikation af indeks"
+ nla "BLOB kolom '%-.192s' kan niet gebruikt worden bij zoeksleutel specificatie"
+ eng "BLOB column '%-.192s' can't be used in key specification with the used table type"
+ est "BLOB-tüüpi tulpa '%-.192s' ei saa kasutada võtmena"
+ fre "Champ BLOB '%-.192s' ne peut être utilisé dans une clé"
+ ger "BLOB-Feld '%-.192s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden"
+ greek "Πεδίο Ï„Ïπου Blob '%-.192s' δεν μποÏεί να χÏησιμοποιηθεί στον οÏισμό ενός ÎºÎ»ÎµÎ¹Î´Î¹Î¿Ï (key specification)"
+ hun "Blob objektum '%-.192s' nem hasznalhato kulcskent"
+ ita "La colonna BLOB '%-.192s' non puo` essere usata nella specifica della chiave"
+ kor "BLOB 칼럼 '%-.192s'는 키 ì •ì˜ì—ì„œ ì‚¬ìš©ë  ìˆ˜ 없습니다."
+ nor "Blob felt '%-.192s' kan ikke brukes ved spesifikasjon av nøkler"
+ norwegian-ny "Blob kolonne '%-.192s' kan ikkje brukast ved spesifikasjon av nyklar"
+ pol "Kolumna typu Blob '%-.192s' nie może być użyta w specyfikacji klucza"
+ por "Coluna BLOB '%-.192s' não pode ser utilizada na especificação de chave para o tipo de tabela usado"
+ rum "Coloana de tip BLOB '%-.192s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit"
+ rus "Столбец типа BLOB '%-.192s' не может быть использован как значение ключа в таблице такого типа"
+ serbian "BLOB kolona '%-.192s' ne može biti upotrebljena za navođenje ključa sa tipom tabele koji se trenutno koristi"
+ slo "Blob pole '%-.192s' nemôže byť použité ako kľúč"
+ spa "La columna Blob '%-.192s' no puede ser usada en una declaracion de clave"
+ swe "En BLOB '%-.192s' kan inte vara nyckel med den använda tabelltypen"
+ ukr "BLOB стовбець '%-.192s' не може бути використаний у визначенні ключа в цьому типі таблиці"
+ER_TOO_BIG_FIELDLENGTH 42000 S1009
+ cze "P-Bříliš velká délka sloupce '%-.192s' (nejvíce %lu). Použijte BLOB"
+ dan "For stor feltlængde for kolonne '%-.192s' (maks = %lu). Brug BLOB i stedet"
+ nla "Te grote kolomlengte voor '%-.192s' (max = %lu). Maak hiervoor gebruik van het type BLOB"
+ eng "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead"
+ jps "column '%-.192s' ã¯,確ä¿ã™ã‚‹ column ã®å¤§ãã•ãŒå¤šã™ãŽã¾ã™. (最大 %lu ã¾ã§). BLOB ã‚’ã‹ã‚ã‚Šã«ä½¿ç”¨ã—ã¦ãã ã•ã„.",
+ est "Tulba '%-.192s' pikkus on liiga pikk (maksimaalne pikkus: %lu). Kasuta BLOB väljatüüpi"
+ fre "Champ '%-.192s' trop long (max = %lu). Utilisez un BLOB"
+ ger "Feldlänge für Feld '%-.192s' zu groß (maximal %lu). BLOB- oder TEXT-Spaltentyp verwenden!"
+ greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿ μήκος για το πεδίο '%-.192s' (max = %lu). ΠαÏακαλώ χÏησιμοποιείστε τον Ï„Ïπο BLOB"
+ hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %lu). Hasznaljon BLOB tipust inkabb."
+ ita "La colonna '%-.192s' e` troppo grande (max=%lu). Utilizza un BLOB."
+ jpn "column '%-.192s' ã¯,確ä¿ã™ã‚‹ column ã®å¤§ãã•ãŒå¤šã™ãŽã¾ã™. (最大 %lu ã¾ã§). BLOB ã‚’ã‹ã‚ã‚Šã«ä½¿ç”¨ã—ã¦ãã ã•ã„."
+ kor "칼럼 '%-.192s'ì˜ ì¹¼ëŸ¼ 길ì´ê°€ 너무 ê¹ë‹ˆë‹¤ (최대 = %lu). ëŒ€ì‹ ì— BLOB를 사용하세요."
+ nor "For stor nøkkellengde for kolonne '%-.192s' (maks = %lu). Bruk BLOB istedenfor"
+ norwegian-ny "For stor nykkellengde for felt '%-.192s' (maks = %lu). Bruk BLOB istadenfor"
+ pol "Zbyt duża długo?ć kolumny '%-.192s' (maks. = %lu). W zamian użyj typu BLOB"
+ por "Comprimento da coluna '%-.192s' grande demais (max = %lu); use BLOB em seu lugar"
+ rum "Lungimea coloanei '%-.192s' este prea lunga (maximum = %lu). Foloseste BLOB mai bine"
+ rus "Слишком большая длина столбца '%-.192s' (максимум = %lu). Используйте тип BLOB или TEXT вместо текущего"
+ serbian "Previše podataka za kolonu '%-.192s' (maksimum je %lu). Upotrebite BLOB polje"
+ slo "Príliš veľká dĺžka pre pole '%-.192s' (maximum = %lu). Použite BLOB"
+ spa "Longitud de columna demasiado grande para la columna '%-.192s' (maximo = %lu).Usar BLOB en su lugar"
+ swe "För stor kolumnlängd angiven för '%-.192s' (max= %lu). Använd en BLOB instället"
+ ukr "Задовга довжина стовбця '%-.192s' (max = %lu). Використайте тип BLOB"
+ER_WRONG_AUTO_KEY 42000 S1009
+ cze "M-Bůžete mít pouze jedno AUTO pole a to musí být definováno jako klíÄ"
+ dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret"
+ nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd."
+ eng "Incorrect table definition; there can be only one auto column and it must be defined as a key"
+ jps "テーブルã®å®šç¾©ãŒé•ã„ã¾ã™; there can be only one auto column and it must be defined as a key",
+ est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena"
+ fre "Un seul champ automatique est permis et il doit être indexé"
+ ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden"
+ greek "ΜποÏεί να υπάÏχει μόνο ένα auto field και Ï€Ïέπει να έχει οÏισθεί σαν key"
+ hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni."
+ ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave"
+ jpn "テーブルã®å®šç¾©ãŒé•ã„ã¾ã™; there can be only one auto column and it must be defined as a key"
+ kor "부정확한 í…Œì´ë¸” ì •ì˜; í…Œì´ë¸”ì€ í•˜ë‚˜ì˜ auto ì¹¼ëŸ¼ì´ ì¡´ìž¬í•˜ê³  키로 ì •ì˜ë˜ì–´ì ¸ì•¼ 합니다."
+ nor "Bare ett auto felt kan være definert som nøkkel."
+ norwegian-ny "Bare eitt auto felt kan være definert som nøkkel."
+ pol "W tabeli może być tylko jedno pole auto i musi ono być zdefiniowane jako klucz"
+ por "Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave"
+ rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie"
+ rus "Некорректное определение таблицы: может существовать только один автоинкрементный столбец, и он должен быть определен как ключ"
+ serbian "Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona ključa"
+ slo "Môžete mať iba jedno AUTO pole a to musí byť definované ako kľúč"
+ spa "Puede ser solamente un campo automatico y este debe ser definido como una clave"
+ swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel"
+ ukr "Невірне визначення таблиці; Може бути лише один автоматичний стовбець, що повинен бути визначений як ключ"
+ER_READY
+ cze "%s: p-Břipraven na spojení\nVersion: '%s' socket: '%s' port: %d""
+ dan "%s: klar til tilslutninger\nVersion: '%s' socket: '%s' port: %d""
+ nla "%s: klaar voor verbindingen\nVersion: '%s' socket: '%s' port: %d""
+ eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d"
+ jps "%s: 準備完了¥nVersion: '%s' socket: '%s' port: %d"",
+ est "%s: ootab ühendusi\nVersion: '%s' socket: '%s' port: %d""
+ fre "%s: Prêt pour des connexions\nVersion: '%s' socket: '%s' port: %d""
+ ger "%s: Bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d"
+ greek "%s: σε αναμονή συνδέσεων\nVersion: '%s' socket: '%s' port: %d""
+ hun "%s: kapcsolatra kesz\nVersion: '%s' socket: '%s' port: %d""
+ ita "%s: Pronto per le connessioni\nVersion: '%s' socket: '%s' port: %d""
+ jpn "%s: 準備完了\nVersion: '%s' socket: '%s' port: %d""
+ kor "%s: 연결 준비중입니다\nVersion: '%s' socket: '%s' port: %d""
+ nor "%s: klar for tilkoblinger\nVersion: '%s' socket: '%s' port: %d""
+ norwegian-ny "%s: klar for tilkoblingar\nVersion: '%s' socket: '%s' port: %d""
+ pol "%s: gotowe do poł?czenia\nVersion: '%s' socket: '%s' port: %d""
+ por "%s: Pronto para conexões\nVersion: '%s' socket: '%s' port: %d""
+ rum "%s: sint gata pentru conectii\nVersion: '%s' socket: '%s' port: %d""
+ rus "%s: Готов принимать соединения.\nВерсия: '%s' сокет: '%s' порт: %d"
+ serbian "%s: Spreman za konekcije\nVersion: '%s' socket: '%s' port: %d""
+ slo "%s: pripravený na spojenie\nVersion: '%s' socket: '%s' port: %d""
+ spa "%s: preparado para conexiones\nVersion: '%s' socket: '%s' port: %d""
+ swe "%s: klar att ta emot klienter\nVersion: '%s' socket: '%s' port: %d""
+ ukr "%s: Готовий для з'єднань!\nVersion: '%s' socket: '%s' port: %d""
+ER_NORMAL_SHUTDOWN
+ cze "%s: norm-Bální ukonÄení\n"
+ dan "%s: Normal nedlukning\n"
+ nla "%s: Normaal afgesloten \n"
+ eng "%s: Normal shutdown\n"
+ est "%s: MySQL lõpetas\n"
+ fre "%s: Arrêt normal du serveur\n"
+ ger "%s: Normal heruntergefahren\n"
+ greek "%s: Φυσιολογική διαδικασία shutdown\n"
+ hun "%s: Normal leallitas\n"
+ ita "%s: Shutdown normale\n"
+ kor "%s: ì •ìƒì ì¸ shutdown\n"
+ nor "%s: Normal avslutning\n"
+ norwegian-ny "%s: Normal nedkopling\n"
+ pol "%s: Standardowe zakończenie działania\n"
+ por "%s: 'Shutdown' normal\n"
+ rum "%s: Terminare normala\n"
+ rus "%s: Корректная остановка\n"
+ serbian "%s: Normalno gašenje\n"
+ slo "%s: normálne ukončenie\n"
+ spa "%s: Apagado normal\n"
+ swe "%s: Normal avslutning\n"
+ ukr "%s: Нормальне завершення\n"
+ER_GOT_SIGNAL
+ cze "%s: p-BÅ™ijat signal %d, konÄím\n"
+ dan "%s: Fangede signal %d. Afslutter!!\n"
+ nla "%s: Signaal %d. Systeem breekt af!\n"
+ eng "%s: Got signal %d. Aborting!\n"
+ jps "%s: Got signal %d. 中断!¥n",
+ est "%s: sain signaali %d. Lõpetan!\n"
+ fre "%s: Reçu le signal %d. Abandonne!\n"
+ ger "%s: Signal %d erhalten. Abbruch!\n"
+ greek "%s: Ελήφθη το μήνυμα %d. Η διαδικασία εγκαταλείπεται!\n"
+ hun "%s: %d jelzes. Megszakitva!\n"
+ ita "%s: Ricevuto segnale %d. Interruzione!\n"
+ jpn "%s: Got signal %d. 中断!\n"
+ kor "%s: %d 신호가 들어왔ìŒ. 중지!\n"
+ nor "%s: Oppdaget signal %d. Avslutter!\n"
+ norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n"
+ pol "%s: Otrzymano sygnał %d. Kończenie działania!\n"
+ por "%s: Obteve sinal %d. Abortando!\n"
+ rum "%s: Semnal %d obtinut. Aborting!\n"
+ rus "%s: Получен сигнал %d. Прекращаем!\n"
+ serbian "%s: Dobio signal %d. Prekidam!\n"
+ slo "%s: prijatý signál %d, ukončenie (Abort)!\n"
+ spa "%s: Recibiendo signal %d. Abortando!\n"
+ swe "%s: Fick signal %d. Avslutar!\n"
+ ukr "%s: Отримано сигнал %d. Перериваюсь!\n"
+ER_SHUTDOWN_COMPLETE
+ cze "%s: ukon-BÄení práce hotovo\n"
+ dan "%s: Server lukket\n"
+ nla "%s: Afsluiten afgerond\n"
+ eng "%s: Shutdown complete\n"
+ jps "%s: Shutdown 完了¥n",
+ est "%s: Lõpp\n"
+ fre "%s: Arrêt du serveur terminé\n"
+ ger "%s: Herunterfahren beendet\n"
+ greek "%s: Η διαδικασία Shutdown ολοκληÏώθηκε\n"
+ hun "%s: A leallitas kesz\n"
+ ita "%s: Shutdown completato\n"
+ jpn "%s: Shutdown 完了\n"
+ kor "%s: Shutdown ì´ ì™„ë£Œë¨!\n"
+ nor "%s: Avslutning komplett\n"
+ norwegian-ny "%s: Nedkopling komplett\n"
+ pol "%s: Zakończenie działania wykonane\n"
+ por "%s: 'Shutdown' completo\n"
+ rum "%s: Terminare completa\n"
+ rus "%s: Остановка завершена\n"
+ serbian "%s: Gašenje završeno\n"
+ slo "%s: práca ukončená\n"
+ spa "%s: Apagado completado\n"
+ swe "%s: Avslutning klar\n"
+ ukr "%s: Роботу завершено\n"
+ER_FORCING_CLOSE 08S01
+ cze "%s: n-Básilné uzavření threadu %ld uživatele '%-.48s'\n"
+ dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.48s'\n"
+ nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.48s'\n"
+ eng "%s: Forcing close of thread %ld user: '%-.48s'\n"
+ jps "%s: スレッド %ld 強制終了 user: '%-.48s'¥n",
+ est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.48s'\n"
+ fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.48s'\n"
+ ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.48s'\n"
+ greek "%s: Το thread θα κλείσει %ld user: '%-.48s'\n"
+ hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.48s'\n"
+ ita "%s: Forzata la chiusura del thread %ld utente: '%-.48s'\n"
+ jpn "%s: スレッド %ld 強制終了 user: '%-.48s'\n"
+ kor "%s: thread %ldì˜ ê°•ì œ 종료 user: '%-.48s'\n"
+ nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.48s'\n"
+ norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.48s'\n"
+ pol "%s: Wymuszenie zamknięcia w?tku %ld użytkownik: '%-.48s'\n"
+ por "%s: Forçando finalização da 'thread' %ld - usuário '%-.48s'\n"
+ rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.48s'\n"
+ rus "%s: Принудительно закрываем поток %ld пользователя: '%-.48s'\n"
+ serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.48s'\n"
+ slo "%s: násilné ukončenie vlákna %ld užívateľa '%-.48s'\n"
+ spa "%s: Forzando a cerrar el thread %ld usuario: '%-.48s'\n"
+ swe "%s: Stänger av tråd %ld; användare: '%-.48s'\n"
+ ukr "%s: Прискорюю закриття гілки %ld користувача: '%-.48s'\n"
+ER_IPSOCK_ERROR 08S01
+ cze "Nemohu vytvo-Břit IP socket"
+ dan "Kan ikke oprette IP socket"
+ nla "Kan IP-socket niet openen"
+ eng "Can't create IP socket"
+ jps "IP socket ãŒä½œã‚Œã¾ã›ã‚“",
+ est "Ei suuda luua IP socketit"
+ fre "Ne peut créer la connexion IP (socket)"
+ ger "Kann IP-Socket nicht erzeugen"
+ greek "Δεν είναι δυνατή η δημιουÏγία IP socket"
+ hun "Az IP socket nem hozhato letre"
+ ita "Impossibile creare il socket IP"
+ jpn "IP socket ãŒä½œã‚Œã¾ã›ã‚“"
+ kor "IP ì†Œì¼“ì„ ë§Œë“¤ì§€ 못했습니다."
+ nor "Kan ikke opprette IP socket"
+ norwegian-ny "Kan ikkje opprette IP socket"
+ pol "Nie można stworzyć socket'u IP"
+ por "Não pode criar o soquete IP"
+ rum "Nu pot crea IP socket"
+ rus "Невозможно создать IP-сокет"
+ serbian "Ne mogu da kreiram IP socket"
+ slo "Nemôžem vytvoriť IP socket"
+ spa "No puedo crear IP socket"
+ swe "Kan inte skapa IP-socket"
+ ukr "Не можу створити IP роз'єм"
+ER_NO_SUCH_INDEX 42S12 S1009
+ cze "Tabulka '%-.192s' nem-Bá index odpovídající CREATE INDEX. Vytvořte tabulku znovu"
+ dan "Tabellen '%-.192s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen"
+ nla "Tabel '%-.192s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw"
+ eng "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table"
+ jps "Table '%-.192s' ã¯ãã®ã‚ˆã†ãª index ã‚’æŒã£ã¦ã„ã¾ã›ã‚“(CREATE INDEX 実行時ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“). テーブルを作り直ã—ã¦ãã ã•ã„",
+ est "Tabelil '%-.192s' puuduvad võtmed. Loo tabel uuesti"
+ fre "La table '%-.192s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table"
+ ger "Tabelle '%-.192s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen"
+ greek "Ο πίνακας '%-.192s' δεν έχει ευÏετήÏιο (index) σαν αυτό που χÏησιμοποιείτε στην CREATE INDEX. ΠαÏακαλώ, ξαναδημιουÏγήστε τον πίνακα"
+ hun "A(z) '%-.192s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat"
+ ita "La tabella '%-.192s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella"
+ jpn "Table '%-.192s' ã¯ãã®ã‚ˆã†ãª index ã‚’æŒã£ã¦ã„ã¾ã›ã‚“(CREATE INDEX 実行時ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“). テーブルを作り直ã—ã¦ãã ã•ã„"
+ kor "í…Œì´ë¸” '%-.192s'는 ì¸ë±ìŠ¤ë¥¼ 만들지 않았습니다. alter í…Œì´ë¸”ëª…ë ¹ì„ ì´ìš©í•˜ì—¬ í…Œì´ë¸”ì„ ìˆ˜ì •í•˜ì„¸ìš”..."
+ nor "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen"
+ norwegian-ny "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt"
+ pol "Tabela '%-.192s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelę"
+ por "Tabela '%-.192s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela"
+ rum "Tabela '%-.192s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela"
+ rus "В таблице '%-.192s' нет такого индекса, как в CREATE INDEX. Создайте таблицу заново"
+ serbian "Tabela '%-.192s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo"
+ slo "Tabuľka '%-.192s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova"
+ spa "La tabla '%-.192s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla"
+ swe "Tabellen '%-.192s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen"
+ ukr "Таблиця '%-.192s' має індекс, що не співпадає з вказанним у CREATE INDEX. Створіть таблицю знову"
+ER_WRONG_FIELD_TERMINATORS 42000 S1009
+ cze "Argument separ-Bátoru položek nebyl oÄekáván. PÅ™eÄtÄ›te si manuál"
+ dan "Felt adskiller er ikke som forventet, se dokumentationen"
+ nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding"
+ eng "Field separator argument is not what is expected; check the manual"
+ est "Väljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga"
+ fre "Séparateur de champs inconnu. Vérifiez dans le manuel"
+ ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen"
+ greek "Ο διαχωÏιστής πεδίων δεν είναι αυτός που αναμενόταν. ΠαÏακαλώ ανατÏέξτε στο manual"
+ hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!"
+ ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale"
+ kor "í•„ë“œ êµ¬ë¶„ìž ì¸ìˆ˜ë“¤ì´ 완전하지 않습니다. ë©”ë‰´ì–¼ì„ ì°¾ì•„ 보세요."
+ nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen"
+ norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen"
+ pol "Nie oczekiwano separatora. SprawdĽ podręcznik"
+ por "Argumento separador de campos não é o esperado. Cheque o manual"
+ rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul"
+ rus "Аргумент разделителя полей - не тот, который ожидался. Обращайтесь к документации"
+ serbian "Argument separatora polja nije ono što se očekivalo. Proverite uputstvo MySQL server-a"
+ slo "Argument oddeľovač polí nezodpovedá požiadavkám. Skontrolujte v manuáli"
+ spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual"
+ swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen"
+ ukr "Хибний розділювач полів. Почитайте документацію"
+ER_BLOBS_AND_NO_TERMINATED 42000 S1009
+ cze "Nen-Bí možné použít pevný rowlength s BLOBem. Použijte 'fields terminated by'."
+ dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'."
+ nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'."
+ eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'"
+ est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang."
+ fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'."
+ ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden"
+ greek "Δεν μποÏείτε να χÏησιμοποιήσετε fixed rowlength σε BLOBs. ΠαÏακαλώ χÏησιμοποιείστε 'fields terminated by'."
+ hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ."
+ ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'."
+ jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'."
+ kor "BLOB로는 고정길ì´ì˜ lowlength를 사용할 수 없습니다. 'fields terminated by'를 사용하세요."
+ nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
+ norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
+ pol "Nie można użyć stałej długo?ci wiersza z polami typu BLOB. Użyj 'fields terminated by'."
+ por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado."
+ rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'."
+ rus "Фиксированный размер записи с полями типа BLOB использовать нельзя, применяйте 'fields terminated by'"
+ serbian "Ne možete koristiti fiksnu veličinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju."
+ slo "Nie je možné použiť fixnú dĺžku s BLOBom. Použite 'fields terminated by'."
+ spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '."
+ swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'"
+ ukr "Не можна використовувати сталу довжину строки з BLOB. Зкористайтеся 'fields terminated by'"
+ER_TEXTFILE_NOT_READABLE
+ cze "Soubor '%-.128s' mus-Bí být v adresáři databáze nebo Äitelný pro vÅ¡echny"
+ dan "Filen '%-.128s' skal være i database-folderen og kunne læses af alle"
+ nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn."
+ eng "The file '%-.128s' must be in the database directory or be readable by all"
+ jps "ファイル '%-.128s' 㯠databse ã® directory ã«ã‚ã‚‹ã‹å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒèª­ã‚るよã†ã«è¨±å¯ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“.",
+ est "Fail '%-.128s' peab asuma andmebaasi kataloogis või olema kõigile loetav"
+ fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous"
+ ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein"
+ greek "Το αÏχείο '%-.128s' Ï€Ïέπει να υπάÏχει στο database directory ή να μποÏεί να διαβαστεί από όλους"
+ hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak"
+ ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti"
+ jpn "ファイル '%-.128s' 㯠databse ã® directory ã«ã‚ã‚‹ã‹å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒèª­ã‚るよã†ã«è¨±å¯ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“."
+ kor "'%-.128s' í™”ì¼ëŠ” ë°ì´íƒ€ë² ì´ìŠ¤ ë””ë ‰í† ë¦¬ì— ì¡´ìž¬í•˜ê±°ë‚˜ 모ë‘ì—게 ì½ê¸° 가능하여야 합니다."
+ nor "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle"
+ norwegian-ny "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle"
+ pol "Plik '%-.128s' musi znajdować sie w katalogu bazy danych lub mieć prawa czytania przez wszystkich"
+ por "Arquivo '%-.128s' tem que estar no diretório do banco de dados ou ter leitura possível para todos"
+ rum "Fisierul '%-.128s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)"
+ rus "Файл '%-.128s' должен находиться в том же каталоге, что и база данных, или быть общедоступным для чтения"
+ serbian "File '%-.128s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuća prava pristupa"
+ slo "Súbor '%-.128s' musí byť v adresári databázy, alebo čitateľný pre všetkých"
+ spa "El archivo '%-.128s' debe estar en el directorio de la base de datos o ser de lectura por todos"
+ swe "Textfilen '%-.128s' måste finnas i databasbiblioteket eller vara läsbar för alla"
+ ukr "Файл '%-.128s' повинен бути у теці бази данних або мати встановлене право на читання для усіх"
+ER_FILE_EXISTS_ERROR
+ cze "Soubor '%-.200s' ji-Bž existuje"
+ dan "Filen '%-.200s' eksisterer allerede"
+ nla "Het bestand '%-.200s' bestaat reeds"
+ eng "File '%-.200s' already exists"
+ jps "File '%-.200s' ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™",
+ est "Fail '%-.200s' juba eksisteerib"
+ fre "Le fichier '%-.200s' existe déjà"
+ ger "Datei '%-.200s' bereits vorhanden"
+ greek "Το αÏχείο '%-.200s' υπάÏχει ήδη"
+ hun "A '%-.200s' file mar letezik."
+ ita "Il file '%-.200s' esiste gia`"
+ jpn "File '%-.200s' ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™"
+ kor "'%-.200s' í™”ì¼ì€ ì´ë¯¸ 존재합니다."
+ nor "Filen '%-.200s' eksisterte allerede"
+ norwegian-ny "Filen '%-.200s' eksisterte allereide"
+ pol "Plik '%-.200s' już istnieje"
+ por "Arquivo '%-.200s' já existe"
+ rum "Fisierul '%-.200s' exista deja"
+ rus "Файл '%-.200s' уже существует"
+ serbian "File '%-.200s' već postoji"
+ slo "Súbor '%-.200s' už existuje"
+ spa "El archivo '%-.200s' ya existe"
+ swe "Filen '%-.200s' existerar redan"
+ ukr "Файл '%-.200s' вже існує"
+ER_LOAD_INFO
+ cze "Z-Báznamů: %ld Vymazáno: %ld PÅ™eskoÄeno: %ld Varování: %ld"
+ dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld"
+ nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld"
+ eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld"
+ jps "レコード数: %ld 削除: %ld Skipped: %ld Warnings: %ld",
+ est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld"
+ fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld"
+ ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld"
+ greek "ΕγγÏαφές: %ld ΔιαγÏαφές: %ld ΠαÏεκάμφθησαν: %ld ΠÏοειδοποιήσεις: %ld"
+ hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld"
+ ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld"
+ jpn "レコード数: %ld 削除: %ld Skipped: %ld Warnings: %ld"
+ kor "레코드: %ld개 삭제: %ld개 스킵: %ld개 경고: %ld개"
+ nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld"
+ norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Ã…tvaringar: %ld"
+ pol "Recordów: %ld Usuniętych: %ld Pominiętych: %ld Ostrzeżeń: %ld"
+ por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld"
+ rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld"
+ rus "Записей: %ld Удалено: %ld Пропущено: %ld Предупреждений: %ld"
+ serbian "Slogova: %ld Izbrisano: %ld Preskočeno: %ld Upozorenja: %ld"
+ slo "Záznamov: %ld Zmazaných: %ld Preskočených: %ld Varovania: %ld"
+ spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld"
+ swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld"
+ ukr "Записів: %ld Видалено: %ld Пропущено: %ld Застережень: %ld"
+ER_ALTER_INFO
+ cze "Z-Báznamů: %ld Zdvojených: %ld"
+ dan "Poster: %ld Ens: %ld"
+ nla "Records: %ld Dubbel: %ld"
+ eng "Records: %ld Duplicates: %ld"
+ jps "レコード数: %ld é‡è¤‡: %ld",
+ est "Kirjeid: %ld Kattuvaid: %ld"
+ fre "Enregistrements: %ld Doublons: %ld"
+ ger "Datensätze: %ld Duplikate: %ld"
+ greek "ΕγγÏαφές: %ld Επαναλήψεις: %ld"
+ hun "Rekordok: %ld Duplikalva: %ld"
+ ita "Records: %ld Duplicati: %ld"
+ jpn "レコード数: %ld é‡è¤‡: %ld"
+ kor "레코드: %ld개 중복: %ld개"
+ nor "Poster: %ld Like: %ld"
+ norwegian-ny "Poster: %ld Like: %ld"
+ pol "Rekordów: %ld Duplikatów: %ld"
+ por "Registros: %ld - Duplicados: %ld"
+ rum "Recorduri: %ld Duplicate: %ld"
+ rus "Записей: %ld Дубликатов: %ld"
+ serbian "Slogova: %ld Duplikata: %ld"
+ slo "Záznamov: %ld Opakovaných: %ld"
+ spa "Registros: %ld Duplicados: %ld"
+ swe "Rader: %ld Dubletter: %ld"
+ ukr "Записів: %ld Дублікатів: %ld"
+ER_WRONG_SUB_KEY
+ cze "Chybn-Bá podÄást klíÄe -- není to Å™etÄ›zec nebo je delší než délka Äásti klíÄe"
+ dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden"
+ nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel"
+ eng "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys"
+ est "Vigane võtme osa. Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid"
+ fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef"
+ ger "Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder die Speicher-Engine unterstützt keine Unterteilschlüssel"
+ greek "Εσφαλμένο sub part key. Το χÏησιμοποιοÏμενο key part δεν είναι string ή το μήκος του είναι μεγαλÏτεÏο"
+ hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz"
+ ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave."
+ jpn "Incorrect prefix key; the used key part isn't a string or the used length is longer than the key part"
+ kor "부정확한 서버 파트 키. ì‚¬ìš©ëœ í‚¤ 파트가 스트ë§ì´ 아니거나 키 íŒŒíŠ¸ì˜ ê¸¸ì´ê°€ 너무 ê¹ë‹ˆë‹¤."
+ nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden"
+ norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden"
+ pol "Błędna podczę?ć klucza. Użyta czę?ć klucza nie jest łańcuchem lub użyta długo?ć jest większa niż czę?ć klucza"
+ por "Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas"
+ rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii"
+ rus "Некорректная часть ключа. Используемая часть ключа не является строкой, указанная длина больше, чем длина части ключа, или обработчик таблицы не поддерживает уникальные части ключа"
+ serbian "Pogrešan pod-ključ dela ključa. Upotrebljeni deo ključa nije string, upotrebljena dužina je veća od dela ključa ili handler tabela ne podržava jedinstvene pod-ključeve"
+ slo "Incorrect prefix key; the used key part isn't a string or the used length is longer than the key part"
+ spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave"
+ swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden"
+ ukr "Невірна частина ключа. Використана частина ключа не є строкою, задовга або вказівник таблиці не підтримує унікальних частин ключей"
+ER_CANT_REMOVE_ALL_FIELDS 42000
+ cze "Nen-Bí možné vymazat všechny položky s ALTER TABLE. Použijte DROP TABLE"
+ dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet."
+ nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!"
+ eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead"
+ jps "ALTER TABLE ã§å…¨ã¦ã® column ã¯å‰Šé™¤ã§ãã¾ã›ã‚“. DROP TABLE を使用ã—ã¦ãã ã•ã„",
+ est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil"
+ fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE"
+ ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden"
+ greek "Δεν είναι δυνατή η διαγÏαφή όλων των πεδίων με ALTER TABLE. ΠαÏακαλώ χÏησιμοποιείστε DROP TABLE"
+ hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette"
+ ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE"
+ jpn "ALTER TABLE ã§å…¨ã¦ã® column ã¯å‰Šé™¤ã§ãã¾ã›ã‚“. DROP TABLE を使用ã—ã¦ãã ã•ã„"
+ kor "ALTER TABLE 명령으로는 모든 ì¹¼ëŸ¼ì„ ì§€ìš¸ 수 없습니다. DROP TABLE ëª…ë ¹ì„ ì´ìš©í•˜ì„¸ìš”."
+ nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden."
+ norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor."
+ pol "Nie można usun?ć wszystkich pól wykorzystuj?c ALTER TABLE. W zamian użyj DROP TABLE"
+ por "Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar"
+ rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb"
+ rus "Нельзя удалить все столбцы с помощью ALTER TABLE. Используйте DROP TABLE"
+ serbian "Ne možete da izbrišete sve kolone pomoću komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite"
+ slo "One nemôžem zmazať all fields with ALTER TABLE; use DROP TABLE instead"
+ spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo"
+ swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället"
+ ukr "Не можливо видалити всі стовбці за допомогою ALTER TABLE. Для цього скористайтеся DROP TABLE"
+ER_CANT_DROP_FIELD_OR_KEY 42000
+ cze "Nemohu zru-BÅ¡it '%-.192s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíÄe"
+ dan "Kan ikke udføre DROP '%-.192s'. Undersøg om feltet/nøglen eksisterer."
+ nla "Kan '%-.192s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat."
+ eng "Can't DROP '%-.192s'; check that column/key exists"
+ jps "'%-.192s' を破棄ã§ãã¾ã›ã‚“ã§ã—ãŸ; check that column/key exists",
+ est "Ei suuda kustutada '%-.192s'. Kontrolli kas tulp/võti eksisteerib"
+ fre "Ne peut effacer (DROP) '%-.192s'. Vérifiez s'il existe"
+ ger "Kann '%-.192s' nicht löschen. Existiert die Spalte oder der Schlüssel?"
+ greek "ΑδÏνατη η διαγÏαφή (DROP) '%-.192s'. ΠαÏακαλώ ελέγξτε αν το πεδίο/κλειδί υπάÏχει"
+ hun "A DROP '%-.192s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e"
+ ita "Impossibile cancellare '%-.192s'. Controllare che il campo chiave esista"
+ jpn "'%-.192s' を破棄ã§ãã¾ã›ã‚“ã§ã—ãŸ; check that column/key exists"
+ kor "'%-.192s'를 DROPí•  수 없습니다. 칼럼ì´ë‚˜ 키가 존재하는지 채í¬í•˜ì„¸ìš”."
+ nor "Kan ikke DROP '%-.192s'. Undersøk om felt/nøkkel eksisterer."
+ norwegian-ny "Kan ikkje DROP '%-.192s'. Undersøk om felt/nøkkel eksisterar."
+ pol "Nie można wykonać operacji DROP '%-.192s'. SprawdĽ, czy to pole/klucz istnieje"
+ por "Não se pode fazer DROP '%-.192s'. Confira se esta coluna/chave existe"
+ rum "Nu pot sa DROP '%-.192s'. Verifica daca coloana/cheia exista"
+ rus "Ðевозможно удалить (DROP) '%-.192s'. УбедитеÑÑŒ что Ñтолбец/ключ дейÑтвительно ÑущеÑтвует"
+ serbian "Ne mogu da izvrÅ¡im komandu drop 'DROP' na '%-.192s'. Proverite da li ta kolona (odnosno kljuÄ) postoji"
+ slo "Nemôžem zruÅ¡iÅ¥ (DROP) '%-.192s'. Skontrolujte, Äi neexistujú záznamy/kľúÄe"
+ spa "No puedo ELIMINAR '%-.192s'. compuebe que el campo/clave existe"
+ swe "Kan inte ta bort '%-.192s'. Kontrollera att fältet/nyckel finns"
+ ukr "Ðе можу DROP '%-.192s'. Перевірте, чи цей Ñтовбець/ключ Ñ–Ñнує"
+ER_INSERT_INFO
+ cze "Z-Báznamů: %ld Zdvojených: %ld Varování: %ld"
+ dan "Poster: %ld Ens: %ld Advarsler: %ld"
+ nla "Records: %ld Dubbel: %ld Waarschuwing: %ld"
+ eng "Records: %ld Duplicates: %ld Warnings: %ld"
+ jps "レコード数: %ld é‡è¤‡æ•°: %ld Warnings: %ld",
+ est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld"
+ fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld"
+ ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld"
+ greek "ΕγγÏαφές: %ld Επαναλήψεις: %ld ΠÏοειδοποιήσεις: %ld"
+ hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld"
+ ita "Records: %ld Duplicati: %ld Avvertimenti: %ld"
+ jpn "レコード数: %ld é‡è¤‡æ•°: %ld Warnings: %ld"
+ kor "레코드: %ld개 중복: %ld개 경고: %ld개"
+ nor "Poster: %ld Like: %ld Advarsler: %ld"
+ norwegian-ny "Postar: %ld Like: %ld Ã…tvaringar: %ld"
+ pol "Rekordów: %ld Duplikatów: %ld Ostrzeżeń: %ld"
+ por "Registros: %ld - Duplicados: %ld - Avisos: %ld"
+ rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld"
+ rus "ЗапиÑей: %ld Дубликатов: %ld Предупреждений: %ld"
+ serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld"
+ slo "Záznamov: %ld Opakovaných: %ld Varovania: %ld"
+ spa "Registros: %ld Duplicados: %ld Peligros: %ld"
+ swe "Rader: %ld Dubletter: %ld Varningar: %ld"
+ ukr "ЗапиÑів: %ld Дублікатів: %ld ЗаÑтережень: %ld"
+ER_UPDATE_TABLE_USED
+ eng "You can't specify target table '%-.192s' for update in FROM clause"
+ ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.192s' ist in der FROM-Klausel nicht zulässig."
+ rus "Ðе допуÑкаетÑÑ ÑƒÐºÐ°Ð·Ð°Ð½Ð¸Ðµ таблицы '%-.192s' в ÑпиÑке таблиц FROM Ð´Ð»Ñ Ð²Ð½ÐµÑÐµÐ½Ð¸Ñ Ð² нее изменений"
+ swe "INSERT-table '%-.192s' får inte finnas i FROM tabell-listan"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' що змінюєтьÑÑ Ð½Ðµ дозволена у переліку таблиць FROM"
+ER_NO_SUCH_THREAD
+ cze "Nezn-Bámá identifikace threadu: %lu"
+ dan "Ukendt tråd id: %lu"
+ nla "Onbekend thread id: %lu"
+ eng "Unknown thread id: %lu"
+ jps "thread id: %lu ã¯ã‚ã‚Šã¾ã›ã‚“",
+ est "Tundmatu lõim: %lu"
+ fre "Numéro de tâche inconnu: %lu"
+ ger "Unbekannte Thread-ID: %lu"
+ greek "Αγνωστο thread id: %lu"
+ hun "Ervenytelen szal (thread) id: %lu"
+ ita "Thread id: %lu sconosciuto"
+ jpn "thread id: %lu ã¯ã‚ã‚Šã¾ã›ã‚“"
+ kor "알수 없는 쓰레드 id: %lu"
+ nor "Ukjent tråd id: %lu"
+ norwegian-ny "Ukjent tråd id: %lu"
+ pol "Nieznany identyfikator w?tku: %lu"
+ por "'Id' de 'thread' %lu desconhecido"
+ rum "Id-ul: %lu thread-ului este necunoscut"
+ rus "ÐеизвеÑтный номер потока: %lu"
+ serbian "Nepoznat thread identifikator: %lu"
+ slo "Neznáma identifikácia vlákna: %lu"
+ spa "Identificador del thread: %lu desconocido"
+ swe "Finns ingen tråd med id %lu"
+ ukr "Ðевідомий ідентифікатор гілки: %lu"
+ER_KILL_DENIED_ERROR
+ cze "Nejste vlastn-Bíkem threadu %lu"
+ dan "Du er ikke ejer af tråden %lu"
+ nla "U bent geen bezitter van thread %lu"
+ eng "You are not owner of thread %lu"
+ jps "thread %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“",
+ est "Ei ole lõime %lu omanik"
+ fre "Vous n'êtes pas propriétaire de la tâche no: %lu"
+ ger "Sie sind nicht Eigentümer von Thread %lu"
+ greek "Δεν είσθε owner του thread %lu"
+ hun "A %lu thread-nek mas a tulajdonosa"
+ ita "Utente non proprietario del thread %lu"
+ jpn "thread %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
+ kor "쓰레드(Thread) %luì˜ ì†Œìœ ìžê°€ 아닙니다."
+ nor "Du er ikke eier av tråden %lu"
+ norwegian-ny "Du er ikkje eigar av tråd %lu"
+ pol "Nie jeste? wła?cicielem w?tku %lu"
+ por "Você não é proprietário da 'thread' %lu"
+ rum "Nu sinteti proprietarul threadului %lu"
+ rus "Ð’Ñ‹ не ÑвлÑетеÑÑŒ владельцем потока %lu"
+ serbian "Vi niste vlasnik thread-a %lu"
+ slo "Nie ste vlastníkom vlákna %lu"
+ spa "Tu no eres el propietario del thread%lu"
+ swe "Du är inte ägare till tråd %lu"
+ ukr "Ви не володар гілки %lu"
+ER_NO_TABLES_USED
+ cze "Nejsou pou-Bžity žádné tabulky"
+ dan "Ingen tabeller i brug"
+ nla "Geen tabellen gebruikt."
+ eng "No tables used"
+ est "Ãœhtegi tabelit pole kasutusel"
+ fre "Aucune table utilisée"
+ ger "Keine Tabellen verwendet"
+ greek "Δεν χÏησιμοποιήθηκαν πίνακες"
+ hun "Nincs hasznalt tabla"
+ ita "Nessuna tabella usata"
+ kor "ì–´ë–¤ í…Œì´ë¸”ë„ ì‚¬ìš©ë˜ì§€ 않았습니다."
+ nor "Ingen tabeller i bruk"
+ norwegian-ny "Ingen tabellar i bruk"
+ pol "Nie ma żadej użytej tabeli"
+ por "Nenhuma tabela usada"
+ rum "Nici o tabela folosita"
+ rus "Ðикакие таблицы не иÑпользованы"
+ serbian "Nema upotrebljenih tabela"
+ slo "Nie je použitá žiadna tabuľka"
+ spa "No ha tablas usadas"
+ swe "Inga tabeller angivna"
+ ukr "Ðе викориÑтано таблиць"
+ER_TOO_BIG_SET
+ cze "P-Bříliš mnoho řetězců pro sloupec %-.192s a SET"
+ dan "For mange tekststrenge til specifikationen af SET i kolonne %-.192s"
+ nla "Teveel strings voor kolom %-.192s en SET"
+ eng "Too many strings for column %-.192s and SET"
+ est "Liiga palju string tulbale %-.192s tüübile SET"
+ fre "Trop de chaînes dans la colonne %-.192s avec SET"
+ ger "Zu viele Strings für Feld %-.192s und SET angegeben"
+ greek "ΠάÏα πολλά strings για το πεδίο %-.192s και SET"
+ hun "Tul sok karakter: %-.192s es SET"
+ ita "Troppe stringhe per la colonna %-.192s e la SET"
+ kor "칼럼 %-.192s와 SETì—ì„œ 스트ë§ì´ 너무 많습니다."
+ nor "For mange tekststrenger kolonne %-.192s og SET"
+ norwegian-ny "For mange tekststrengar felt %-.192s og SET"
+ pol "Zbyt wiele łańcuchów dla kolumny %-.192s i polecenia SET"
+ por "'Strings' demais para coluna '%-.192s' e SET"
+ rum "Prea multe siruri pentru coloana %-.192s si SET"
+ rus "Слишком много значений Ð´Ð»Ñ Ñтолбца %-.192s в SET"
+ serbian "Previše string-ova za kolonu '%-.192s' i komandu 'SET'"
+ slo "Príliš mnoho reťazcov pre pole %-.192s a SET"
+ spa "Muchas strings para columna %-.192s y SET"
+ swe "För många alternativ till kolumn %-.192s för SET"
+ ukr "Забагато Ñтрок Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ %-.192s та SET"
+ER_NO_UNIQUE_LOGFILE
+ cze "Nemohu vytvo-BÅ™it jednoznaÄné jméno logovacího souboru %-.200s.(1-999)\n"
+ dan "Kan ikke lave unikt log-filnavn %-.200s.(1-999)\n"
+ nla "Het is niet mogelijk een unieke naam te maken voor de logfile %-.200s.(1-999)\n"
+ eng "Can't generate a unique log-filename %-.200s.(1-999)\n"
+ est "Ei suuda luua unikaalset logifaili nime %-.200s.(1-999)\n"
+ fre "Ne peut générer un unique nom de journal %-.200s.(1-999)\n"
+ ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.200s(1-999) erzeugen\n"
+ greek "ΑδÏνατη η δημιουÏγία unique log-filename %-.200s.(1-999)\n"
+ hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n"
+ ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n"
+ kor "Unique ë¡œê·¸í™”ì¼ '%-.200s'를 만들수 없습니다.(1-999)\n"
+ nor "Kan ikke lage unikt loggfilnavn %-.200s.(1-999)\n"
+ norwegian-ny "Kan ikkje lage unikt loggfilnavn %-.200s.(1-999)\n"
+ pol "Nie można stworzyć unikalnej nazwy pliku z logiem %-.200s.(1-999)\n"
+ por "Não pode gerar um nome de arquivo de 'log' único '%-.200s'.(1-999)\n"
+ rum "Nu pot sa generez un nume de log unic %-.200s.(1-999)\n"
+ rus "Ðевозможно Ñоздать уникальное Ð¸Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° журнала %-.200s.(1-999)\n"
+ serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.200s.(1-999)'\n"
+ slo "Nemôžem vytvoriť unikátne meno log-súboru %-.200s.(1-999)\n"
+ spa "No puede crear un unico archivo log %-.200s.(1-999)\n"
+ swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n"
+ ukr "Ðе можу згенерувати унікальне ім'Ñ log-файлу %-.200s.(1-999)\n"
+ER_TABLE_NOT_LOCKED_FOR_WRITE
+ cze "Tabulka '%-.192s' byla zam-BÄena s READ a nemůže být zmÄ›nÄ›na"
+ dan "Tabellen '%-.192s' var låst med READ lås og kan ikke opdateres"
+ nla "Tabel '%-.192s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen."
+ eng "Table '%-.192s' was locked with a READ lock and can't be updated"
+ jps "Table '%-.192s' 㯠READ lock ã«ãªã£ã¦ã„ã¦ã€æ›´æ–°ã¯ã§ãã¾ã›ã‚“",
+ est "Tabel '%-.192s' on lukustatud READ lukuga ning ei ole muudetav"
+ fre "Table '%-.192s' verrouillée lecture (READ): modification impossible"
+ ger "Tabelle '%-.192s' ist mit Lesesperre versehen und kann nicht aktualisiert werden"
+ greek "Ο πίνακας '%-.192s' έχει κλειδωθεί με READ lock και δεν επιτÏέπονται αλλαγές"
+ hun "A(z) '%-.192s' tabla zarolva lett (READ lock) es nem lehet frissiteni"
+ ita "La tabella '%-.192s' e` soggetta a lock in lettura e non puo` essere aggiornata"
+ jpn "Table '%-.192s' 㯠READ lock ã«ãªã£ã¦ã„ã¦ã€æ›´æ–°ã¯ã§ãã¾ã›ã‚“"
+ kor "í…Œì´ë¸” '%-.192s'는 READ ë½ì´ 잠겨있어서 갱신할 수 없습니다."
+ nor "Tabellen '%-.192s' var låst med READ lås og kan ikke oppdateres"
+ norwegian-ny "Tabellen '%-.192s' var låst med READ lås og kan ikkje oppdaterast"
+ pol "Tabela '%-.192s' została zablokowana przez READ i nie może zostać zaktualizowana"
+ por "Tabela '%-.192s' foi travada com trava de leitura e não pode ser atualizada"
+ rum "Tabela '%-.192s' a fost locked cu un READ lock si nu poate fi actualizata"
+ rus "Таблица '%-.192s' заблокирована уровнем READ lock и не может быть изменена"
+ serbian "Tabela '%-.192s' je zakljuÄana READ lock-om; iz nje se može samo Äitati ali u nju se ne može pisati"
+ slo "Tabuľka '%-.192s' bola zamknutá s READ a nemôže byť zmenená"
+ spa "Tabla '%-.192s' fue trabada con un READ lock y no puede ser actualizada"
+ swe "Tabell '%-.192s' kan inte uppdateras emedan den är låst för läsning"
+ ukr "Таблицю '%-.192s' заблоковано тільки Ð´Ð»Ñ Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ, тому Ñ—Ñ— не можна оновити"
+ER_TABLE_NOT_LOCKED
+ cze "Tabulka '%-.192s' nebyla zam-BÄena s LOCK TABLES"
+ dan "Tabellen '%-.192s' var ikke låst med LOCK TABLES"
+ nla "Tabel '%-.192s' was niet gelocked met LOCK TABLES"
+ eng "Table '%-.192s' was not locked with LOCK TABLES"
+ jps "Table '%-.192s' 㯠LOCK TABLES ã«ã‚ˆã£ã¦ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "Tabel '%-.192s' ei ole lukustatud käsuga LOCK TABLES"
+ fre "Table '%-.192s' non verrouillée: utilisez LOCK TABLES"
+ ger "Tabelle '%-.192s' wurde nicht mit LOCK TABLES gesperrt"
+ greek "Ο πίνακας '%-.192s' δεν έχει κλειδωθεί με LOCK TABLES"
+ hun "A(z) '%-.192s' tabla nincs zarolva a LOCK TABLES-szel"
+ ita "Non e` stato impostato il lock per la tabella '%-.192s' con LOCK TABLES"
+ jpn "Table '%-.192s' 㯠LOCK TABLES ã«ã‚ˆã£ã¦ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "í…Œì´ë¸” '%-.192s'는 LOCK TABLES 명령으로 잠기지 않았습니다."
+ nor "Tabellen '%-.192s' var ikke låst med LOCK TABLES"
+ norwegian-ny "Tabellen '%-.192s' var ikkje låst med LOCK TABLES"
+ pol "Tabela '%-.192s' nie została zablokowana poleceniem LOCK TABLES"
+ por "Tabela '%-.192s' não foi travada com LOCK TABLES"
+ rum "Tabela '%-.192s' nu a fost locked cu LOCK TABLES"
+ rus "Таблица '%-.192s' не была заблокирована Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ LOCK TABLES"
+ serbian "Tabela '%-.192s' nije bila zakljuÄana komandom 'LOCK TABLES'"
+ slo "Tabuľka '%-.192s' nebola zamknutá s LOCK TABLES"
+ spa "Tabla '%-.192s' no fue trabada con LOCK TABLES"
+ swe "Tabell '%-.192s' är inte låst med LOCK TABLES"
+ ukr "Таблицю '%-.192s' не було блоковано з LOCK TABLES"
+ER_BLOB_CANT_HAVE_DEFAULT 42000
+ cze "Blob polo-Bžka '%-.192s' nemůže mít defaultní hodnotu"
+ dan "BLOB feltet '%-.192s' kan ikke have en standard værdi"
+ nla "Blob veld '%-.192s' can geen standaardwaarde bevatten"
+ eng "BLOB/TEXT column '%-.192s' can't have a default value"
+ est "BLOB-tüüpi tulp '%-.192s' ei saa omada vaikeväärtust"
+ fre "BLOB '%-.192s' ne peut avoir de valeur par défaut"
+ ger "BLOB/TEXT-Feld '%-.192s' darf keinen Vorgabewert (DEFAULT) haben"
+ greek "Τα Blob πεδία '%-.192s' δεν μποÏοÏν να έχουν Ï€ÏοκαθοÏισμένες τιμές (default value)"
+ hun "A(z) '%-.192s' blob objektumnak nem lehet alapertelmezett erteke"
+ ita "Il campo BLOB '%-.192s' non puo` avere un valore di default"
+ jpn "BLOB column '%-.192s' can't have a default value"
+ kor "BLOB 칼럼 '%-.192s' 는 ë””í´íŠ¸ ê°’ì„ ê°€ì§ˆ 수 없습니다."
+ nor "Blob feltet '%-.192s' kan ikke ha en standard verdi"
+ norwegian-ny "Blob feltet '%-.192s' kan ikkje ha ein standard verdi"
+ pol "Pole typu blob '%-.192s' nie może mieć domy?lnej warto?ci"
+ por "Coluna BLOB '%-.192s' não pode ter um valor padrão (default)"
+ rum "Coloana BLOB '%-.192s' nu poate avea o valoare default"
+ rus "Ðевозможно указывать значение по умолчанию Ð´Ð»Ñ Ñтолбца BLOB '%-.192s'"
+ serbian "BLOB kolona '%-.192s' ne može imati default vrednost"
+ slo "Pole BLOB '%-.192s' nemôže mať implicitnú hodnotu"
+ spa "Campo Blob '%-.192s' no puede tener valores patron"
+ swe "BLOB fält '%-.192s' kan inte ha ett DEFAULT-värde"
+ ukr "Стовбець BLOB '%-.192s' не може мати Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ð¿Ð¾ замовчуванню"
+ER_WRONG_DB_NAME 42000
+ cze "Nep-Břípustné jméno databáze '%-.100s'"
+ dan "Ugyldigt database navn '%-.100s'"
+ nla "Databasenaam '%-.100s' is niet getoegestaan"
+ eng "Incorrect database name '%-.100s'"
+ jps "指定ã—㟠database å '%-.100s' ãŒé–“é•ã£ã¦ã„ã¾ã™",
+ est "Vigane andmebaasi nimi '%-.100s'"
+ fre "Nom de base de donnée illégal: '%-.100s'"
+ ger "Unerlaubter Datenbankname '%-.100s'"
+ greek "Λάθος όνομα βάσης δεδομένων '%-.100s'"
+ hun "Hibas adatbazisnev: '%-.100s'"
+ ita "Nome database errato '%-.100s'"
+ jpn "指定ã—㟠database å '%-.100s' ãŒé–“é•ã£ã¦ã„ã¾ã™"
+ kor "'%-.100s' ë°ì´íƒ€ë² ì´ìŠ¤ì˜ ì´ë¦„ì´ ë¶€ì •í™•í•©ë‹ˆë‹¤."
+ nor "Ugyldig database navn '%-.100s'"
+ norwegian-ny "Ugyldig database namn '%-.100s'"
+ pol "Niedozwolona nazwa bazy danych '%-.100s'"
+ por "Nome de banco de dados '%-.100s' incorreto"
+ rum "Numele bazei de date este incorect '%-.100s'"
+ rus "Ðекорректное Ð¸Ð¼Ñ Ð±Ð°Ð·Ñ‹ данных '%-.100s'"
+ serbian "Pogrešno ime baze '%-.100s'"
+ slo "Neprípustné meno databázy '%-.100s'"
+ spa "Nombre de base de datos ilegal '%-.100s'"
+ swe "Felaktigt databasnamn '%-.100s'"
+ ukr "Ðевірне ім'Ñ Ð±Ð°Ð·Ð¸ данних '%-.100s'"
+ER_WRONG_TABLE_NAME 42000
+ cze "Nep-Břípustné jméno tabulky '%-.100s'"
+ dan "Ugyldigt tabel navn '%-.100s'"
+ nla "Niet toegestane tabelnaam '%-.100s'"
+ eng "Incorrect table name '%-.100s'"
+ jps "指定ã—㟠table å '%-.100s' ã¯ã¾ã¡ãŒã£ã¦ã„ã¾ã™",
+ est "Vigane tabeli nimi '%-.100s'"
+ fre "Nom de table illégal: '%-.100s'"
+ ger "Unerlaubter Tabellenname '%-.100s'"
+ greek "Λάθος όνομα πίνακα '%-.100s'"
+ hun "Hibas tablanev: '%-.100s'"
+ ita "Nome tabella errato '%-.100s'"
+ jpn "指定ã—㟠table å '%-.100s' ã¯ã¾ã¡ãŒã£ã¦ã„ã¾ã™"
+ kor "'%-.100s' í…Œì´ë¸” ì´ë¦„ì´ ë¶€ì •í™•í•©ë‹ˆë‹¤."
+ nor "Ugyldig tabell navn '%-.100s'"
+ norwegian-ny "Ugyldig tabell namn '%-.100s'"
+ pol "Niedozwolona nazwa tabeli '%-.100s'..."
+ por "Nome de tabela '%-.100s' incorreto"
+ rum "Numele tabelei este incorect '%-.100s'"
+ rus "Ðекорректное Ð¸Ð¼Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ‹ '%-.100s'"
+ serbian "Pogrešno ime tabele '%-.100s'"
+ slo "Neprípustné meno tabuľky '%-.100s'"
+ spa "Nombre de tabla ilegal '%-.100s'"
+ swe "Felaktigt tabellnamn '%-.100s'"
+ ukr "Ðевірне ім'Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.100s'"
+ER_TOO_BIG_SELECT 42000
+ cze "Zadan-Bý SELECT by procházel příliš mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1"
+ dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt"
+ nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is."
+ eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay"
+ est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1"
+ fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien"
+ ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden"
+ greek "Το SELECT θα εξετάσει μεγάλο αÏιθμό εγγÏαφών και πιθανώς θα καθυστεÏήσει. ΠαÏακαλώ εξετάστε τις παÏαμέτÏους του WHERE και χÏησιμοποιείστε SET SQL_BIG_SELECTS=1 αν το SELECT είναι σωστό"
+ hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay"
+ ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto."
+ kor "SELECT 명령ì—ì„œ 너무 ë§Žì€ ë ˆì½”ë“œë¥¼ 찾기 ë•Œë¬¸ì— ë§Žì€ ì‹œê°„ì´ ì†Œìš”ë©ë‹ˆë‹¤. ë”°ë¼ì„œ WHERE ë¬¸ì„ ì ê²€í•˜ê±°ë‚˜, 만약 SELECTê°€ okë˜ë©´ SET SQL_BIG_SELECTS=1 ì˜µì…˜ì„ ì‚¬ìš©í•˜ì„¸ìš”."
+ nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
+ norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
+ pol "Operacja SELECT będzie dotyczyła zbyt wielu rekordów i prawdopodobnie zajmie bardzo dużo czasu. SprawdĽ warunek WHERE i użyj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna"
+ por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto"
+ rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay"
+ rus "Ð”Ð»Ñ Ñ‚Ð°ÐºÐ¾Ð¹ выборки SELECT должен будет проÑмотреть Ñлишком много запиÑей и, видимо, Ñто займет очень много времени. Проверьте ваше указание WHERE, и, еÑли в нем вÑе в порÑдке, укажите SET SQL_BIG_SELECTS=1"
+ serbian "Komanda 'SELECT' će ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu"
+ slo "Zadaná požiadavka SELECT by prechádzala príliš mnoho záznamov a trvala by príliš dlho. Skontrolujte tvar WHERE a ak je v poriadku, použite SET SQL_BIG_SELECTS=1"
+ spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto"
+ swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins"
+ ukr "Запиту SELECT потрібно обробити багато запиÑів, що, певне, займе дуже багато чаÑу. Перевірте ваше WHERE та викориÑтовуйте SET SQL_BIG_SELECTS=1, Ñкщо цей запит SELECT Ñ” вірним"
+ER_UNKNOWN_ERROR
+ cze "Nezn-Bámá chyba"
+ dan "Ukendt fejl"
+ nla "Onbekende Fout"
+ eng "Unknown error"
+ est "Tundmatu viga"
+ fre "Erreur inconnue"
+ ger "Unbekannter Fehler"
+ greek "ΠÏοέκυψε άγνωστο λάθος"
+ hun "Ismeretlen hiba"
+ ita "Errore sconosciuto"
+ kor "알수 없는 ì—러입니다."
+ nor "Ukjent feil"
+ norwegian-ny "Ukjend feil"
+ por "Erro desconhecido"
+ rum "Eroare unknown"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°"
+ serbian "Nepoznata greška"
+ slo "Neznámá chyba"
+ spa "Error desconocido"
+ swe "Oidentifierat fel"
+ ukr "Ðевідома помилка"
+ER_UNKNOWN_PROCEDURE 42000
+ cze "Nezn-Bámá procedura %-.192s"
+ dan "Ukendt procedure %-.192s"
+ nla "Onbekende procedure %-.192s"
+ eng "Unknown procedure '%-.192s'"
+ est "Tundmatu protseduur '%-.192s'"
+ fre "Procédure %-.192s inconnue"
+ ger "Unbekannte Prozedur '%-.192s'"
+ greek "Αγνωστη διαδικασία '%-.192s'"
+ hun "Ismeretlen eljaras: '%-.192s'"
+ ita "Procedura '%-.192s' sconosciuta"
+ kor "알수 없는 수행문 : '%-.192s'"
+ nor "Ukjent prosedyre %-.192s"
+ norwegian-ny "Ukjend prosedyre %-.192s"
+ pol "Unkown procedure %-.192s"
+ por "'Procedure' '%-.192s' desconhecida"
+ rum "Procedura unknown '%-.192s'"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð¿Ñ€Ð¾Ñ†ÐµÐ´ÑƒÑ€Ð° '%-.192s'"
+ serbian "Nepoznata procedura '%-.192s'"
+ slo "Neznámá procedúra '%-.192s'"
+ spa "Procedimiento desconocido %-.192s"
+ swe "Okänd procedur: %-.192s"
+ ukr "Ðевідома процедура '%-.192s'"
+ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000
+ cze "Chybn-Bý poÄet parametrů procedury %-.192s"
+ dan "Forkert antal parametre til proceduren %-.192s"
+ nla "Foutief aantal parameters doorgegeven aan procedure %-.192s"
+ eng "Incorrect parameter count to procedure '%-.192s'"
+ est "Vale parameetrite hulk protseduurile '%-.192s'"
+ fre "Mauvais nombre de paramètres pour la procedure %-.192s"
+ ger "Falsche Parameterzahl für Prozedur '%-.192s'"
+ greek "Λάθος αÏιθμός παÏαμέτÏων στη διαδικασία '%-.192s'"
+ hun "Rossz parameter a(z) '%-.192s'eljaras szamitasanal"
+ ita "Numero di parametri errato per la procedura '%-.192s'"
+ kor "'%-.192s' ìˆ˜í–‰ë¬¸ì— ëŒ€í•œ 부정확한 파ë¼ë©”í„°"
+ nor "Feil parameter antall til prosedyren %-.192s"
+ norwegian-ny "Feil parameter tal til prosedyra %-.192s"
+ pol "Incorrect parameter count to procedure %-.192s"
+ por "Número de parâmetros incorreto para a 'procedure' '%-.192s'"
+ rum "Procedura '%-.192s' are un numar incorect de parametri"
+ rus "Ðекорректное количеÑтво параметров Ð´Ð»Ñ Ð¿Ñ€Ð¾Ñ†ÐµÐ´ÑƒÑ€Ñ‹ '%-.192s'"
+ serbian "Pogrešan broj parametara za proceduru '%-.192s'"
+ slo "Chybný poÄet parametrov procedúry '%-.192s'"
+ spa "Equivocado parametro count para procedimiento %-.192s"
+ swe "Felaktigt antal parametrar till procedur %-.192s"
+ ukr "Хибна кількіÑÑ‚ÑŒ параметрів процедури '%-.192s'"
+ER_WRONG_PARAMETERS_TO_PROCEDURE
+ cze "Chybn-Bé parametry procedury %-.192s"
+ dan "Forkert(e) parametre til proceduren %-.192s"
+ nla "Foutieve parameters voor procedure %-.192s"
+ eng "Incorrect parameters to procedure '%-.192s'"
+ est "Vigased parameetrid protseduurile '%-.192s'"
+ fre "Paramètre erroné pour la procedure %-.192s"
+ ger "Falsche Parameter für Prozedur '%-.192s'"
+ greek "Λάθος παÏάμετÏοι στην διαδικασία '%-.192s'"
+ hun "Rossz parameter a(z) '%-.192s' eljarasban"
+ ita "Parametri errati per la procedura '%-.192s'"
+ kor "'%-.192s' ìˆ˜í–‰ë¬¸ì— ëŒ€í•œ 부정확한 파ë¼ë©”í„°"
+ nor "Feil parametre til prosedyren %-.192s"
+ norwegian-ny "Feil parameter til prosedyra %-.192s"
+ pol "Incorrect parameters to procedure %-.192s"
+ por "Parâmetros incorretos para a 'procedure' '%-.192s'"
+ rum "Procedura '%-.192s' are parametrii incorecti"
+ rus "Ðекорректные параметры Ð´Ð»Ñ Ð¿Ñ€Ð¾Ñ†ÐµÐ´ÑƒÑ€Ñ‹ '%-.192s'"
+ serbian "Pogrešni parametri prosleđeni proceduri '%-.192s'"
+ slo "Chybné parametre procedúry '%-.192s'"
+ spa "Equivocados parametros para procedimiento %-.192s"
+ swe "Felaktiga parametrar till procedur %-.192s"
+ ukr "Хибний параметер процедури '%-.192s'"
+ER_UNKNOWN_TABLE 42S02
+ cze "Nezn-Bámá tabulka '%-.192s' v %-.32s"
+ dan "Ukendt tabel '%-.192s' i %-.32s"
+ nla "Onbekende tabel '%-.192s' in %-.32s"
+ eng "Unknown table '%-.192s' in %-.32s"
+ est "Tundmatu tabel '%-.192s' %-.32s-s"
+ fre "Table inconnue '%-.192s' dans %-.32s"
+ ger "Unbekannte Tabelle '%-.192s' in '%-.32s'"
+ greek "Αγνωστος πίνακας '%-.192s' σε %-.32s"
+ hun "Ismeretlen tabla: '%-.192s' %-.32s-ban"
+ ita "Tabella '%-.192s' sconosciuta in %-.32s"
+ jpn "Unknown table '%-.192s' in %-.32s"
+ kor "알수 없는 í…Œì´ë¸” '%-.192s' (ë°ì´íƒ€ë² ì´ìŠ¤ %-.32s)"
+ nor "Ukjent tabell '%-.192s' i %-.32s"
+ norwegian-ny "Ukjend tabell '%-.192s' i %-.32s"
+ pol "Unknown table '%-.192s' in %-.32s"
+ por "Tabela '%-.192s' desconhecida em '%-.32s'"
+ rum "Tabla '%-.192s' invalida in %-.32s"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ð° '%-.192s' в %-.32s"
+ serbian "Nepoznata tabela '%-.192s' u '%-.32s'"
+ slo "Neznáma tabuľka '%-.192s' v %-.32s"
+ spa "Tabla desconocida '%-.192s' in %-.32s"
+ swe "Okänd tabell '%-.192s' i '%-.32s'"
+ ukr "Ðевідома Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' у %-.32s"
+ER_FIELD_SPECIFIED_TWICE 42000
+ cze "Polo-Bžka '%-.192s' je zadána dvakrát"
+ dan "Feltet '%-.192s' er anvendt to gange"
+ nla "Veld '%-.192s' is dubbel gespecificeerd"
+ eng "Column '%-.192s' specified twice"
+ est "Tulp '%-.192s' on määratletud topelt"
+ fre "Champ '%-.192s' spécifié deux fois"
+ ger "Feld '%-.192s' wurde zweimal angegeben"
+ greek "Το πεδίο '%-.192s' έχει οÏισθεί δÏο φοÏές"
+ hun "A(z) '%-.192s' mezot ketszer definialta"
+ ita "Campo '%-.192s' specificato 2 volte"
+ kor "칼럼 '%-.192s'는 ë‘번 ì •ì˜ë˜ì–´ 있ì니다."
+ nor "Feltet '%-.192s' er spesifisert to ganger"
+ norwegian-ny "Feltet '%-.192s' er spesifisert to gangar"
+ pol "Field '%-.192s' specified twice"
+ por "Coluna '%-.192s' especificada duas vezes"
+ rum "Coloana '%-.192s' specificata de doua ori"
+ rus "Столбец '%-.192s' указан дважды"
+ serbian "Kolona '%-.192s' je navedena dva puta"
+ slo "Pole '%-.192s' je zadané dvakrát"
+ spa "Campo '%-.192s' especificado dos veces"
+ swe "Fält '%-.192s' är redan använt"
+ ukr "Стовбець '%-.192s' зазначено двічі"
+ER_INVALID_GROUP_FUNC_USE
+ cze "Nespr-Bávné použití funkce group"
+ dan "Forkert brug af grupperings-funktion"
+ nla "Ongeldig gebruik van GROUP-functie"
+ eng "Invalid use of group function"
+ est "Vigane grupeerimisfunktsiooni kasutus"
+ fre "Utilisation invalide de la clause GROUP"
+ ger "Falsche Verwendung einer Gruppierungsfunktion"
+ greek "Εσφαλμένη χÏήση της group function"
+ hun "A group funkcio ervenytelen hasznalata"
+ ita "Uso non valido di una funzione di raggruppamento"
+ kor "ìž˜ëª»ëœ ê·¸ë£¹ 함수를 사용하였습니다."
+ por "Uso inválido de função de agrupamento (GROUP)"
+ rum "Folosire incorecta a functiei group"
+ rus "Ðеправильное иÑпользование групповых функций"
+ serbian "Pogrešna upotreba 'GROUP' funkcije"
+ slo "Nesprávne použitie funkcie GROUP"
+ spa "Invalido uso de función en grupo"
+ swe "Felaktig användning av SQL grupp function"
+ ukr "Хибне викориÑÑ‚Ð°Ð½Ð½Ñ Ñ„ÑƒÐ½ÐºÑ†Ñ–Ñ— групуваннÑ"
+ER_UNSUPPORTED_EXTENSION 42000
+ cze "Tabulka '%-.192s' pou-Bžívá rozšíření, které v této verzi MySQL není"
+ dan "Tabellen '%-.192s' bruger et filtypenavn som ikke findes i denne MySQL version"
+ nla "Tabel '%-.192s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt."
+ eng "Table '%-.192s' uses an extension that doesn't exist in this MySQL version"
+ est "Tabel '%-.192s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis"
+ fre "Table '%-.192s' : utilise une extension invalide pour cette version de MySQL"
+ ger "Tabelle '%-.192s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfügbar ist"
+ greek "Ο πίνακς '%-.192s' χÏησιμοποιεί κάποιο extension που δεν υπάÏχει στην έκδοση αυτή της MySQL"
+ hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban."
+ ita "La tabella '%-.192s' usa un'estensione che non esiste in questa versione di MySQL"
+ kor "í…Œì´ë¸” '%-.192s'는 í™•ìž¥ëª…ë ¹ì„ ì´ìš©í•˜ì§€ë§Œ í˜„ìž¬ì˜ MySQL 버젼ì—서는 존재하지 않습니다."
+ nor "Table '%-.192s' uses a extension that doesn't exist in this MySQL version"
+ norwegian-ny "Table '%-.192s' uses a extension that doesn't exist in this MySQL version"
+ pol "Table '%-.192s' uses a extension that doesn't exist in this MySQL version"
+ por "Tabela '%-.192s' usa uma extensão que não existe nesta versão do MySQL"
+ rum "Tabela '%-.192s' foloseste o extensire inexistenta in versiunea curenta de MySQL"
+ rus "Ð’ таблице '%-.192s' иÑпользуютÑÑ Ð²Ð¾Ð·Ð¼Ð¾Ð¶Ð½Ð¾Ñти, не поддерживаемые в Ñтой верÑии MySQL"
+ serbian "Tabela '%-.192s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a"
+ slo "Tabuľka '%-.192s' používa rozšírenie, ktoré v tejto verzii MySQL nie je"
+ spa "Tabla '%-.192s' usa una extensión que no existe en esta MySQL versión"
+ swe "Tabell '%-.192s' har en extension som inte finns i denna version av MySQL"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' викориÑтовує розширеннÑ, що не Ñ–Ñнує у цій верÑÑ–Ñ— MySQL"
+ER_TABLE_MUST_HAVE_COLUMNS 42000
+ cze "Tabulka mus-Bí mít alespoň jeden sloupec"
+ dan "En tabel skal have mindst een kolonne"
+ nla "Een tabel moet minstens 1 kolom bevatten"
+ eng "A table must have at least 1 column"
+ jps "テーブルã¯æœ€ä½Ž 1 個㮠column ãŒå¿…è¦ã§ã™",
+ est "Tabelis peab olema vähemalt üks tulp"
+ fre "Une table doit comporter au moins une colonne"
+ ger "Eine Tabelle muss mindestens eine Spalte besitzen"
+ greek "Ενας πίνακας Ï€Ïέπει να έχει τουλάχιστον ένα πεδίο"
+ hun "A tablanak legalabb egy oszlopot tartalmazni kell"
+ ita "Una tabella deve avere almeno 1 colonna"
+ jpn "テーブルã¯æœ€ä½Ž 1 個㮠column ãŒå¿…è¦ã§ã™"
+ kor "í•˜ë‚˜ì˜ í…Œì´ë¸”ì—서는 ì ì–´ë„ í•˜ë‚˜ì˜ ì¹¼ëŸ¼ì´ ì¡´ìž¬í•˜ì—¬ì•¼ 합니다."
+ por "Uma tabela tem que ter pelo menos uma (1) coluna"
+ rum "O tabela trebuie sa aiba cel putin o coloana"
+ rus "Ð’ таблице должен быть как минимум один Ñтолбец"
+ serbian "Tabela mora imati najmanje jednu kolonu"
+ slo "Tabuľka musí mať aspoň 1 pole"
+ spa "Una tabla debe tener al menos 1 columna"
+ swe "Tabeller måste ha minst 1 kolumn"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ Ð¿Ð¾Ð²Ð¸Ð½Ð½Ð° мати хочаб один Ñтовбець"
+ER_RECORD_FILE_FULL
+ cze "Tabulka '%-.192s' je pln-Bá"
+ dan "Tabellen '%-.192s' er fuld"
+ nla "De tabel '%-.192s' is vol"
+ eng "The table '%-.192s' is full"
+ jps "table '%-.192s' ã¯ã„ã£ã±ã„ã§ã™",
+ est "Tabel '%-.192s' on täis"
+ fre "La table '%-.192s' est pleine"
+ ger "Tabelle '%-.192s' ist voll"
+ greek "Ο πίνακας '%-.192s' είναι γεμάτος"
+ hun "A '%-.192s' tabla megtelt"
+ ita "La tabella '%-.192s' e` piena"
+ jpn "table '%-.192s' ã¯ã„ã£ã±ã„ã§ã™"
+ kor "í…Œì´ë¸” '%-.192s'ê°€ full났습니다. "
+ por "Tabela '%-.192s' está cheia"
+ rum "Tabela '%-.192s' e plina"
+ rus "Таблица '%-.192s' переполнена"
+ serbian "Tabela '%-.192s' je popunjena do kraja"
+ slo "Tabuľka '%-.192s' je plná"
+ spa "La tabla '%-.192s' está llena"
+ swe "Tabellen '%-.192s' är full"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' заповнена"
+ER_UNKNOWN_CHARACTER_SET 42000
+ cze "Nezn-Bámá znaková sada: '%-.64s'"
+ dan "Ukendt tegnsæt: '%-.64s'"
+ nla "Onbekende character set: '%-.64s'"
+ eng "Unknown character set: '%-.64s'"
+ jps "character set '%-.64s' ã¯ã‚µãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“",
+ est "Vigane kooditabel '%-.64s'"
+ fre "Jeu de caractères inconnu: '%-.64s'"
+ ger "Unbekannter Zeichensatz: '%-.64s'"
+ greek "Αγνωστο character set: '%-.64s'"
+ hun "Ervenytelen karakterkeszlet: '%-.64s'"
+ ita "Set di caratteri '%-.64s' sconosciuto"
+ jpn "character set '%-.64s' ã¯ã‚µãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“"
+ kor "알수없는 언어 Set: '%-.64s'"
+ por "Conjunto de caracteres '%-.64s' desconhecido"
+ rum "Set de caractere invalid: '%-.64s'"
+ rus "ÐеизвеÑÑ‚Ð½Ð°Ñ ÐºÐ¾Ð´Ð¸Ñ€Ð¾Ð²ÐºÐ° '%-.64s'"
+ serbian "Nepoznati karakter-set: '%-.64s'"
+ slo "Neznáma znaková sada: '%-.64s'"
+ spa "Juego de caracteres desconocido: '%-.64s'"
+ swe "Okänd teckenuppsättning: '%-.64s'"
+ ukr "Ðевідома кодова таблицÑ: '%-.64s'"
+ER_TOO_MANY_TABLES
+ cze "P-Bříliš mnoho tabulek, MySQL jich může mít v joinu jen %d"
+ dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join"
+ nla "Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten"
+ eng "Too many tables; MySQL can only use %d tables in a join"
+ jps "テーブルãŒå¤šã™ãŽã¾ã™; MySQL can only use %d tables in a join",
+ est "Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit"
+ fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN"
+ ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden"
+ greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿Ï‚ αÏιθμός πινάκων. Η MySQL μποÏεί να χÏησιμοποιήσει %d πίνακες σε διαδικασία join"
+ hun "Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor"
+ ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join"
+ jpn "テーブルãŒå¤šã™ãŽã¾ã™; MySQL can only use %d tables in a join"
+ kor "너무 ë§Žì€ í…Œì´ë¸”ì´ Joinë˜ì—ˆìŠµë‹ˆë‹¤. MySQLì—서는 JOINì‹œ %dê°œì˜ í…Œì´ë¸”만 사용할 수 있습니다."
+ por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)"
+ rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join"
+ rus "Слишком много таблиц. MySQL может иÑпользовать только %d таблиц в Ñоединении"
+ serbian "Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji"
+ slo "Príliš mnoho tabuliek. MySQL môže použiť len %d v JOIN-e"
+ spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join"
+ swe "För många tabeller. MySQL can ha högst %d tabeller i en och samma join"
+ ukr "Забагато таблиць. MySQL може викориÑтовувати лише %d таблиць у об'єднанні"
+ER_TOO_MANY_FIELDS
+ cze "P-Bříliš mnoho položek"
+ dan "For mange felter"
+ nla "Te veel velden"
+ eng "Too many columns"
+ jps "column ãŒå¤šã™ãŽã¾ã™",
+ est "Liiga palju tulpasid"
+ fre "Trop de champs"
+ ger "Zu viele Felder"
+ greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿Ï‚ αÏιθμός πεδίων"
+ hun "Tul sok mezo"
+ ita "Troppi campi"
+ jpn "column ãŒå¤šã™ãŽã¾ã™"
+ kor "ì¹¼ëŸ¼ì´ ë„ˆë¬´ 많습니다."
+ por "Colunas demais"
+ rum "Prea multe coloane"
+ rus "Слишком много Ñтолбцов"
+ serbian "Previše kolona"
+ slo "Príliš mnoho polí"
+ spa "Muchos campos"
+ swe "För många fält"
+ ukr "Забагато Ñтовбців"
+ER_TOO_BIG_ROWSIZE 42000
+ cze "-BŘádek je příliÅ¡ velký. Maximální velikost řádku, nepoÄítaje položky blob, je %ld. Musíte zmÄ›nit nÄ›které položky na blob"
+ dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du må lave nogle felter til BLOB's"
+ nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen."
+ eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs"
+ jps "row size ãŒå¤§ãã™ãŽã¾ã™. BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„.",
+ est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks"
+ fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB"
+ ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden"
+ greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿ μέγεθος εγγÏαφής. Το μέγιστο μέγεθος εγγÏαφής, χωÏίς να υπολογίζονται τα blobs, είναι %ld. ΠÏέπει να οÏίσετε κάποια πεδία σαν blobs"
+ hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %ld. Nehany mezot meg kell valtoztatnia"
+ ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %ld. Devi cambiare alcuni campi in BLOB"
+ jpn "row size ãŒå¤§ãã™ãŽã¾ã™. BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„."
+ kor "너무 í° row 사ì´ì¦ˆìž…니다. BLOB를 계산하지 ì•Šê³  최대 row 사ì´ì¦ˆëŠ” %ld입니다. ì–¼ë§ˆê°„ì˜ í•„ë“œë“¤ì„ BLOBë¡œ 바꾸셔야 ê² êµ°ìš”.."
+ por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %ld. Você tem que mudar alguns campos para BLOBs"
+ rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %ld. Trebuie sa schimbati unele cimpuri in BLOB-uri"
+ rus "Слишком большой размер запиÑи. МакÑимальный размер Ñтроки, иÑÐºÐ»ÑŽÑ‡Ð°Ñ Ð¿Ð¾Ð»Ñ BLOB, - %ld. Возможно, вам Ñледует изменить тип некоторых полей на BLOB"
+ serbian "Prevelik slog. Maksimalna veliÄina sloga, ne raÄunajući BLOB polja, je %ld. Trebali bi da promenite tip nekih polja u BLOB"
+ slo "Riadok je príliš veľký. Maximálna veľkosť riadku, okrem 'BLOB', je %ld. Musíte zmeniť niektoré položky na BLOB"
+ spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %ld. Tu tienes que cambiar algunos campos para blob"
+ swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %ld. Ändra några av dina fält till BLOB"
+ ukr "Задовга Ñтрока. Ðайбільшою довжиною Ñтроки, не рахуючи BLOB, Ñ” %ld. Вам потрібно привеÑти деÑкі Ñтовбці до типу BLOB"
+ER_STACK_OVERRUN
+ cze "P-BÅ™eteÄení zásobníku threadu: použito %ld z %ld. Použijte 'mysqld -O thread_stack=#' k zadání vÄ›tšího zásobníku"
+ dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt"
+ nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)."
+ eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed"
+ jps "Thread stack overrun: Used: %ld of a %ld stack. スタック領域を多ãã¨ã‚ŠãŸã„å ´åˆã€'mysqld -O thread_stack=#' ã¨æŒ‡å®šã—ã¦ãã ã•ã„",
+ fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur"
+ ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen"
+ greek "Stack overrun στο thread: Used: %ld of a %ld stack. ΠαÏακαλώ χÏησιμοποιείστε 'mysqld -O thread_stack=#' για να οÏίσετε ένα μεγαλÏτεÏο stack αν χÏειάζεται"
+ hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz"
+ ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande."
+ jpn "Thread stack overrun: Used: %ld of a %ld stack. スタック領域を多ãã¨ã‚ŠãŸã„å ´åˆã€'mysqld -O thread_stack=#' ã¨æŒ‡å®šã—ã¦ãã ã•ã„"
+ kor "쓰레드 스íƒì´ 넘쳤습니다. 사용: %ldê°œ 스íƒ: %ldê°œ. 만약 필요시 ë”í° ìŠ¤íƒì„ ì›í• ë•Œì—는 'mysqld -O thread_stack=#' 를 ì •ì˜í•˜ì„¸ìš”"
+ por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário"
+ rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare"
+ rus "Стек потоков переполнен: иÑпользовано: %ld из %ld Ñтека. ПрименÑйте 'mysqld -O thread_stack=#' Ð´Ð»Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð¸Ñ Ð±Ð¾Ð»ÑŒÑˆÐµÐ³Ð¾ размера Ñтека, еÑли необходимо"
+ serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veći stack ako je potrebno"
+ slo "PreteÄenie zásobníku vlákna: použité: %ld z %ld. Použite 'mysqld -O thread_stack=#' k zadaniu väÄÅ¡ieho zásobníka"
+ spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario"
+ swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack"
+ ukr "Стек гілок переповнено: ВикориÑтано: %ld з %ld. ВикориÑтовуйте 'mysqld -O thread_stack=#' аби зазначити більший Ñтек, Ñкщо необхідно"
+ER_WRONG_OUTER_JOIN 42000
+ cze "V OUTER JOIN byl nalezen k-Břížový odkaz. Prověřte ON podmínky"
+ dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions"
+ nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions"
+ eng "Cross dependency found in OUTER JOIN; examine your ON conditions"
+ est "Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi"
+ fre "Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON"
+ ger "OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen"
+ greek "Cross dependency βÏέθηκε σε OUTER JOIN. ΠαÏακαλώ εξετάστε τις συνθήκες που θέσατε στο ON"
+ hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket"
+ ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON"
+ por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'"
+ rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON"
+ rus "Ð’ OUTER JOIN обнаружена перекреÑÑ‚Ð½Ð°Ñ Ð·Ð°Ð²Ð¸ÑимоÑÑ‚ÑŒ. Внимательно проанализируйте Ñвои уÑÐ»Ð¾Ð²Ð¸Ñ ON"
+ serbian "Unakrsna zavisnost pronađena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove"
+ slo "V OUTER JOIN bol nájdený krížový odkaz. Skontrolujte podmienky ON"
+ spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON"
+ swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket"
+ ukr "ПерехреÑна залежніÑÑ‚ÑŒ у OUTER JOIN. Перевірте умову ON"
+ER_NULL_COLUMN_IN_INDEX 42000
+ eng "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler"
+ swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.192s' till NOT NULL eller använd en annan hanterare"
+ER_CANT_FIND_UDF
+ cze "Nemohu na-BÄíst funkci '%-.192s'"
+ dan "Kan ikke læse funktionen '%-.192s'"
+ nla "Kan functie '%-.192s' niet laden"
+ eng "Can't load function '%-.192s'"
+ jps "function '%-.192s' ã‚’ ロードã§ãã¾ã›ã‚“",
+ est "Ei suuda avada funktsiooni '%-.192s'"
+ fre "Imposible de charger la fonction '%-.192s'"
+ ger "Kann Funktion '%-.192s' nicht laden"
+ greek "Δεν είναι δυνατή η διαδικασία load για τη συνάÏτηση '%-.192s'"
+ hun "A(z) '%-.192s' fuggveny nem toltheto be"
+ ita "Impossibile caricare la funzione '%-.192s'"
+ jpn "function '%-.192s' ã‚’ ロードã§ãã¾ã›ã‚“"
+ kor "'%-.192s' 함수를 로드하지 못했습니다."
+ por "Não pode carregar a função '%-.192s'"
+ rum "Nu pot incarca functia '%-.192s'"
+ rus "Ðевозможно загрузить функцию '%-.192s'"
+ serbian "Ne mogu da uÄitam funkciju '%-.192s'"
+ slo "Nemôžem naÄítaÅ¥ funkciu '%-.192s'"
+ spa "No puedo cargar función '%-.192s'"
+ swe "Kan inte ladda funktionen '%-.192s'"
+ ukr "Ðе можу завантажити функцію '%-.192s'"
+ER_CANT_INITIALIZE_UDF
+ cze "Nemohu inicializovat funkci '%-.192s'; %-.80s"
+ dan "Kan ikke starte funktionen '%-.192s'; %-.80s"
+ nla "Kan functie '%-.192s' niet initialiseren; %-.80s"
+ eng "Can't initialize function '%-.192s'; %-.80s"
+ jps "function '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“; %-.80s",
+ est "Ei suuda algväärtustada funktsiooni '%-.192s'; %-.80s"
+ fre "Impossible d'initialiser la fonction '%-.192s'; %-.80s"
+ ger "Kann Funktion '%-.192s' nicht initialisieren: %-.80s"
+ greek "Δεν είναι δυνατή η έναÏξη της συνάÏτησης '%-.192s'; %-.80s"
+ hun "A(z) '%-.192s' fuggveny nem inicializalhato; %-.80s"
+ ita "Impossibile inizializzare la funzione '%-.192s'; %-.80s"
+ jpn "function '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“; %-.80s"
+ kor "'%-.192s' 함수를 초기화 하지 못했습니다.; %-.80s"
+ por "Não pode inicializar a função '%-.192s' - '%-.80s'"
+ rum "Nu pot initializa functia '%-.192s'; %-.80s"
+ rus "Ðевозможно инициализировать функцию '%-.192s'; %-.80s"
+ serbian "Ne mogu da inicijalizujem funkciju '%-.192s'; %-.80s"
+ slo "Nemôžem inicializovať funkciu '%-.192s'; %-.80s"
+ spa "No puedo inicializar función '%-.192s'; %-.80s"
+ swe "Kan inte initialisera funktionen '%-.192s'; '%-.80s'"
+ ukr "Ðе можу ініціалізувати функцію '%-.192s'; %-.80s"
+ER_UDF_NO_PATHS
+ cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty"
+ dan "Angivelse af sti ikke tilladt for delt bibliotek"
+ nla "Geen pad toegestaan voor shared library"
+ eng "No paths allowed for shared library"
+ jps "shared library ã¸ã®ãƒ‘スãŒé€šã£ã¦ã„ã¾ã›ã‚“",
+ est "Teegi nimes ei tohi olla kataloogi"
+ fre "Chemin interdit pour les bibliothèques partagées"
+ ger "Keine Pfade gestattet für Shared Library"
+ greek "Δεν βÏέθηκαν paths για την shared library"
+ hun "Nincs ut a megosztott konyvtarakhoz (shared library)"
+ ita "Non sono ammessi path per le librerie condivisa"
+ jpn "shared library ã¸ã®ãƒ‘スãŒé€šã£ã¦ã„ã¾ã›ã‚“"
+ kor "공유 ë¼ì´ë²„러리를 위한 패스가 ì •ì˜ë˜ì–´ 있지 않습니다."
+ por "Não há caminhos (paths) permitidos para biblioteca compartilhada"
+ rum "Nici un paths nu e permis pentru o librarie shared"
+ rus "ÐедопуÑтимо указывать пути Ð´Ð»Ñ Ð´Ð¸Ð½Ð°Ð¼Ð¸Ñ‡ÐµÑких библиотек"
+ serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke"
+ slo "Neprípustné žiadne cesty k zdieľanej knižnici"
+ spa "No pasos permitidos para librarias conjugadas"
+ swe "Man får inte ange sökväg för dynamiska bibliotek"
+ ukr "Ðе дозволено викориÑтовувати путі Ð´Ð»Ñ Ñ€Ð¾Ð·Ð´Ñ–Ð»ÑŽÐ²Ð°Ð½Ð¸Ñ… бібліотек"
+ER_UDF_EXISTS
+ cze "Funkce '%-.192s' ji-Bž existuje"
+ dan "Funktionen '%-.192s' findes allerede"
+ nla "Functie '%-.192s' bestaat reeds"
+ eng "Function '%-.192s' already exists"
+ jps "Function '%-.192s' ã¯æ—¢ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™",
+ est "Funktsioon '%-.192s' juba eksisteerib"
+ fre "La fonction '%-.192s' existe déjà"
+ ger "Funktion '%-.192s' existiert schon"
+ greek "Η συνάÏτηση '%-.192s' υπάÏχει ήδη"
+ hun "A '%-.192s' fuggveny mar letezik"
+ ita "La funzione '%-.192s' esiste gia`"
+ jpn "Function '%-.192s' ã¯æ—¢ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™"
+ kor "'%-.192s' 함수는 ì´ë¯¸ 존재합니다."
+ por "Função '%-.192s' já existe"
+ rum "Functia '%-.192s' exista deja"
+ rus "Ð¤ÑƒÐ½ÐºÑ†Ð¸Ñ '%-.192s' уже ÑущеÑтвует"
+ serbian "Funkcija '%-.192s' već postoji"
+ slo "Funkcia '%-.192s' už existuje"
+ spa "Función '%-.192s' ya existe"
+ swe "Funktionen '%-.192s' finns redan"
+ ukr "Ð¤ÑƒÐ½ÐºÑ†Ñ–Ñ '%-.192s' вже Ñ–Ñнує"
+ER_CANT_OPEN_LIBRARY
+ cze "Nemohu otev-Břít sdílenou knihovnu '%-.192s' (errno: %d %-.128s)"
+ dan "Kan ikke åbne delt bibliotek '%-.192s' (errno: %d %-.128s)"
+ nla "Kan shared library '%-.192s' niet openen (Errcode: %d %-.128s)"
+ eng "Can't open shared library '%-.192s' (errno: %d %-.128s)"
+ jps "shared library '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“ (errno: %d %-.128s)",
+ est "Ei suuda avada jagatud teeki '%-.192s' (veakood: %d %-.128s)"
+ fre "Impossible d'ouvrir la bibliothèque partagée '%-.192s' (errno: %d %-.128s)"
+ ger "Kann Shared Library '%-.192s' nicht öffnen (Fehler: %d %-.128s)"
+ greek "Δεν είναι δυνατή η ανάγνωση της shared library '%-.192s' (κωδικός λάθους: %d %-.128s)"
+ hun "A(z) '%-.192s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)"
+ ita "Impossibile aprire la libreria condivisa '%-.192s' (errno: %d %-.128s)"
+ jpn "shared library '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“ (errno: %d %-.128s)"
+ kor "'%-.192s' 공유 ë¼ì´ë²„러리를 열수 없습니다.(ì—러번호: %d %-.128s)"
+ nor "Can't open shared library '%-.192s' (errno: %d %-.128s)"
+ norwegian-ny "Can't open shared library '%-.192s' (errno: %d %-.128s)"
+ pol "Can't open shared library '%-.192s' (errno: %d %-.128s)"
+ por "Não pode abrir biblioteca compartilhada '%-.192s' (erro no. %d '%-.128s')"
+ rum "Nu pot deschide libraria shared '%-.192s' (Eroare: %d %-.128s)"
+ rus "Ðевозможно открыть динамичеÑкую библиотеку '%-.192s' (ошибка: %d %-.128s)"
+ serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.192s' (errno: %d %-.128s)"
+ slo "Nemôžem otvoriť zdieľanú knižnicu '%-.192s' (chybový kód: %d %-.128s)"
+ spa "No puedo abrir libraria conjugada '%-.192s' (errno: %d %-.128s)"
+ swe "Kan inte öppna det dynamiska biblioteket '%-.192s' (Felkod: %d %-.128s)"
+ ukr "Ðе можу відкрити розділювану бібліотеку '%-.192s' (помилка: %d %-.128s)"
+ER_CANT_FIND_DL_ENTRY
+ cze "Nemohu naj-Bít funkci '%-.128s' v knihovně"
+ dan "Kan ikke finde funktionen '%-.128s' i bibliotek"
+ nla "Kan functie '%-.128s' niet in library vinden"
+ eng "Can't find symbol '%-.128s' in library"
+ jps "function '%-.128s' をライブラリー中ã«è¦‹ä»˜ã‘る事ãŒã§ãã¾ã›ã‚“",
+ est "Ei leia funktsiooni '%-.128s' antud teegis"
+ fre "Impossible de trouver la fonction '%-.128s' dans la bibliothèque"
+ ger "Kann Funktion '%-.128s' in der Library nicht finden"
+ greek "Δεν είναι δυνατή η ανεÏÏεση της συνάÏτησης '%-.128s' στην βιβλιοθήκη"
+ hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban"
+ ita "Impossibile trovare la funzione '%-.128s' nella libreria"
+ jpn "function '%-.128s' をライブラリー中ã«è¦‹ä»˜ã‘る事ãŒã§ãã¾ã›ã‚“"
+ kor "ë¼ì´ë²„러리ì—ì„œ '%-.128s' 함수를 ì°¾ì„ ìˆ˜ 없습니다."
+ por "Não pode encontrar a função '%-.128s' na biblioteca"
+ rum "Nu pot gasi functia '%-.128s' in libraria"
+ rus "Ðевозможно отыÑкать Ñимвол '%-.128s' в библиотеке"
+ serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci"
+ slo "Nemôžem nájsť funkciu '%-.128s' v knižnici"
+ spa "No puedo encontrar función '%-.128s' en libraria"
+ swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket"
+ ukr "Ðе можу знайти функцію '%-.128s' у бібліотеці"
+ER_FUNCTION_NOT_DEFINED
+ cze "Funkce '%-.192s' nen-Bí definována"
+ dan "Funktionen '%-.192s' er ikke defineret"
+ nla "Functie '%-.192s' is niet gedefinieerd"
+ eng "Function '%-.192s' is not defined"
+ jps "Function '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "Funktsioon '%-.192s' ei ole defineeritud"
+ fre "La fonction '%-.192s' n'est pas définie"
+ ger "Funktion '%-.192s' ist nicht definiert"
+ greek "Η συνάÏτηση '%-.192s' δεν έχει οÏισθεί"
+ hun "A '%-.192s' fuggveny nem definialt"
+ ita "La funzione '%-.192s' non e` definita"
+ jpn "Function '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "'%-.192s' 함수가 ì •ì˜ë˜ì–´ 있지 않습니다."
+ por "Função '%-.192s' não está definida"
+ rum "Functia '%-.192s' nu e definita"
+ rus "Ð¤ÑƒÐ½ÐºÑ†Ð¸Ñ '%-.192s' не определена"
+ serbian "Funkcija '%-.192s' nije definisana"
+ slo "Funkcia '%-.192s' nie je definovaná"
+ spa "Función '%-.192s' no está definida"
+ swe "Funktionen '%-.192s' är inte definierad"
+ ukr "Функцію '%-.192s' не визначено"
+ER_HOST_IS_BLOCKED
+ cze "Stroj '%-.64s' je zablokov-Bán kvůli mnoha chybám při připojování. Odblokujete použitím 'mysqladmin flush-hosts'"
+ dan "Værten '%-.64s' er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'"
+ nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'"
+ eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'"
+ jps "Host '%-.64s' 㯠many connection error ã®ãŸã‚ã€æ‹’å¦ã•ã‚Œã¾ã—ãŸ. 'mysqladmin flush-hosts' ã§è§£é™¤ã—ã¦ãã ã•ã„",
+ est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga"
+ fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connexion. Débloquer le par 'mysqladmin flush-hosts'"
+ ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'"
+ greek "Ο υπολογιστής '%-.64s' έχει αποκλεισθεί λόγω πολλαπλών λαθών σÏνδεσης. ΠÏοσπαθήστε να διοÏώσετε με 'mysqladmin flush-hosts'"
+ hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot"
+ ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'"
+ jpn "Host '%-.64s' 㯠many connection error ã®ãŸã‚ã€æ‹’å¦ã•ã‚Œã¾ã—ãŸ. 'mysqladmin flush-hosts' ã§è§£é™¤ã—ã¦ãã ã•ã„"
+ kor "너무 ë§Žì€ ì—°ê²°ì˜¤ë¥˜ë¡œ ì¸í•˜ì—¬ 호스트 '%-.64s'는 블ë½ë˜ì—ˆìŠµë‹ˆë‹¤. 'mysqladmin flush-hosts'를 ì´ìš©í•˜ì—¬ 블ë½ì„ 해제하세요"
+ por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'"
+ rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'"
+ rus "ХоÑÑ‚ '%-.64s' заблокирован из-за Ñлишком большого количеÑтва ошибок ÑоединениÑ. Разблокировать его можно Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ 'mysqladmin flush-hosts'"
+ serbian "Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoću komande 'mysqladmin flush-hosts'"
+ spa "Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'"
+ swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna"
+ ukr "ХоÑÑ‚ '%-.64s' заблоковано з причини великої кількоÑÑ‚Ñ– помилок з'єднаннÑ. Ð”Ð»Ñ Ñ€Ð¾Ð·Ð±Ð»Ð¾ÐºÑƒÐ²Ð°Ð½Ð½Ñ Ð²Ð¸ÐºÐ¾Ñ€Ð¸Ñтовуйте 'mysqladmin flush-hosts'"
+ER_HOST_NOT_PRIVILEGED
+ cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru připojit"
+ dan "Værten '%-.64s' kan ikke tilkoble denne MySQL-server"
+ nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server"
+ eng "Host '%-.64s' is not allowed to connect to this MySQL server"
+ jps "Host '%-.64s' 㯠MySQL server ã«æŽ¥ç¶šã‚’許å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "Masinal '%-.64s' puudub ligipääs sellele MySQL serverile"
+ fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL"
+ ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden"
+ greek "Ο υπολογιστής '%-.64s' δεν έχει δικαίωμα σÏνδεσης με τον MySQL server"
+ hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez"
+ ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL"
+ jpn "Host '%-.64s' 㯠MySQL server ã«æŽ¥ç¶šã‚’許å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "'%-.64s' 호스트는 ì´ MySQLì„œë²„ì— ì ‘ì†í•  허가를 받지 못했습니다."
+ por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL"
+ rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL"
+ rus "ХоÑту '%-.64s' не разрешаетÑÑ Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡Ð°Ñ‚ÑŒÑÑ Ðº Ñтому Ñерверу MySQL"
+ serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server"
+ spa "Servidor '%-.64s' no está permitido para conectar con este servidor MySQL"
+ swe "Denna dator, '%-.64s', har inte privileger att använda denna MySQL server"
+ ukr "ХоÑту '%-.64s' не доволено зв'ÑзуватиÑÑŒ з цим Ñервером MySQL"
+ER_PASSWORD_ANONYMOUS_USER 42000
+ cze "Pou-Bžíváte MySQL jako anonymní uživatel a anonymní uživatelé nemají povoleno měnit hesla"
+ dan "Du bruger MySQL som anonym bruger. Anonyme brugere må ikke ændre adgangskoder"
+ nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen"
+ eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords"
+ jps "MySQL ã‚’ anonymous users ã§ä½¿ç”¨ã—ã¦ã„る状態ã§ã¯ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“",
+ est "Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust"
+ fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe"
+ ger "Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern"
+ greek "ΧÏησιμοποιείτε την MySQL σαν anonymous user και έτσι δεν μποÏείτε να αλλάξετε τα passwords άλλων χÏηστών"
+ hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas"
+ ita "Impossibile cambiare la password usando MySQL come utente anonimo"
+ jpn "MySQL ã‚’ anonymous users ã§ä½¿ç”¨ã—ã¦ã„る状態ã§ã¯ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“"
+ kor "ë‹¹ì‹ ì€ MySQLì„œë²„ì— ìµëª…ì˜ ì‚¬ìš©ìžë¡œ ì ‘ì†ì„ 하셨습니다.ìµëª…ì˜ ì‚¬ìš©ìžëŠ” 암호를 변경할 수 없습니다."
+ por "Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas"
+ rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele"
+ rus "Ð’Ñ‹ иÑпользуете MySQL от имени анонимного пользователÑ, а анонимным пользователÑм не разрешаетÑÑ Ð¼ÐµÐ½ÑÑ‚ÑŒ пароли"
+ serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke"
+ spa "Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves"
+ swe "Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord"
+ ukr "Ви викориÑтовуєте MySQL Ñк анонімний кориÑтувач, тому вам не дозволено змінювати паролі"
+ER_PASSWORD_NOT_ALLOWED 42000
+ cze "Na zm-Běnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql"
+ dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder"
+ nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen"
+ eng "You must have privileges to update tables in the mysql database to be able to change passwords for others"
+ jps "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯, mysql データベースã«å¯¾ã—㦠update ã®è¨±å¯ãŒãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“.",
+ est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis"
+ fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres"
+ ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können"
+ greek "ΠÏέπει να έχετε δικαίωμα διόÏθωσης πινάκων (update) στη βάση δεδομένων mysql για να μποÏείτε να αλλάξετε τα passwords άλλων χÏηστών"
+ hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz"
+ ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti"
+ jpn "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯, mysql データベースã«å¯¾ã—㦠update ã®è¨±å¯ãŒãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“."
+ kor "ë‹¹ì‹ ì€ ë‹¤ë¥¸ì‚¬ìš©ìžë“¤ì˜ 암호를 변경할 수 있ë„ë¡ ë°ì´íƒ€ë² ì´ìŠ¤ ë³€ê²½ê¶Œí•œì„ ê°€ì ¸ì•¼ 합니다."
+ por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros"
+ rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora"
+ rus "Для того чтобы изменять пароли других пользователей, у вас должны быть привилегии на изменение таблиц в базе данных mysql"
+ serbian "Morate imati privilegije da možete da update-ujete određene tabele ako želite da menjate lozinke za druge korisnike"
+ spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros"
+ swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen"
+ ukr "Ви повині мати право на оновлення таблиць у базі данних mysql, аби мати можливість змінювати пароль іншим"
+ER_PASSWORD_NO_MATCH 42000
+ cze "V tabulce user nen-Bí žádný odpovídající řádek"
+ dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen"
+ nla "Kan geen enkele passende rij vinden in de gebruikers tabel"
+ eng "Can't find any matching row in the user table"
+ est "Ei leia vastavat kirjet kasutajate tabelis"
+ fre "Impossible de trouver un enregistrement correspondant dans la table user"
+ ger "Kann keinen passenden Datensatz in Tabelle 'user' finden"
+ greek "Δεν είναι δυνατή η ανεύρεση της αντίστοιχης εγγραφής στον πίνακα των χρηστών"
+ hun "Nincs megegyezo sor a user tablaban"
+ ita "Impossibile trovare la riga corrispondente nella tabella user"
+ kor "ì‚¬ìš©ìž í…Œì´ë¸”ì—ì„œ ì¼ì¹˜í•˜ëŠ” ê²ƒì„ ì°¾ì„ ìˆ˜ ì—†ì니다."
+ por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)"
+ rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului"
+ rus "Невозможно отыскать подходящую запись в таблице пользователей"
+ serbian "Ne mogu da pronađem odgovarajući slog u 'user' tabeli"
+ spa "No puedo encontrar una línea correponsdiente en la tabla user"
+ swe "Hittade inte användaren i 'user'-tabellen"
+ ukr "Не можу знайти відповідних записів у таблиці користувача"
+ER_UPDATE_INFO
+ cze "Nalezen-Bých řádků: %ld Změněno: %ld Varování: %ld"
+ dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld"
+ nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld"
+ eng "Rows matched: %ld Changed: %ld Warnings: %ld"
+ jps "一致数(Rows matched): %ld 変更: %ld Warnings: %ld",
+ est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld"
+ fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld"
+ ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld"
+ hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld"
+ ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld"
+ jpn "一致数(Rows matched): %ld 変更: %ld Warnings: %ld"
+ kor "ì¼ì¹˜í•˜ëŠ” Rows : %ldê°œ 변경ë¨: %ldê°œ 경고: %ldê°œ"
+ por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld"
+ rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld"
+ rus "Совпало записей: %ld Изменено: %ld Предупреждений: %ld"
+ serbian "Odgovarajućih slogova: %ld Promenjeno: %ld Upozorenja: %ld"
+ spa "Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld"
+ swe "Rader: %ld Uppdaterade: %ld Varningar: %ld"
+ ukr "Записів відповідає: %ld Змінено: %ld Застережень: %ld"
+ER_CANT_CREATE_THREAD
+ cze "Nemohu vytvo-Břit nový thread (errno %d). Pokud je ještě nějaká volná paměť, podívejte se do manuálu na část o chybách specifických pro jednotlivé operační systémy"
+ dan "Kan ikke danne en ny tråd (fejl nr. %d). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl"
+ nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout"
+ eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug"
+ jps "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %d). ã‚‚ã—最大使用許å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’越ãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„.",
+ est "Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga"
+ fre "Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS"
+ ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen"
+ hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet"
+ ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO"
+ jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %d). ã‚‚ã—最大使用許å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’越ãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„."
+ kor "새로운 쓰레드를 만들 수 없습니다.(ì—러번호 %d). 만약 여유메모리가 있다면 OS-dependent버그 ì˜ ë©”ë‰´ì–¼ ë¶€ë¶„ì„ ì°¾ì•„ë³´ì‹œì˜¤."
+ nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ por "Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional"
+ rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare"
+ rus "Невозможно создать новый поток (ошибка %d). Если это не ситуация, связанная с нехваткой памяти, то вам следует изучить документацию на предмет описания возможной ошибки работы в конкретной ОС"
+ serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priručniku da li je ovo specifična greška vašeg operativnog sistema"
+ spa "No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO"
+ swe "Kan inte skapa en ny tråd (errno %d)"
+ ukr "Не можу створити нову гілку (помилка %d). Якщо ви не використали усю пам'ять, то прочитайте документацію до вашої ОС - можливо це помилка ОС"
+ER_WRONG_VALUE_COUNT_ON_ROW 21S01
+ cze "Po-Bčet sloupců neodpovídá počtu hodnot na řádku %ld"
+ dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld"
+ nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld"
+ eng "Column count doesn't match value count at row %ld"
+ est "Tulpade hulk erineb väärtuste hulgast real %ld"
+ ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %ld überein"
+ hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel"
+ ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld"
+ kor "Row %ldì—ì„œ 칼럼 카운트와 value 카운터와 ì¼ì¹˜í•˜ì§€ 않습니다."
+ por "Contagem de colunas não confere com a contagem de valores na linha %ld"
+ rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld"
+ rus "Количество столбцов не совпадает с количеством значений в записи %ld"
+ serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld"
+ spa "El número de columnas no corresponde al número en la línea %ld"
+ swe "Antalet kolumner motsvarar inte antalet värden på rad: %ld"
+ ukr "Кількість стовбців не співпадає з кількістю значень у строці %ld"
+ER_CANT_REOPEN_TABLE
+ cze "Nemohu znovuotev-Břít tabulku: '%-.192s"
+ dan "Kan ikke genåbne tabel '%-.192s"
+ nla "Kan tabel niet opnieuw openen: '%-.192s"
+ eng "Can't reopen table: '%-.192s'"
+ est "Ei suuda taasavada tabelit '%-.192s'"
+ fre "Impossible de réouvrir la table: '%-.192s"
+ ger "Kann Tabelle'%-.192s' nicht erneut öffnen"
+ hun "Nem lehet ujra-megnyitni a tablat: '%-.192s"
+ ita "Impossibile riaprire la tabella: '%-.192s'"
+ kor "í…Œì´ë¸”ì„ ë‹¤ì‹œ 열수 없군요: '%-.192s"
+ nor "Can't reopen table: '%-.192s"
+ norwegian-ny "Can't reopen table: '%-.192s"
+ pol "Can't reopen table: '%-.192s"
+ por "Não pode reabrir a tabela '%-.192s"
+ rum "Nu pot redeschide tabela: '%-.192s'"
+ rus "Невозможно заново открыть таблицу '%-.192s'"
+ serbian "Ne mogu da ponovo otvorim tabelu '%-.192s'"
+ slo "Can't reopen table: '%-.192s"
+ spa "No puedo reabrir tabla: '%-.192s"
+ swe "Kunde inte stänga och öppna tabell '%-.192s"
+ ukr "Не можу перевідкрити таблицю: '%-.192s'"
+ER_INVALID_USE_OF_NULL 22004
+ cze "Neplatn-Bé užití hodnoty NULL"
+ dan "Forkert brug af nulværdi (NULL)"
+ nla "Foutief gebruik van de NULL waarde"
+ eng "Invalid use of NULL value"
+ jps "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™",
+ est "NULL väärtuse väärkasutus"
+ fre "Utilisation incorrecte de la valeur NULL"
+ ger "Unerlaubte Verwendung eines NULL-Werts"
+ hun "A NULL ervenytelen hasznalata"
+ ita "Uso scorretto del valore NULL"
+ jpn "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™"
+ kor "NULL ê°’ì„ ìž˜ëª» 사용하셨군요..."
+ por "Uso inválido do valor NULL"
+ rum "Folosirea unei value NULL e invalida"
+ rus "Неправильное использование величины NULL"
+ serbian "Pogrešna upotreba vrednosti NULL"
+ spa "Invalido uso de valor NULL"
+ swe "Felaktig använding av NULL"
+ ukr "Хибне використання значення NULL"
+ER_REGEXP_ERROR 42000
+ cze "Regul-Bární výraz vrátil chybu '%-.64s'"
+ dan "Fik fejl '%-.64s' fra regexp"
+ nla "Fout '%-.64s' ontvangen van regexp"
+ eng "Got error '%-.64s' from regexp"
+ est "regexp tagastas vea '%-.64s'"
+ fre "Erreur '%-.64s' provenant de regexp"
+ ger "regexp lieferte Fehler '%-.64s'"
+ hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)"
+ ita "Errore '%-.64s' da regexp"
+ kor "regexpì—ì„œ '%-.64s'ê°€ 났습니다."
+ por "Obteve erro '%-.64s' em regexp"
+ rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)"
+ rus "Получена ошибка '%-.64s' от регулярного выражения"
+ serbian "Funkcija regexp je vratila grešku '%-.64s'"
+ spa "Obtenido error '%-.64s' de regexp"
+ swe "Fick fel '%-.64s' från REGEXP"
+ ukr "Отримано помилку '%-.64s' від регулярного виразу"
+ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000
+ cze "Pokud nen-Bí žádná GROUP BY klauzule, není dovoleno současné použití GROUP položek (MIN(),MAX(),COUNT()...) s ne GROUP položkami"
+ dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat"
+ nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is"
+ eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause"
+ est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud"
+ fre "Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY"
+ ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulässig, wenn keine GROUP-BY-Klausel vorhanden ist"
+ hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul"
+ ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY"
+ kor "Mixing of GROUP 칼럼s (MIN(),MAX(),COUNT(),...) with no GROUP 칼럼s is illegal if there is no GROUP BY clause"
+ por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)"
+ rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY"
+ rus "Одновременное использование сгруппированных (GROUP) столбцов (MIN(),MAX(),COUNT(),...) с несгруппированными столбцами является некорректным, если в выражении есть GROUP BY"
+ serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz"
+ spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY"
+ swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del"
+ ukr "Змішування GROUP стовбців (MIN(),MAX(),COUNT()...) з не GROUP стовбцями є забороненим, якщо не має GROUP BY"
+ER_NONEXISTING_GRANT 42000
+ cze "Neexistuje odpov-Bídající grant pro uživatele '%-.48s' na stroji '%-.64s'"
+ dan "Denne tilladelse findes ikke for brugeren '%-.48s' på vært '%-.64s'"
+ nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s'"
+ eng "There is no such grant defined for user '%-.48s' on host '%-.64s'"
+ jps "ユーザー '%-.48s' (ホスト '%-.64s' ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "Sellist õigust ei ole defineeritud kasutajale '%-.48s' masinast '%-.64s'"
+ fre "Un tel droit n'est pas défini pour l'utilisateur '%-.48s' sur l'hôte '%-.64s'"
+ ger "Für Benutzer '%-.48s' auf Host '%-.64s' gibt es keine solche Berechtigung"
+ hun "A '%-.48s' felhasznalonak nincs ilyen joga a '%-.64s' host-on"
+ ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s'"
+ jpn "ユーザー '%-.48s' (ホスト '%-.64s' ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "ì‚¬ìš©ìž '%-.48s' (호스트 '%-.64s')를 위하여 ì •ì˜ëœ 그런 승ì¸ì€ 없습니다."
+ por "Não existe tal permissão (grant) definida para o usuário '%-.48s' no 'host' '%-.64s'"
+ rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.48s' de pe host-ul '%-.64s'"
+ rus "Такие права не определены для пользователя '%-.48s' на хосте '%-.64s'"
+ serbian "Ne postoji odobrenje za pristup korisniku '%-.48s' na host-u '%-.64s'"
+ spa "No existe permiso definido para usuario '%-.48s' en el servidor '%-.64s'"
+ swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s'"
+ ukr "Повноважень не визначено для користувача '%-.48s' з хосту '%-.64s'"
+ER_TABLEACCESS_DENIED_ERROR 42000
+ cze "%-.16s p-Bříkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro tabulku '%-.192s'"
+ dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for tabellen '%-.192s'"
+ nla "%-.16s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor tabel '%-.192s'"
+ eng "%-.16s command denied to user '%-.48s'@'%-.64s' for table '%-.192s'"
+ jps "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s' ,テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "%-.16s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tabelis '%-.192s'"
+ fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la table '%-.192s'"
+ ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' auf Tabelle '%-.192s'"
+ hun "%-.16s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' tablaban"
+ ita "Comando %-.16s negato per l'utente: '%-.48s'@'%-.64s' sulla tabella '%-.192s'"
+ jpn "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s' ,テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "'%-.16s' ëª…ë ¹ì€ ë‹¤ìŒ ì‚¬ìš©ìžì—게 거부ë˜ì—ˆìŠµë‹ˆë‹¤. : '%-.48s'@'%-.64s' for í…Œì´ë¸” '%-.192s'"
+ por "Comando '%-.16s' negado para o usuário '%-.48s'@'%-.64s' na tabela '%-.192s'"
+ rum "Comanda %-.16s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru tabela '%-.192s'"
+ rus "Команда %-.16s запрещена пользователю '%-.48s'@'%-.64s' для таблицы '%-.192s'"
+ serbian "%-.16s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za tabelu '%-.192s'"
+ spa "%-.16s comando negado para usuario: '%-.48s'@'%-.64s' para tabla '%-.192s'"
+ swe "%-.16s ej tillåtet för '%-.48s'@'%-.64s' för tabell '%-.192s'"
+ ukr "%-.16s команда заборонена користувачу: '%-.48s'@'%-.64s' у таблиці '%-.192s'"
+ER_COLUMNACCESS_DENIED_ERROR 42000
+ cze "%-.16s p-Bříkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro sloupec '%-.192s' v tabulce '%-.192s'"
+ dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for kolonne '%-.192s' in tabellen '%-.192s'"
+ nla "%-.16s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor kolom '%-.192s' in tabel '%-.192s'"
+ eng "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'"
+ jps "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s'Â¥n カラム '%-.192s' テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
+ est "%-.16s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tulbale '%-.192s' tabelis '%-.192s'"
+ fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la colonne '%-.192s' de la table '%-.192s'"
+ ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' und Feld '%-.192s' in Tabelle '%-.192s'"
+ hun "%-.16s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' mezo eseten a '%-.192s' tablaban"
+ ita "Comando %-.16s negato per l'utente: '%-.48s'@'%-.64s' sulla colonna '%-.192s' della tabella '%-.192s'"
+ jpn "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s'\n カラム '%-.192s' テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ kor "'%-.16s' ëª…ë ¹ì€ ë‹¤ìŒ ì‚¬ìš©ìžì—게 거부ë˜ì—ˆìŠµë‹ˆë‹¤. : '%-.48s'@'%-.64s' for 칼럼 '%-.192s' in í…Œì´ë¸” '%-.192s'"
+ por "Comando '%-.16s' negado para o usuário '%-.48s'@'%-.64s' na coluna '%-.192s', na tabela '%-.192s'"
+ rum "Comanda %-.16s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru coloana '%-.192s' in tabela '%-.192s'"
+ rus "Команда %-.16s запрещена пользователю '%-.48s'@'%-.64s' для столбца '%-.192s' в таблице '%-.192s'"
+ serbian "%-.16s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za kolonu '%-.192s' iz tabele '%-.192s'"
+ spa "%-.16s comando negado para usuario: '%-.48s'@'%-.64s' para columna '%-.192s' en la tabla '%-.192s'"
+ swe "%-.16s ej tillåtet för '%-.48s'@'%-.64s' för kolumn '%-.192s' i tabell '%-.192s'"
+ ukr "%-.16s команда заборонена користувачу: '%-.48s'@'%-.64s' для стовбця '%-.192s' у таблиці '%-.192s'"
+ER_ILLEGAL_GRANT_FOR_TABLE 42000
+ cze "Neplatn-Bý příkaz GRANT/REVOKE. Prosím, přečtěte si v manuálu, jaká privilegia je možné použít."
+ dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
+ nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden."
+ eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
+ est "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga"
+ fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel."
+ ger "Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt"
+ greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek"
+ ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati."
+ jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ kor "ìž˜ëª»ëœ GRANT/REVOKE 명령. ì–´ë–¤ 권리와 승ì¸ì´ 사용ë˜ì–´ 질 수 있는지 ë©”ë‰´ì–¼ì„ ë³´ì‹œì˜¤."
+ nor "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ pol "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados."
+ rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite."
+ rus "Неверная команда GRANT или REVOKE. Обратитесь к документации, чтобы выяснить, какие привилегии можно использовать"
+ serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priručniku koje vrednosti mogu biti upotrebljene."
+ slo "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
+ spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados."
+ swe "Felaktigt GRANT-privilegium använt"
+ ukr "Хибна GRANT/REVOKE команда; прочитайте документацію стосовно того, які права можна використовувати"
+ER_GRANT_WRONG_HOST_OR_USER 42000
+ cze "Argument p-Bříkazu GRANT uživatel nebo stroj je příliš dlouhý"
+ dan "Værts- eller brugernavn for langt til GRANT"
+ nla "De host of gebruiker parameter voor GRANT is te lang"
+ eng "The host or user argument to GRANT is too long"
+ est "Masina või kasutaja nimi GRANT lauses on liiga pikk"
+ fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long"
+ ger "Das Host- oder User-Argument für GRANT ist zu lang"
+ hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban"
+ ita "L'argomento host o utente per la GRANT e` troppo lungo"
+ kor "승ì¸(GRANT)ì„ ìœ„í•˜ì—¬ 사용한 사용ìžë‚˜ í˜¸ìŠ¤íŠ¸ì˜ ê°’ë“¤ì´ ë„ˆë¬´ ê¹ë‹ˆë‹¤."
+ por "Argumento de 'host' ou de usuário para o GRANT é longo demais"
+ rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung"
+ rus "Слишком длинное имя пользователя/хоста для GRANT"
+ serbian "Argument 'host' ili 'korisnik' prosleđen komandi 'GRANT' je predugačak"
+ spa "El argumento para servidor o usuario para GRANT es demasiado grande"
+ swe "Felaktigt maskinnamn eller användarnamn använt med GRANT"
+ ukr "Аргумент host або user для GRANT задовгий"
+ER_NO_SUCH_TABLE 42S02
+ cze "Tabulka '%-.192s.%-.192s' neexistuje"
+ dan "Tabellen '%-.192s.%-.192s' eksisterer ikke"
+ nla "Tabel '%-.192s.%-.192s' bestaat niet"
+ eng "Table '%-.192s.%-.192s' doesn't exist"
+ est "Tabelit '%-.192s.%-.192s' ei eksisteeri"
+ fre "La table '%-.192s.%-.192s' n'existe pas"
+ ger "Tabelle '%-.192s.%-.192s' existiert nicht"
+ hun "A '%-.192s.%-.192s' tabla nem letezik"
+ ita "La tabella '%-.192s.%-.192s' non esiste"
+ jpn "Table '%-.192s.%-.192s' doesn't exist"
+ kor "í…Œì´ë¸” '%-.192s.%-.192s' 는 존재하지 않습니다."
+ nor "Table '%-.192s.%-.192s' doesn't exist"
+ norwegian-ny "Table '%-.192s.%-.192s' doesn't exist"
+ pol "Table '%-.192s.%-.192s' doesn't exist"
+ por "Tabela '%-.192s.%-.192s' não existe"
+ rum "Tabela '%-.192s.%-.192s' nu exista"
+ rus "Таблица '%-.192s.%-.192s' не существует"
+ serbian "Tabela '%-.192s.%-.192s' ne postoji"
+ slo "Table '%-.192s.%-.192s' doesn't exist"
+ spa "Tabla '%-.192s.%-.192s' no existe"
+ swe "Det finns ingen tabell som heter '%-.192s.%-.192s'"
+ ukr "Таблиця '%-.192s.%-.192s' не існує"
+ER_NONEXISTING_TABLE_GRANT 42000
+ cze "Neexistuje odpov-Bídající grant pro uživatele '%-.48s' na stroji '%-.64s' pro tabulku '%-.192s'"
+ dan "Denne tilladelse eksisterer ikke for brugeren '%-.48s' på vært '%-.64s' for tabellen '%-.192s'"
+ nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s' op tabel '%-.192s'"
+ eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'"
+ est "Sellist õigust ei ole defineeritud kasutajale '%-.48s' masinast '%-.64s' tabelile '%-.192s'"
+ fre "Un tel droit n'est pas défini pour l'utilisateur '%-.48s' sur l'hôte '%-.64s' sur la table '%-.192s'"
+ ger "Eine solche Berechtigung ist für User '%-.48s' auf Host '%-.64s' an Tabelle '%-.192s' nicht definiert"
+ hun "A '%-.48s' felhasznalo szamara a '%-.64s' host '%-.192s' tablajaban ez a parancs nem engedelyezett"
+ ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s' sulla tabella '%-.192s'"
+ kor "ì‚¬ìš©ìž '%-.48s'(호스트 '%-.64s')는 í…Œì´ë¸” '%-.192s'를 사용하기 위하여 ì •ì˜ëœ 승ì¸ì€ 없습니다. "
+ por "Não existe tal permissão (grant) definido para o usuário '%-.48s' no 'host' '%-.64s', na tabela '%-.192s'"
+ rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.48s' de pe host-ul '%-.64s' pentru tabela '%-.192s'"
+ rus "Такие права не определены для пользователя '%-.48s' на компьютере '%-.64s' для таблицы '%-.192s'"
+ serbian "Ne postoji odobrenje za pristup korisniku '%-.48s' na host-u '%-.64s' tabeli '%-.192s'"
+ spa "No existe tal permiso definido para usuario '%-.48s' en el servidor '%-.64s' en la tabla '%-.192s'"
+ swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s' för tabell '%-.192s'"
+ ukr "Повноважень не визначено для користувача '%-.48s' з хосту '%-.64s' для таблиці '%-.192s'"
+ER_NOT_ALLOWED_COMMAND 42000
+ cze "Pou-Bžitý příkaz není v této verzi MySQL povolen"
+ dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL"
+ nla "Het used commando is niet toegestaan in deze MySQL versie"
+ eng "The used command is not allowed with this MySQL version"
+ est "Antud käsk ei ole lubatud käesolevas MySQL versioonis"
+ fre "Cette commande n'existe pas dans cette version de MySQL"
+ ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig"
+ hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban"
+ ita "Il comando utilizzato non e` supportato in questa versione di MySQL"
+ kor "ì‚¬ìš©ëœ ëª…ë ¹ì€ í˜„ìž¬ì˜ MySQL 버젼ì—서는 ì´ìš©ë˜ì§€ 않습니다."
+ por "Comando usado não é permitido para esta versão do MySQL"
+ rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL"
+ rus "Эта команда не допускается в данной версии MySQL"
+ serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera"
+ spa "El comando usado no es permitido con esta versión de MySQL"
+ swe "Du kan inte använda detta kommando med denna MySQL version"
+ ukr "Використовувана команда не дозволена у цій версії MySQL"
+ER_SYNTAX_ERROR 42000
+ cze "Va-Bše syntaxe je nějaká divná"
+ dan "Der er en fejl i SQL syntaksen"
+ nla "Er is iets fout in de gebruikte syntax"
+ eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use"
+ est "Viga SQL süntaksis"
+ fre "Erreur de syntaxe"
+ ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen"
+ greek "You have an error in your SQL syntax"
+ hun "Szintaktikai hiba"
+ ita "Errore di sintassi nella query SQL"
+ jpn "Something is wrong in your syntax"
+ kor "SQL êµ¬ë¬¸ì— ì˜¤ë¥˜ê°€ 있습니다."
+ nor "Something is wrong in your syntax"
+ norwegian-ny "Something is wrong in your syntax"
+ pol "Something is wrong in your syntax"
+ por "Você tem um erro de sintaxe no seu SQL"
+ rum "Aveti o eroare in sintaxa SQL"
+ rus "У вас ошибка в запросе. Изучите документацию по используемой версии MySQL на предмет корректного синтаксиса"
+ serbian "Imate grešku u vašoj SQL sintaksi"
+ slo "Something is wrong in your syntax"
+ spa "Algo está equivocado en su sintax"
+ swe "Du har något fel i din syntax"
+ ukr "У вас помилка у синтаксисі SQL"
+ER_DELAYED_CANT_CHANGE_LOCK
+ cze "Zpo-Bžděný insert threadu nebyl schopen získat požadovaný zámek pro tabulku %-.192s"
+ dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.192s"
+ nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.192s"
+ eng "Delayed insert thread couldn't get requested lock for table %-.192s"
+ est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.192s"
+ fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.192s"
+ ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.192s' nicht erhalten"
+ hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.192s tablahoz"
+ ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.192s"
+ kor "ì§€ì—°ëœ insert 쓰레드가 í…Œì´ë¸” %-.192sì˜ ìš”êµ¬ëœ ë½í‚¹ì„ 처리할 수 없었습니다."
+ por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.192s'"
+ rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.192s"
+ rus "Поток, обслуживающий отложенную вставку (delayed insert), не смог получить запрашиваемую блокировку на таблицу %-.192s"
+ serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zaključavanje tabele '%-.192s'"
+ spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.192s"
+ swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.192s'"
+ ukr "Гілка для INSERT DELAYED не може отримати блокування для таблиці %-.192s"
+ER_TOO_MANY_DELAYED_THREADS
+ cze "P-Bříliš mnoho zpožděných threadů"
+ dan "For mange slettede tråde (threads) i brug"
+ nla "Te veel 'delayed' threads in gebruik"
+ eng "Too many delayed threads in use"
+ est "Liiga palju DELAYED lõimesid kasutusel"
+ fre "Trop de tâche 'delayed' en cours"
+ ger "Zu viele verzögerte (DELAYED) Threads in Verwendung"
+ hun "Tul sok kesletetett thread (delayed)"
+ ita "Troppi threads ritardati in uso"
+ kor "너무 ë§Žì€ ì§€ì—° 쓰레드를 사용하고 있습니다."
+ por "Excesso de 'threads' retardadas (atrasadas) em uso"
+ rum "Prea multe threaduri aminate care sint in uz"
+ rus "Слишком много потоков, обслуживающих отложенную вставку (delayed insert)"
+ serbian "Previše prolongiranih thread-ova je u upotrebi"
+ spa "Muchos threads retardados en uso"
+ swe "Det finns redan 'max_delayed_threads' trådar i använding"
+ ukr "Забагато затриманих гілок використовується"
+ER_ABORTING_CONNECTION 08S01
+ cze "Zru-Bšeno spojení %ld do databáze: '%-.192s' uživatel: '%-.48s' (%-.64s)"
+ dan "Afbrudt forbindelse %ld til database: '%-.192s' bruger: '%-.48s' (%-.64s)"
+ nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' (%-.64s)"
+ eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ est "Ühendus katkestatud %ld andmebaasile: '%-.192s' kasutajale: '%-.48s' (%-.64s)"
+ fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' (%-.64s)"
+ ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. Benutzer: '%-.48s' (%-.64s)"
+ hun "Megszakitott kapcsolat %ld db: '%-.192s' adatbazishoz, felhasznalo: '%-.48s' (%-.64s)"
+ ita "Interrotta la connessione %ld al db: '%-.192s' utente: '%-.48s' (%-.64s)"
+ jpn "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ kor "ë°ì´íƒ€ë² ì´ìŠ¤ ì ‘ì†ì„ 위한 ì—°ê²° %ldê°€ ì¤‘ë‹¨ë¨ : '%-.192s' 사용ìž: '%-.48s' (%-.64s)"
+ nor "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ norwegian-ny "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ pol "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ por "Conexão %ld abortou para o banco de dados '%-.192s' - usuário '%-.48s' (%-.64s)"
+ rum "Conectie terminata %ld la baza de date: '%-.192s' utilizator: '%-.48s' (%-.64s)"
+ rus "Прервано соединение %ld к базе данных '%-.192s' пользователя '%-.48s' (%-.64s)"
+ serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' (%-.64s)"
+ slo "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ spa "Conexión abortada %ld para db: '%-.192s' usuario: '%-.48s' (%-.64s)"
+ swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s' (%-.64s)"
+ ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувача: '%-.48s' (%-.64s)"
+ER_NET_PACKET_TOO_LARGE 08S01
+ cze "Zji-Bštěn příchozí packet delší než 'max_allowed_packet'"
+ dan "Modtog en datapakke som var større end 'max_allowed_packet'"
+ nla "Groter pakket ontvangen dan 'max_allowed_packet'"
+ eng "Got a packet bigger than 'max_allowed_packet' bytes"
+ est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga"
+ fre "Paquet plus grand que 'max_allowed_packet' reçu"
+ ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes"
+ hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'"
+ ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'"
+ kor "'max_allowed_packet'보다 ë”í° íŒ¨í‚·ì„ ë°›ì•˜ìŠµë‹ˆë‹¤."
+ por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)"
+ rum "Un packet mai mare decit 'max_allowed_packet' a fost primit"
+ rus "Полученный пакет больше, чем 'max_allowed_packet'"
+ serbian "Primio sam mrežni paket veći od definisane vrednosti 'max_allowed_packet'"
+ spa "Obtenido un paquete mayor que 'max_allowed_packet'"
+ swe "Kommunkationspaketet är större än 'max_allowed_packet'"
+ ukr "Отримано пакет більший ніж max_allowed_packet"
+ER_NET_READ_ERROR_FROM_PIPE 08S01
+ cze "Zji-Bštěna chyba při čtení z roury spojení"
+ dan "Fik læsefejl fra forbindelse (connection pipe)"
+ nla "Kreeg leesfout van de verbindings pipe"
+ eng "Got a read error from the connection pipe"
+ est "Viga ühendustoru lugemisel"
+ fre "Erreur de lecture reçue du pipe de connexion"
+ ger "Lese-Fehler bei einer Verbindungs-Pipe"
+ hun "Olvasasi hiba a kapcsolat soran"
+ ita "Rilevato un errore di lettura dalla pipe di connessione"
+ kor "ì—°ê²° 파ì´í”„로부터 ì—러가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
+ por "Obteve um erro de leitura no 'pipe' da conexão"
+ rum "Eroare la citire din cauza lui 'connection pipe'"
+ rus "Получена ошибка чтения от потока соединения (connection pipe)"
+ serbian "Greška pri čitanju podataka sa pipe-a"
+ spa "Obtenido un error de lectura de la conexión pipe"
+ swe "Fick läsfel från klienten vid läsning från 'PIPE'"
+ ukr "Отримано помилку читання з комунікаційного каналу"
+ER_NET_FCNTL_ERROR 08S01
+ cze "Zji-Bštěna chyba fcntl()"
+ dan "Fik fejlmeddelelse fra fcntl()"
+ nla "Kreeg fout van fcntl()"
+ eng "Got an error from fcntl()"
+ est "fcntl() tagastas vea"
+ fre "Erreur reçue de fcntl() "
+ ger "fcntl() lieferte einen Fehler"
+ hun "Hiba a fcntl() fuggvenyben"
+ ita "Rilevato un errore da fcntl()"
+ kor "fcntl() 함수로부터 ì—러가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
+ por "Obteve um erro em fcntl()"
+ rum "Eroare obtinuta de la fcntl()"
+ rus "Получена ошибка от fcntl()"
+ serbian "Greška pri izvršavanju funkcije fcntl()"
+ spa "Obtenido un error de fcntl()"
+ swe "Fick fatalt fel från 'fcntl()'"
+ ukr "Отримано помилкку від fcntl()"
+ER_NET_PACKETS_OUT_OF_ORDER 08S01
+ cze "P-Bříchozí packety v chybném pořadí"
+ dan "Modtog ikke datapakker i korrekt rækkefølge"
+ nla "Pakketten in verkeerde volgorde ontvangen"
+ eng "Got packets out of order"
+ est "Paketid saabusid vales järjekorras"
+ fre "Paquets reçus dans le désordre"
+ ger "Pakete nicht in der richtigen Reihenfolge empfangen"
+ hun "Helytelen sorrendben erkezett adatcsomagok"
+ ita "Ricevuti pacchetti non in ordine"
+ kor "순서가 맞지않는 íŒ¨í‚·ì„ ë°›ì•˜ìŠµë‹ˆë‹¤."
+ por "Obteve pacotes fora de ordem"
+ rum "Packets care nu sint ordonati au fost gasiti"
+ rus "Пакеты получены в неверном порядке"
+ serbian "Primio sam mrežne pakete van reda"
+ spa "Obtenido paquetes desordenados"
+ swe "Kommunikationspaketen kom i fel ordning"
+ ukr "Отримано пакети у неналежному порядку"
+ER_NET_UNCOMPRESS_ERROR 08S01
+ cze "Nemohu rozkomprimovat komunika-Bční packet"
+ dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)"
+ nla "Communicatiepakket kon niet worden gedecomprimeerd"
+ eng "Couldn't uncompress communication packet"
+ est "Viga andmepaketi lahtipakkimisel"
+ fre "Impossible de décompresser le paquet reçu"
+ ger "Kommunikationspaket lässt sich nicht entpacken"
+ hun "A kommunikacios adatcsomagok nem tomorithetok ki"
+ ita "Impossibile scompattare i pacchetti di comunicazione"
+ kor "통신 íŒ¨í‚·ì˜ ì••ì¶•í•´ì œë¥¼ í•  수 없었습니다."
+ por "Não conseguiu descomprimir pacote de comunicação"
+ rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)"
+ rus "Невозможно распаковать пакет, полученный через коммуникационный протокол"
+ serbian "Ne mogu da dekompresujem mrežne pakete"
+ spa "No puedo descomprimir paquetes de comunicación"
+ swe "Kunde inte packa up kommunikationspaketet"
+ ukr "Не можу декомпресувати комунікаційний пакет"
+ER_NET_READ_ERROR 08S01
+ cze "Zji-Bštěna chyba při čtení komunikačního packetu"
+ dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)"
+ nla "Fout bij het lezen van communicatiepakketten"
+ eng "Got an error reading communication packets"
+ est "Viga andmepaketi lugemisel"
+ fre "Erreur de lecture des paquets reçus"
+ ger "Fehler beim Lesen eines Kommunikationspakets"
+ hun "HIba a kommunikacios adatcsomagok olvasasa soran"
+ ita "Rilevato un errore ricevendo i pacchetti di comunicazione"
+ kor "통신 íŒ¨í‚·ì„ ì½ëŠ” 중 오류가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
+ por "Obteve um erro na leitura de pacotes de comunicação"
+ rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)"
+ rus "Получена ошибка в процессе получения пакета через коммуникационный протокол "
+ serbian "Greška pri primanju mrežnih paketa"
+ spa "Obtenido un error leyendo paquetes de comunicación"
+ swe "Fick ett fel vid läsning från klienten"
+ ukr "Отримано помилку читання комунікаційних пакетів"
+ER_NET_READ_INTERRUPTED 08S01
+ cze "Zji-Bštěn timeout při čtení komunikačního packetu"
+ dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)"
+ nla "Timeout bij het lezen van communicatiepakketten"
+ eng "Got timeout reading communication packets"
+ est "Kontrollaja ületamine andmepakettide lugemisel"
+ fre "Timeout en lecture des paquets reçus"
+ ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets"
+ hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran"
+ ita "Rilevato un timeout ricevendo i pacchetti di comunicazione"
+ kor "통신 íŒ¨í‚·ì„ ì½ëŠ” 중 timeoutì´ ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
+ por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação"
+ rum "Timeout obtinut citind pachetele de comunicatie (communication packets)"
+ rus "Получен таймаут ожидания пакета через коммуникационный протокол "
+ serbian "Vremenski limit za čitanje mrežnih paketa je istekao"
+ spa "Obtenido timeout leyendo paquetes de comunicación"
+ swe "Fick 'timeout' vid läsning från klienten"
+ ukr "Отримано затримку читання комунікаційних пакетів"
+ER_NET_ERROR_ON_WRITE 08S01
+ cze "Zji-Bštěna chyba při zápisu komunikačního packetu"
+ dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)"
+ nla "Fout bij het schrijven van communicatiepakketten"
+ eng "Got an error writing communication packets"
+ est "Viga andmepaketi kirjutamisel"
+ fre "Erreur d'écriture des paquets envoyés"
+ ger "Fehler beim Schreiben eines Kommunikationspakets"
+ hun "Hiba a kommunikacios csomagok irasa soran"
+ ita "Rilevato un errore inviando i pacchetti di comunicazione"
+ kor "통신 íŒ¨í‚·ì„ ê¸°ë¡í•˜ëŠ” 중 오류가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
+ por "Obteve um erro na escrita de pacotes de comunicação"
+ rum "Eroare in scrierea pachetelor de comunicatie (communication packets)"
+ rus "Получена ошибка при передаче пакета через коммуникационный протокол "
+ serbian "Greška pri slanju mrežnih paketa"
+ spa "Obtenido un error de escribiendo paquetes de comunicación"
+ swe "Fick ett fel vid skrivning till klienten"
+ ukr "Отримано помилку запису комунікаційних пакетів"
+ER_NET_WRITE_INTERRUPTED 08S01
+ cze "Zji-Bštěn timeout při zápisu komunikačního packetu"
+ dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)"
+ nla "Timeout bij het schrijven van communicatiepakketten"
+ eng "Got timeout writing communication packets"
+ est "Kontrollaja ületamine andmepakettide kirjutamisel"
+ fre "Timeout d'écriture des paquets envoyés"
+ ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets"
+ hun "Idotullepes a kommunikacios csomagok irasa soran"
+ ita "Rilevato un timeout inviando i pacchetti di comunicazione"
+ kor "통신 íŒ¨íŒƒì„ ê¸°ë¡í•˜ëŠ” 중 timeoutì´ ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
+ por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação"
+ rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)"
+ rus "Получен таймаут в процессе передачи пакета через коммуникационный протокол "
+ serbian "Vremenski limit za slanje mrežnih paketa je istekao"
+ spa "Obtenido timeout escribiendo paquetes de comunicación"
+ swe "Fick 'timeout' vid skrivning till klienten"
+ ukr "Отримано затримку запису комунікаційних пакетів"
+ER_TOO_LONG_STRING 42000
+ cze "V-Býsledný řetězec je delší než 'max_allowed_packet'"
+ dan "Strengen med resultater er større end 'max_allowed_packet'"
+ nla "Resultaat string is langer dan 'max_allowed_packet'"
+ eng "Result string is longer than 'max_allowed_packet' bytes"
+ est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga"
+ fre "La chaîne résultat est plus grande que 'max_allowed_packet'"
+ ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes"
+ hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'"
+ ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'"
+ por "'String' resultante é mais longa do que 'max_allowed_packet'"
+ rum "Sirul rezultat este mai lung decit 'max_allowed_packet'"
+ rus "Результирующая строка больше, чем 'max_allowed_packet'"
+ serbian "Rezultujući string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'"
+ spa "La string resultante es mayor que max_allowed_packet"
+ swe "Resultatsträngen är längre än max_allowed_packet"
+ ukr "Строка результату довша ніж max_allowed_packet"
+ER_TABLE_CANT_HANDLE_BLOB 42000
+ cze "Typ pou-Bžité tabulky nepodporuje BLOB/TEXT sloupce"
+ dan "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner"
+ nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen"
+ eng "The used table type doesn't support BLOB/TEXT columns"
+ est "Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju"
+ fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT"
+ ger "Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Felder"
+ hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket"
+ ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT"
+ por "Tipo de tabela usado não permite colunas BLOB/TEXT"
+ rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT"
+ rus "Используемая таблица не поддерживает типы BLOB/TEXT"
+ serbian "Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'"
+ spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT"
+ swe "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner"
+ ukr "Використаний тип таблиці не підтримує BLOB/TEXT стовбці"
+ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
+ cze "Typ pou-Bžité tabulky nepodporuje AUTO_INCREMENT sloupce"
+ dan "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner"
+ nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen"
+ eng "The used table type doesn't support AUTO_INCREMENT columns"
+ est "Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju"
+ fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT"
+ ger "Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Felder"
+ hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket"
+ ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT"
+ por "Tipo de tabela usado não permite colunas AUTO_INCREMENT"
+ rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT"
+ rus "Используемая таблица не поддерживает автоинкрементные столбцы"
+ serbian "Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'"
+ spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT"
+ swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner"
+ ukr "Використаний тип таблиці не підтримує AUTO_INCREMENT стовбці"
+ER_DELAYED_INSERT_TABLE_LOCKED
+ cze "INSERT DELAYED nen-Bí možno s tabulkou '%-.192s' použít, protože je zamčená pomocí LOCK TABLES"
+ dan "INSERT DELAYED kan ikke bruges med tabellen '%-.192s', fordi tabellen er låst med LOCK TABLES"
+ nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.192s', vanwege een 'lock met LOCK TABLES"
+ eng "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES"
+ est "INSERT DELAYED ei saa kasutada tabeli '%-.192s' peal, kuna see on lukustatud LOCK TABLES käsuga"
+ fre "INSERT DELAYED ne peut être utilisé avec la table '%-.192s', car elle est verrouée avec LOCK TABLES"
+ ger "INSERT DELAYED kann für Tabelle '%-.192s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist"
+ greek "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ hun "Az INSERT DELAYED nem hasznalhato a '%-.192s' tablahoz, mert a tabla zarolt (LOCK TABLES)"
+ ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.192s', perche` soggetta a lock da 'LOCK TABLES'"
+ jpn "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ kor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ nor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ norwegian-ny "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ pol "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ por "INSERT DELAYED não pode ser usado com a tabela '%-.192s', porque ela está travada com LOCK TABLES"
+ rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.192s', deoarece este locked folosing LOCK TABLES"
+ rus "Нельзя использовать INSERT DELAYED для таблицы '%-.192s', потому что она заблокирована с помощью LOCK TABLES"
+ serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.192s', zbog toga što je zaključana komandom 'LOCK TABLES'"
+ slo "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ spa "INSERT DELAYED no puede ser usado con tablas '%-.192s', porque esta bloqueada con LOCK TABLES"
+ swe "INSERT DELAYED kan inte användas med tabell '%-.192s', emedan den är låst med LOCK TABLES"
+ ukr "INSERT DELAYED не може бути використано з таблицею '%-.192s', тому що її заблоковано з LOCK TABLES"
+ER_WRONG_COLUMN_NAME 42000
+ cze "Nespr-Bávné jméno sloupce '%-.100s'"
+ dan "Forkert kolonnenavn '%-.100s'"
+ nla "Incorrecte kolom naam '%-.100s'"
+ eng "Incorrect column name '%-.100s'"
+ est "Vigane tulba nimi '%-.100s'"
+ fre "Nom de colonne '%-.100s' incorrect"
+ ger "Falscher Spaltenname '%-.100s'"
+ hun "Ervenytelen mezonev: '%-.100s'"
+ ita "Nome colonna '%-.100s' non corretto"
+ por "Nome de coluna '%-.100s' incorreto"
+ rum "Nume increct de coloana '%-.100s'"
+ rus "Неверное имя столбца '%-.100s'"
+ serbian "Pogrešno ime kolone '%-.100s'"
+ spa "Incorrecto nombre de columna '%-.100s'"
+ swe "Felaktigt kolumnnamn '%-.100s'"
+ ukr "Невірне ім'я стовбця '%-.100s'"
+ER_WRONG_KEY_COLUMN 42000
+ cze "Handler pou-Bžité tabulky neumí indexovat sloupce '%-.192s'"
+ dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.192s'"
+ nla "De gebruikte tabel 'handler' kan kolom '%-.192s' niet indexeren"
+ eng "The used storage engine can't index column '%-.192s'"
+ est "Tabelihandler ei oska indekseerida tulpa '%-.192s'"
+ fre "Le handler de la table ne peut indexé la colonne '%-.192s'"
+ ger "Die verwendete Speicher-Engine kann die Spalte '%-.192s' nicht indizieren"
+ greek "The used table handler can't index column '%-.192s'"
+ hun "A hasznalt tablakezelo nem tudja a '%-.192s' mezot indexelni"
+ ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.192s'"
+ jpn "The used table handler can't index column '%-.192s'"
+ kor "The used table handler can't index column '%-.192s'"
+ nor "The used table handler can't index column '%-.192s'"
+ norwegian-ny "The used table handler can't index column '%-.192s'"
+ pol "The used table handler can't index column '%-.192s'"
+ por "O manipulador de tabela usado não pode indexar a coluna '%-.192s'"
+ rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.192s'"
+ rus "Использованный обработчик таблицы не может проиндексировать столбец '%-.192s'"
+ serbian "Handler tabele ne može da indeksira kolonu '%-.192s'"
+ slo "The used table handler can't index column '%-.192s'"
+ spa "El manipulador de tabla usado no puede indexar columna '%-.192s'"
+ swe "Den använda tabelltypen kan inte indexera kolumn '%-.192s'"
+ ukr "Використаний вказівник таблиці не може індексувати стовбець '%-.192s'"
+ER_WRONG_MRG_TABLE
+ cze "V-Bšechny tabulky v MERGE tabulce nejsou definovány stejně"
+ dan "Tabellerne i MERGE er ikke defineret ens"
+ nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities"
+ eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist"
+ est "Kõik tabelid MERGE tabeli määratluses ei ole identsed"
+ fre "Toutes les tables de la table de type MERGE n'ont pas la même définition"
+ ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert"
+ hun "A MERGE tablaban talalhato tablak definicioja nem azonos"
+ ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica"
+ jpn "All tables in the MERGE table are not defined identically"
+ kor "All tables in the MERGE table are not defined identically"
+ nor "All tables in the MERGE table are not defined identically"
+ norwegian-ny "All tables in the MERGE table are not defined identically"
+ pol "All tables in the MERGE table are not defined identically"
+ por "Todas as tabelas contidas na tabela fundida (MERGE) não estão definidas identicamente"
+ rum "Toate tabelele din tabela MERGE nu sint definite identic"
+ rus "Не все таблицы в MERGE определены одинаково"
+ serbian "Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti način"
+ slo "All tables in the MERGE table are not defined identically"
+ spa "Todas las tablas en la MERGE tabla no estan definidas identicamente"
+ swe "Tabellerna i MERGE-tabellen är inte identiskt definierade"
+ ukr "Таблиці у MERGE TABLE мають різну структуру"
+ER_DUP_UNIQUE 23000
+ cze "Kv-Bůli unique constraintu nemozu zapsat do tabulky '%-.192s'"
+ dan "Kan ikke skrive til tabellen '%-.192s' fordi det vil bryde CONSTRAINT regler"
+ nla "Kan niet opslaan naar table '%-.192s' vanwege 'unique' beperking"
+ eng "Can't write, because of unique constraint, to table '%-.192s'"
+ est "Ei suuda kirjutada tabelisse '%-.192s', kuna see rikub ühesuse kitsendust"
+ fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.192s'"
+ ger "Schreiben in Tabelle '%-.192s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)"
+ hun "A '%-.192s' nem irhato, az egyedi mezok miatt"
+ ita "Impossibile scrivere nella tabella '%-.192s' per limitazione di unicita`"
+ por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.192s'"
+ rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.192s'"
+ rus "Невозможно записать в таблицу '%-.192s' из-за ограничений уникального ключа"
+ serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.192s'"
+ spa "No puedo escribir, debido al único constraint, para tabla '%-.192s'"
+ swe "Kan inte skriva till tabell '%-.192s'; UNIQUE-test"
+ ukr "Не можу записати до таблиці '%-.192s', з причини вимог унікальності"
+ER_BLOB_KEY_WITHOUT_LENGTH 42000
+ cze "BLOB sloupec '%-.192s' je pou-Bžit ve specifikaci klíče bez délky"
+ dan "BLOB kolonnen '%-.192s' brugt i nøglespecifikation uden nøglelængde"
+ nla "BLOB kolom '%-.192s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte"
+ eng "BLOB/TEXT column '%-.192s' used in key specification without a key length"
+ est "BLOB-tüüpi tulp '%-.192s' on kasutusel võtmes ilma pikkust määratlemata"
+ fre "La colonne '%-.192s' de type BLOB est utilisée dans une définition d'index sans longueur d'index"
+ ger "BLOB- oder TEXT-Spalte '%-.192s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet"
+ greek "BLOB column '%-.192s' used in key specification without a key length"
+ hun "BLOB mezo '%-.192s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul"
+ ita "La colonna '%-.192s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza"
+ jpn "BLOB column '%-.192s' used in key specification without a key length"
+ kor "BLOB column '%-.192s' used in key specification without a key length"
+ nor "BLOB column '%-.192s' used in key specification without a key length"
+ norwegian-ny "BLOB column '%-.192s' used in key specification without a key length"
+ pol "BLOB column '%-.192s' used in key specification without a key length"
+ por "Coluna BLOB '%-.192s' usada na especificação de chave sem o comprimento da chave"
+ rum "Coloana BLOB '%-.192s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita"
+ rus "Столбец типа BLOB '%-.192s' был указан в определении ключа без указания длины ключа"
+ serbian "BLOB kolona '%-.192s' je upotrebljena u specifikaciji ključa bez navođenja dužine ključa"
+ slo "BLOB column '%-.192s' used in key specification without a key length"
+ spa "Columna BLOB column '%-.192s' usada en especificación de clave sin tamaño de la clave"
+ swe "Du har inte angett någon nyckellängd för BLOB '%-.192s'"
+ ukr "Стовбець BLOB '%-.192s' використано у визначенні ключа без вказання довжини ключа"
+ER_PRIMARY_CANT_HAVE_NULL 42000
+ cze "V-Bšechny části primárního klíče musejí být NOT NULL; pokud potřebujete NULL, použijte UNIQUE"
+ dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet"
+ nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken"
+ eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead"
+ est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit"
+ fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE"
+ ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden"
+ hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot"
+ ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE"
+ por "Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar"
+ rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb"
+ rus "Все части первичного ключа (PRIMARY KEY) должны быть определены как NOT NULL; Если вам нужна поддержка величин NULL в ключе, воспользуйтесь индексом UNIQUE"
+ serbian "Svi delovi primarnog ključa moraju biti različiti od NULL; Ako Vam ipak treba NULL vrednost u ključu, upotrebite 'UNIQUE'"
+ spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE"
+ swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället"
+ ukr "Усі частини PRIMARY KEY повинні бути NOT NULL; Якщо ви потребуєте NULL у ключі, скористайтеся UNIQUE"
+ER_TOO_MANY_ROWS 42000
+ cze "V-Býsledek obsahuje více než jeden řádek"
+ dan "Resultatet bestod af mere end een række"
+ nla "Resultaat bevatte meer dan een rij"
+ eng "Result consisted of more than one row"
+ est "Tulemis oli rohkem kui üks kirje"
+ fre "Le résultat contient plus d'un enregistrement"
+ ger "Ergebnis besteht aus mehr als einer Zeile"
+ hun "Az eredmeny tobb, mint egy sort tartalmaz"
+ ita "Il risultato consiste di piu` di una riga"
+ por "O resultado consistiu em mais do que uma linha"
+ rum "Resultatul constista din mai multe linii"
+ rus "В результате возвращена более чем одна строка"
+ serbian "Rezultat je sačinjen od više slogova"
+ spa "Resultado compuesto de mas que una línea"
+ swe "Resultet bestod av mera än en rad"
+ ukr "Результат знаходиться у більше ніж одній строці"
+ER_REQUIRES_PRIMARY_KEY 42000
+ cze "Tento typ tabulky vy-Bžaduje primární klíč"
+ dan "Denne tabeltype kræver en primærnøgle"
+ nla "Dit tabel type heeft een primaire zoeksleutel nodig"
+ eng "This table type requires a primary key"
+ est "Antud tabelitüüp nõuab primaarset võtit"
+ fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)"
+ ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)"
+ hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo"
+ ita "Questo tipo di tabella richiede una chiave primaria"
+ por "Este tipo de tabela requer uma chave primária"
+ rum "Aceast tip de tabela are nevoie de o cheie primara"
+ rus "Этот тип таблицы требует определения первичного ключа"
+ serbian "Ovaj tip tabele zahteva da imate definisan primarni ključ"
+ spa "Este tipo de tabla necesita de una primary key"
+ swe "Denna tabelltyp kräver en PRIMARY KEY"
+ ukr "Цей тип таблиці потребує первинного ключа"
+ER_NO_RAID_COMPILED
+ cze "Tato verze MySQL nen-Bí zkompilována s podporou RAID"
+ dan "Denne udgave af MySQL er ikke oversat med understøttelse af RAID"
+ nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning"
+ eng "This version of MySQL is not compiled with RAID support"
+ est "Antud MySQL versioon on kompileeritud ilma RAID toeta"
+ fre "Cette version de MySQL n'est pas compilée avec le support RAID"
+ ger "Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert"
+ hun "Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot"
+ ita "Questa versione di MYSQL non e` compilata con il supporto RAID"
+ por "Esta versão do MySQL não foi compilada com suporte a RAID"
+ rum "Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID"
+ rus "Эта версия MySQL скомпилирована без поддержки RAID"
+ serbian "Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID uređaje"
+ spa "Esta versión de MySQL no es compilada con soporte RAID"
+ swe "Denna version av MySQL är inte kompilerad med RAID"
+ ukr "Ця версія MySQL не зкомпільована з підтримкою RAID"
+ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
+ cze "Update tabulky bez WHERE s kl-Bíčem není v módu bezpečných update dovoleno"
+ dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt"
+ nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom"
+ eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column"
+ est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita"
+ fre "Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index"
+ ger "MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben"
+ hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column"
+ ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave"
+ por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave"
+ rus "Вы работаете в режиме безопасных обновлений (safe update mode) и попробовали изменить таблицу без использования ключевого столбца в части WHERE"
+ serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu ključa"
+ spa "Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna"
+ swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel"
+ ukr "Ви у режимі безпечного оновлення та намагаєтесь оновити таблицю без оператора WHERE, що використовує KEY стовбець"
+ER_KEY_DOES_NOT_EXITS 42000 S1009
+ cze "Kl-Bíč '%-.192s' v tabulce '%-.192s' neexistuje"
+ dan "Nøglen '%-.192s' eksisterer ikke i tabellen '%-.192s'"
+ nla "Zoeksleutel '%-.192s' bestaat niet in tabel '%-.192s'"
+ eng "Key '%-.192s' doesn't exist in table '%-.192s'"
+ est "Võti '%-.192s' ei eksisteeri tabelis '%-.192s'"
+ fre "L'index '%-.192s' n'existe pas sur la table '%-.192s'"
+ ger "Schlüssel '%-.192s' existiert in der Tabelle '%-.192s' nicht"
+ hun "A '%-.192s' kulcs nem letezik a '%-.192s' tablaban"
+ ita "La chiave '%-.192s' non esiste nella tabella '%-.192s'"
+ por "Chave '%-.192s' não existe na tabela '%-.192s'"
+ rus "Ключ '%-.192s' не существует в таблице '%-.192s'"
+ serbian "Ključ '%-.192s' ne postoji u tabeli '%-.192s'"
+ spa "Clave '%-.192s' no existe en la tabla '%-.192s'"
+ swe "Nyckel '%-.192s' finns inte in tabell '%-.192s'"
+ ukr "Ключ '%-.192s' не існує в таблиці '%-.192s'"
+ER_CHECK_NO_SUCH_TABLE 42000
+ cze "Nemohu otev-Břít tabulku"
+ dan "Kan ikke åbne tabellen"
+ nla "Kan tabel niet openen"
+ eng "Can't open table"
+ est "Ei suuda avada tabelit"
+ fre "Impossible d'ouvrir la table"
+ ger "Kann Tabelle nicht öffnen"
+ hun "Nem tudom megnyitni a tablat"
+ ita "Impossibile aprire la tabella"
+ por "Não pode abrir a tabela"
+ rus "Невозможно открыть таблицу"
+ serbian "Ne mogu da otvorim tabelu"
+ spa "No puedo abrir tabla"
+ swe "Kan inte öppna tabellen"
+ ukr "Не можу відкрити таблицю"
+ER_CHECK_NOT_IMPLEMENTED 42000
+ cze "Handler tabulky nepodporuje %s"
+ dan "Denne tabeltype understøtter ikke %s"
+ nla "De 'handler' voor de tabel ondersteund geen %s"
+ eng "The storage engine for the table doesn't support %s"
+ est "Antud tabelitüüp ei toeta %s käske"
+ fre "Ce type de table ne supporte pas les %s"
+ ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s"
+ greek "The handler for the table doesn't support %s"
+ hun "A tabla kezeloje (handler) nem tamogatja az %s"
+ ita "Il gestore per la tabella non supporta il %s"
+ jpn "The handler for the table doesn't support %s"
+ kor "The handler for the table doesn't support %s"
+ nor "The handler for the table doesn't support %s"
+ norwegian-ny "The handler for the table doesn't support %s"
+ pol "The handler for the table doesn't support %s"
+ por "O manipulador de tabela não suporta %s"
+ rum "The handler for the table doesn't support %s"
+ rus "Обработчик таблицы не поддерживает этого: %s"
+ serbian "Handler za ovu tabelu ne dozvoljava %s komande"
+ slo "The handler for the table doesn't support %s"
+ spa "El manipulador de la tabla no permite soporte para %s"
+ swe "Tabellhanteraren för denna tabell kan inte göra %s"
+ ukr "Вказівник таблиці не підтримуе %s"
+ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000
+ cze "Proveden-Bí tohoto příkazu není v transakci dovoleno"
+ dan "Du må ikke bruge denne kommando i en transaktion"
+ nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie"
+ eng "You are not allowed to execute this command in a transaction"
+ est "Seda käsku ei saa kasutada transaktsiooni sees"
+ fre "Vous n'êtes pas autorisé à exécute cette commande dans une transaction"
+ ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen"
+ hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban"
+ ita "Non puoi eseguire questo comando in una transazione"
+ por "Não lhe é permitido executar este comando em uma transação"
+ rus "Вам не разрешено выполнять эту команду в транзакции"
+ serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji"
+ spa "No tienes el permiso para ejecutar este comando en una transición"
+ swe "Du får inte utföra detta kommando i en transaktion"
+ ukr "Вам не дозволено виконувати цю команду в транзакції"
+ER_ERROR_DURING_COMMIT
+ cze "Chyba %d p-Bři COMMIT"
+ dan "Modtog fejl %d mens kommandoen COMMIT blev udført"
+ nla "Kreeg fout %d tijdens COMMIT"
+ eng "Got error %d during COMMIT"
+ est "Viga %d käsu COMMIT täitmisel"
+ fre "Erreur %d lors du COMMIT"
+ ger "Fehler %d beim COMMIT"
+ hun "%d hiba a COMMIT vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il COMMIT"
+ por "Obteve erro %d durante COMMIT"
+ rus "Получена ошибка %d в процессе COMMIT"
+ serbian "Greška %d za vreme izvršavanja komande 'COMMIT'"
+ spa "Obtenido error %d durante COMMIT"
+ swe "Fick fel %d vid COMMIT"
+ ukr "Отримано помилку %d під час COMMIT"
+ER_ERROR_DURING_ROLLBACK
+ cze "Chyba %d p-Bři ROLLBACK"
+ dan "Modtog fejl %d mens kommandoen ROLLBACK blev udført"
+ nla "Kreeg fout %d tijdens ROLLBACK"
+ eng "Got error %d during ROLLBACK"
+ est "Viga %d käsu ROLLBACK täitmisel"
+ fre "Erreur %d lors du ROLLBACK"
+ ger "Fehler %d beim ROLLBACK"
+ hun "%d hiba a ROLLBACK vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il ROLLBACK"
+ por "Obteve erro %d durante ROLLBACK"
+ rus "Получена ошибка %d в процессе ROLLBACK"
+ serbian "Greška %d za vreme izvršavanja komande 'ROLLBACK'"
+ spa "Obtenido error %d durante ROLLBACK"
+ swe "Fick fel %d vid ROLLBACK"
+ ukr "Отримано помилку %d під час ROLLBACK"
+ER_ERROR_DURING_FLUSH_LOGS
+ cze "Chyba %d p-Bři FLUSH_LOGS"
+ dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført"
+ nla "Kreeg fout %d tijdens FLUSH_LOGS"
+ eng "Got error %d during FLUSH_LOGS"
+ est "Viga %d käsu FLUSH_LOGS täitmisel"
+ fre "Erreur %d lors du FLUSH_LOGS"
+ ger "Fehler %d bei FLUSH_LOGS"
+ hun "%d hiba a FLUSH_LOGS vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il FLUSH_LOGS"
+ por "Obteve erro %d durante FLUSH_LOGS"
+ rus "Получена ошибка %d в процессе FLUSH_LOGS"
+ serbian "Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'"
+ spa "Obtenido error %d durante FLUSH_LOGS"
+ swe "Fick fel %d vid FLUSH_LOGS"
+ ukr "Отримано помилку %d під час FLUSH_LOGS"
+ER_ERROR_DURING_CHECKPOINT
+ cze "Chyba %d p-Bři CHECKPOINT"
+ dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udført"
+ nla "Kreeg fout %d tijdens CHECKPOINT"
+ eng "Got error %d during CHECKPOINT"
+ est "Viga %d käsu CHECKPOINT täitmisel"
+ fre "Erreur %d lors du CHECKPOINT"
+ ger "Fehler %d bei CHECKPOINT"
+ hun "%d hiba a CHECKPOINT vegrehajtasa soran"
+ ita "Rilevato l'errore %d durante il CHECKPOINT"
+ por "Obteve erro %d durante CHECKPOINT"
+ rus "Получена ошибка %d в процессе CHECKPOINT"
+ serbian "Greška %d za vreme izvršavanja komande 'CHECKPOINT'"
+ spa "Obtenido error %d durante CHECKPOINT"
+ swe "Fick fel %d vid CHECKPOINT"
+ ukr "Отримано помилку %d під час CHECKPOINT"
+ER_NEW_ABORTING_CONNECTION 08S01
+ cze "Spojen-Bí %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
+ dan "Afbrød forbindelsen %ld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)"
+ nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)"
+ eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)"
+ est "Ühendus katkestatud %ld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s' (%-.64s)"
+ fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)"
+ ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)"
+ ita "Interrotta la connessione %ld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)"
+ por "Conexão %ld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')"
+ rus "Прервано соединение %ld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s' (%-.64s)"
+ serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)"
+ spa "Abortada conexión %ld para db: '%-.192s' usuario: '%-.48s' servidor: '%-.64s' (%-.64s)"
+ swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)"
+ ukr "Перервано з'єднання %ld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)"
+ER_DUMP_NOT_IMPLEMENTED
+ cze "Handler tabulky nepodporuje bin-Bární dump"
+ dan "Denne tabeltype unserstøtter ikke binært tabeldump"
+ nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump"
+ eng "The storage engine for the table does not support binary table dump"
+ fre "Ce type de table ne supporte pas les copies binaires"
+ ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump"
+ ita "Il gestore per la tabella non supporta il dump binario"
+ jpn "The handler for the table does not support binary table dump"
+ por "O manipulador de tabela não suporta 'dump' binário de tabela"
+ rum "The handler for the table does not support binary table dump"
+ rus "Обработчик этой таблицы не поддерживает двоичного сохранения образа таблицы (dump)"
+ serbian "Handler tabele ne podržava binarni dump tabele"
+ spa "El manipulador de tabla no soporta dump para tabla binaria"
+ swe "Tabellhanteraren klarar inte en binär kopiering av tabellen"
+ ukr "Цей тип таблиці не підтримує бінарну передачу таблиці"
+ER_FLUSH_MASTER_BINLOG_CLOSED
+ eng "Binlog closed, cannot RESET MASTER"
+ ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen"
+ por "Binlog fechado. Não pode fazer RESET MASTER"
+ rus "Двоичный журнал обновления закрыт, невозможно выполнить RESET MASTER"
+ serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'"
+ ukr "Реплікаційний лог закрито, не можу виконати RESET MASTER"
+ER_INDEX_REBUILD
+ cze "P-Břebudování indexu dumpnuté tabulky '%-.192s' nebylo úspěšné"
+ dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.192s'"
+ nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.192s'"
+ eng "Failed rebuilding the index of dumped table '%-.192s'"
+ fre "La reconstruction de l'index de la table copiée '%-.192s' a échoué"
+ ger "Neuerstellung des Index der Dump-Tabelle '%-.192s' fehlgeschlagen"
+ greek "Failed rebuilding the index of dumped table '%-.192s'"
+ hun "Failed rebuilding the index of dumped table '%-.192s'"
+ ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.192s'"
+ por "Falhou na reconstrução do índice da tabela 'dumped' '%-.192s'"
+ rus "Ошибка перестройки индекса сохраненной таблицы '%-.192s'"
+ serbian "Izgradnja indeksa dump-ovane tabele '%-.192s' nije uspela"
+ spa "Falla reconstruyendo el indice de la tabla dumped '%-.192s'"
+ ukr "Невдале відновлення індекса переданої таблиці '%-.192s'"
+ER_MASTER
+ cze "Chyba masteru: '%-.64s'"
+ dan "Fejl fra master: '%-.64s'"
+ nla "Fout van master: '%-.64s'"
+ eng "Error from master: '%-.64s'"
+ fre "Erreur reçue du maître: '%-.64s'"
+ ger "Fehler vom Master: '%-.64s'"
+ ita "Errore dal master: '%-.64s"
+ por "Erro no 'master' '%-.64s'"
+ rus "Ошибка от головного сервера: '%-.64s'"
+ serbian "Greška iz glavnog servera '%-.64s' u klasteru"
+ spa "Error del master: '%-.64s'"
+ swe "Fick en master: '%-.64s'"
+ ukr "Помилка від головного: '%-.64s'"
+ER_MASTER_NET_READ 08S01
+ cze "S-Bíťová chyba při čtení z masteru"
+ dan "Netværksfejl ved læsning fra master"
+ nla "Net fout tijdens lezen van master"
+ eng "Net error reading from master"
+ fre "Erreur de lecture réseau reçue du maître"
+ ger "Netzfehler beim Lesen vom Master"
+ ita "Errore di rete durante la ricezione dal master"
+ por "Erro de rede lendo do 'master'"
+ rus "Возникла ошибка чтения в процессе коммуникации с головным сервером"
+ serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru"
+ spa "Error de red leyendo del master"
+ swe "Fick nätverksfel vid läsning från master"
+ ukr "Мережева помилка читання від головного"
+ER_MASTER_NET_WRITE 08S01
+ cze "S-Bíťová chyba při zápisu na master"
+ dan "Netværksfejl ved skrivning til master"
+ nla "Net fout tijdens schrijven naar master"
+ eng "Net error writing to master"
+ fre "Erreur d'écriture réseau reçue du maître"
+ ger "Netzfehler beim Schreiben zum Master"
+ ita "Errore di rete durante l'invio al master"
+ por "Erro de rede gravando no 'master'"
+ rus "Возникла ошибка записи в процессе коммуникации с головным сервером"
+ serbian "Greška u slanju mrežnih paketa na glavni server u klasteru"
+ spa "Error de red escribiendo para el master"
+ swe "Fick nätverksfel vid skrivning till master"
+ ukr "Мережева помилка запису до головного"
+ER_FT_MATCHING_KEY_NOT_FOUND
+ cze "-BŽádný sloupec nemá vytvořen fulltextový index"
+ dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen"
+ nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst"
+ eng "Can't find FULLTEXT index matching the column list"
+ est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega"
+ fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes"
+ ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht"
+ ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne"
+ por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas"
+ rus "Невозможно отыскать полнотекстовый (FULLTEXT) индекс, соответствующий списку столбцов"
+ serbian "Ne mogu da pronađem 'FULLTEXT' indeks koli odgovara listi kolona"
+ spa "No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas"
+ swe "Hittar inte ett FULLTEXT-index i kolumnlistan"
+ ukr "Не можу знайти FULLTEXT індекс, що відповідає переліку стовбців"
+ER_LOCK_OR_ACTIVE_TRANSACTION
+ cze "Nemohu prov-Bést zadaný příkaz, protože existují aktivní zamčené tabulky nebo aktivní transakce"
+ dan "Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion"
+ nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie"
+ eng "Can't execute the given command because you have active locked tables or an active transaction"
+ est "Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon"
+ fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active"
+ ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen"
+ ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto"
+ por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa"
+ rus "Невозможно выполнить указанную команду, поскольку у вас присутствуют активно заблокированные таблица или открытая транзакция"
+ serbian "Ne mogu da izvršim datu komandu zbog toga što su tabele zaključane ili je transakcija u toku"
+ spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa"
+ swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion"
+ ukr "Не можу виконати подану команду тому, що таблиця заблокована або виконується транзакція"
+ER_UNKNOWN_SYSTEM_VARIABLE
+ cze "Nezn-Bámá systémová proměnná '%-.64s'"
+ dan "Ukendt systemvariabel '%-.64s'"
+ nla "Onbekende systeem variabele '%-.64s'"
+ eng "Unknown system variable '%-.64s'"
+ est "Tundmatu süsteemne muutuja '%-.64s'"
+ fre "Variable système '%-.64s' inconnue"
+ ger "Unbekannte Systemvariable '%-.64s'"
+ ita "Variabile di sistema '%-.64s' sconosciuta"
+ por "Variável de sistema '%-.64s' desconhecida"
+ rus "Неизвестная системная переменная '%-.64s'"
+ serbian "Nepoznata sistemska promenljiva '%-.64s'"
+ spa "Desconocida variable de sistema '%-.64s'"
+ swe "Okänd systemvariabel: '%-.64s'"
+ ukr "Невідома системна змінна '%-.64s'"
+ER_CRASHED_ON_USAGE
+ cze "Tabulka '%-.192s' je ozna-Bčena jako porušená a měla by být opravena"
+ dan "Tabellen '%-.192s' er markeret med fejl og bør repareres"
+ nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en dient te worden gerepareerd"
+ eng "Table '%-.192s' is marked as crashed and should be repaired"
+ est "Tabel '%-.192s' on märgitud vigaseks ja tuleb parandada"
+ fre "La table '%-.192s' est marquée 'crashed' et devrait être réparée"
+ ger "Tabelle '%-.192s' ist als defekt markiert und sollte repariert werden"
+ ita "La tabella '%-.192s' e` segnalata come corrotta e deve essere riparata"
+ por "Tabela '%-.192s' está marcada como danificada e deve ser reparada"
+ rus "Таблица '%-.192s' помечена как испорченная и должна пройти проверку и ремонт"
+ serbian "Tabela '%-.192s' je markirana kao oštećena i trebala bi biti popravljena"
+ spa "Tabla '%-.192s' está marcada como crashed y debe ser reparada"
+ swe "Tabell '%-.192s' är trasig och bör repareras med REPAIR TABLE"
+ ukr "Таблицю '%-.192s' марковано як зіпсовану та її потрібно відновити"
+ER_CRASHED_ON_REPAIR
+ cze "Tabulka '%-.192s' je ozna-Bčena jako porušená a poslední (automatická?) oprava se nezdařila"
+ dan "Tabellen '%-.192s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede"
+ nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte"
+ eng "Table '%-.192s' is marked as crashed and last (automatic?) repair failed"
+ est "Tabel '%-.192s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus"
+ fre "La table '%-.192s' est marquée 'crashed' et le dernier 'repair' a échoué"
+ ger "Tabelle '%-.192s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl"
+ ita "La tabella '%-.192s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita"
+ por "Tabela '%-.192s' está marcada como danificada e a última reparação (automática?) falhou"
+ rus "Таблица '%-.192s' помечена как испорченная и последний (автоматический?) ремонт не был успешным"
+ serbian "Tabela '%-.192s' je markirana kao oštećena, a zadnja (automatska?) popravka je bila neuspela"
+ spa "Tabla '%-.192s' está marcada como crashed y la última reparación (automactica?) falló"
+ swe "Tabell '%-.192s' är trasig och senast (automatiska?) reparation misslyckades"
+ ukr "Таблицю '%-.192s' марковано як зіпсовану та останнє (автоматичне?) відновлення не вдалося"
+ER_WARNING_NOT_COMPLETE_ROLLBACK
+ dan "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles"
+ nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen"
+ eng "Some non-transactional changed tables couldn't be rolled back"
+ est "Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida"
+ fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées"
+ ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden"
+ ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)"
+ por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)"
+ rus "Внимание: по некоторым измененным нетранзакционным таблицам невозможно будет произвести откат транзакции"
+ serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'"
+ spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back"
+ swe "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK"
+ ukr "Застереження: Деякі нетранзакційні зміни таблиць не можна буде повернути"
+ER_TRANS_CACHE_FULL
+ dan "Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen"
+ nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw"
+ eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again"
+ est "Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja väärtust ja proovi uuesti"
+ fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez"
+ ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrössern Sie diese Server-Variable versuchen Sie es noch einmal"
+ ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare"
+ por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente"
+ rus "Транзакции, включающей большое количество команд, потребовалось более чем 'max_binlog_cache_size' байт. Увеличьте эту переменную сервера mysqld и попробуйте еще раз"
+ spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo"
+ swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt"
+ ukr "Транзакція з багатьма виразами вимагає більше ніж 'max_binlog_cache_size' байтів для зберігання. Збільште цю змінну mysqld та спробуйте знову"
+ER_SLAVE_MUST_STOP
+ dan "Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE"
+ nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE"
+ eng "This operation cannot be performed with a running slave; run STOP SLAVE first"
+ fre "Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord"
+ ger "Diese Operation kann bei einem aktiven Slave nicht durchgeführt werden. Bitte zuerst STOP SLAVE ausführen"
+ ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE"
+ por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro"
+ rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера. Сначала выполните STOP SLAVE"
+ serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podređeni server."
+ spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE"
+ swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först"
+ ukr "Операція не може бути виконана з запущеним підлеглим, спочатку виконайте STOP SLAVE"
+ER_SLAVE_NOT_RUNNING
+ dan "Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE"
+ nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE"
+ eng "This operation requires a running slave; configure slave and do START SLAVE"
+ fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE"
+ ger "Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren"
+ ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE"
+ por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE"
+ rus "Для этой операции требуется работающий подчиненный сервер. Сначала выполните START SLAVE"
+ serbian "Ova operacija zahteva da je aktivan podređeni server. Konfigurišite prvo podređeni server i onda izvršite komandu 'START SLAVE'"
+ spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE"
+ swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE"
+ ukr "Операція вимагає запущеного підлеглого, зконфігуруйте підлеглого та виконайте START SLAVE"
+ER_BAD_SLAVE
+ dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO"
+ nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO"
+ eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO"
+ fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO"
+ ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben"
+ ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO"
+ por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO"
+ rus "Этот сервер не настроен как подчиненный. Внесите исправления в конфигурационном файле или с помощью CHANGE MASTER TO"
+ serbian "Server nije konfigurisan kao podređeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'"
+ spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO"
+ swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO"
+ ukr "Сервер не зконфігуровано як підлеглий, виправте це у файлі конфігурації або з CHANGE MASTER TO"
+ER_MASTER_INFO
+ eng "Could not initialize master info structure; more error messages can be found in the MySQL error log"
+ fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL"
+ ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen können im MySQL-Error-Log eingesehen werden"
+ serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'"
+ swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information"
+ER_SLAVE_THREAD
+ dan "Kunne ikke danne en slave-tråd; check systemressourcerne"
+ nla "Kon slave thread niet aanmaken, controleer systeem resources"
+ eng "Could not create slave thread; check system resources"
+ fre "Impossible de créer une tâche esclave, vérifiez les ressources système"
+ ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen überprüfen"
+ ita "Impossibile creare il thread 'slave', controllare le risorse di sistema"
+ por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema"
+ rus "Невозможно создать поток подчиненного сервера. Проверьте системные ресурсы"
+ serbian "Nisam mogao da startujem thread za podređeni server, proverite sistemske resurse"
+ spa "No puedo crear el thread esclavo, verifique recursos del sistema"
+ swe "Kunde inte starta en tråd för replikering"
+ ukr "Не можу створити підлеглу гілку, перевірте системні ресурси"
+ER_TOO_MANY_USER_CONNECTIONS 42000
+ dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser"
+ nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen"
+ eng "User %-.64s already has more than 'max_user_connections' active connections"
+ est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga"
+ fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connexions actives"
+ ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen"
+ ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive"
+ por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas"
+ rus "У пользователя %-.64s уже больше чем 'max_user_connections' активных соединений"
+ serbian "Korisnik %-.64s već ima više aktivnih konekcija nego što je to određeno 'max_user_connections' promenljivom"
+ spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas"
+ swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar"
+ ukr "Користувач %-.64s вже має більше ніж 'max_user_connections' активних з'єднань"
+ER_SET_CONSTANTS_ONLY
+ dan "Du må kun bruge konstantudtryk med SET"
+ nla "U mag alleen constante expressies gebruiken bij SET"
+ eng "You may only use constant expressions with SET"
+ est "Ainult konstantsed suurused on lubatud SET klauslis"
+ fre "Seules les expressions constantes sont autorisées avec SET"
+ ger "Bei SET dürfen nur konstante Ausdrücke verwendet werden"
+ ita "Si possono usare solo espressioni costanti con SET"
+ por "Você pode usar apenas expressões constantes com SET"
+ rus "Вы можете использовать в SET только константные выражения"
+ serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
+ spa "Tu solo debes usar expresiones constantes con SET"
+ swe "Man kan endast använda konstantuttryck med SET"
+ ukr "Можна використовувати лише вирази зі сталими у SET"
+ER_LOCK_WAIT_TIMEOUT
+ dan "Lock wait timeout overskredet"
+ nla "Lock wacht tijd overschreden"
+ eng "Lock wait timeout exceeded; try restarting transaction"
+ est "Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata"
+ fre "Timeout sur l'obtention du verrou"
+ ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten"
+ ita "E' scaduto il timeout per l'attesa del lock"
+ por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação."
+ rus "Таймаут ожидания блокировки истек; попробуйте перезапустить транзакцию"
+ serbian "Vremenski limit za zaključavanje tabele je istekao; Probajte da ponovo startujete transakciju"
+ spa "Tiempo de bloqueo de espera excedido"
+ swe "Fick inte ett lås i tid ; Försök att starta om transaktionen"
+ ukr "Затримку очікування блокування вичерпано"
+ER_LOCK_TABLE_FULL
+ dan "Det totale antal låse overstiger størrelsen på låse-tabellen"
+ nla "Het totale aantal locks overschrijdt de lock tabel grootte"
+ eng "The total number of locks exceeds the lock table size"
+ est "Lukkude koguarv ületab lukutabeli suuruse"
+ fre "Le nombre total de verrou dépasse la taille de la table des verrous"
+ ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle"
+ ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock"
+ por "O número total de travamentos excede o tamanho da tabela de travamentos"
+ rus "Общее количество блокировок превысило размеры таблицы блокировок"
+ serbian "Broj totalnih zaključavanja tabele premašuje veličinu tabele zaključavanja"
+ spa "El número total de bloqueos excede el tamaño de bloqueo de la tabla"
+ swe "Antal lås överskrider antalet reserverade lås"
+ ukr "Загальна кількість блокувань перевищила розмір блокувань для таблиці"
+ER_READ_ONLY_TRANSACTION 25000
+ dan "Update lås kan ikke opnås under en READ UNCOMMITTED transaktion"
+ nla "Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie"
+ eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction"
+ est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus"
+ fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED"
+ ger "Während einer READ-UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden"
+ ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'"
+ por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED"
+ rus "Блокировки обновлений нельзя получить в процессе чтения не принятой (в режиме READ UNCOMMITTED) транзакции"
+ serbian "Zaključavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija"
+ spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED"
+ swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED"
+ ukr "Оновити блокування не можливо на протязі транзакції READ UNCOMMITTED"
+ER_DROP_DB_WITH_READ_LOCK
+ dan "DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock"
+ nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit"
+ eng "DROP DATABASE not allowed while thread is holding global read lock"
+ est "DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku"
+ fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
+ ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
+ ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
+ por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
+ rus "Не допускается DROP DATABASE, пока поток держит глобальную блокировку чтения"
+ serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka"
+ spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
+ swe "DROP DATABASE är inte tillåtet när man har ett globalt läslås"
+ ukr "DROP DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання"
+ER_CREATE_DB_WITH_READ_LOCK
+ dan "CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock"
+ nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit"
+ eng "CREATE DATABASE not allowed while thread is holding global read lock"
+ est "CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku"
+ fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
+ ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
+ ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
+ por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
+ rus "Не допускается CREATE DATABASE, пока поток держит глобальную блокировку чтения"
+ serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka"
+ spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global"
+ swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås"
+ ukr "CREATE DATABASE не дозволено доки гілка перебуває під загальним блокуванням читання"
+ER_WRONG_ARGUMENTS
+ nla "Foutieve parameters voor %s"
+ eng "Incorrect arguments to %s"
+ est "Vigased parameetrid %s-le"
+ fre "Mauvais arguments à %s"
+ ger "Falsche Argumente für %s"
+ ita "Argomenti errati a %s"
+ por "Argumentos errados para %s"
+ rus "Неверные параметры для %s"
+ serbian "Pogrešni argumenti prosleđeni na %s"
+ spa "Argumentos errados para %s"
+ swe "Felaktiga argument till %s"
+ ukr "Хибний аргумент для %s"
+ER_NO_PERMISSION_TO_CREATE_USER 42000
+ nla "'%-.48s'@'%-.64s' mag geen nieuwe gebruikers creeren"
+ eng "'%-.48s'@'%-.64s' is not allowed to create new users"
+ est "Kasutajal '%-.48s'@'%-.64s' ei ole lubatud luua uusi kasutajaid"
+ fre "'%-.48s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs"
+ ger "'%-.48s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen"
+ ita "A '%-.48s'@'%-.64s' non e' permesso creare nuovi utenti"
+ por "Não é permitido a '%-.48s'@'%-.64s' criar novos usuários"
+ rus "'%-.48s'@'%-.64s' не разрешается создавать новых пользователей"
+ serbian "Korisniku '%-.48s'@'%-.64s' nije dozvoljeno da kreira nove korisnike"
+ spa "'%-.48s`@`%-.64s` no es permitido para crear nuevos usuarios"
+ swe "'%-.48s'@'%-.64s' har inte rättighet att skapa nya användare"
+ ukr "Користувачу '%-.48s'@'%-.64s' не дозволено створювати нових користувачів"
+ER_UNION_TABLES_IN_DIFFERENT_DIR
+ nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren"
+ eng "Incorrect table definition; all MERGE tables must be in the same database"
+ est "Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis"
+ fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée"
+ ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden"
+ ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database"
+ por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados."
+ rus "Неверное определение таблицы; Все таблицы в MERGE должны принадлежать одной и той же базе данных"
+ serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
+ spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos"
+ swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas"
+ER_LOCK_DEADLOCK 40001
+ nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie"
+ eng "Deadlock found when trying to get lock; try restarting transaction"
+ est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast"
+ fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction"
+ ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten"
+ ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione"
+ por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação."
+ rus "Возникла тупиковая ситуация в процессе получения блокировки; Попробуйте перезапустить транзакцию"
+ serbian "Unakrsno zaključavanje pronađeno kada sam pokušao da dobijem pravo na zaključavanje; Probajte da restartujete transakciju"
+ spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición"
+ swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen"
+ER_TABLE_CANT_HANDLE_FT
+ nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen"
+ eng "The used table type doesn't support FULLTEXT indexes"
+ est "Antud tabelitüüp ei toeta FULLTEXT indekseid"
+ fre "Le type de table utilisé ne supporte pas les index FULLTEXT"
+ ger "Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes"
+ ita "La tabella usata non supporta gli indici FULLTEXT"
+ por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)"
+ rus "Используемый тип таблиц не поддерживает полнотекстовых индексов"
+ serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse"
+ spa "El tipo de tabla usada no soporta índices FULLTEXT"
+ swe "Tabelltypen har inte hantering av FULLTEXT-index"
+ ukr "Використаний тип таблиці не підтримує FULLTEXT індексів"
+ER_CANNOT_ADD_FOREIGN
+ nla "Kan foreign key beperking niet toevoegen"
+ eng "Cannot add foreign key constraint"
+ fre "Impossible d'ajouter des contraintes d'index externe"
+ ger "Fremdschlüssel-Beschränkung kann nicht hinzugefügt werden"
+ ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)"
+ por "Não pode acrescentar uma restrição de chave estrangeira"
+ rus "Невозможно добавить ограничения внешнего ключа"
+ serbian "Ne mogu da dodam proveru spoljnog ključa"
+ spa "No puede adicionar clave extranjera constraint"
+ swe "Kan inte lägga till 'FOREIGN KEY constraint'"
+ER_NO_REFERENCED_ROW 23000
+ nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald"
+ eng "Cannot add or update a child row: a foreign key constraint fails"
+ fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche"
+ ger "Hinzufügen oder Aktualisieren eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
+ greek "Cannot add a child row: a foreign key constraint fails"
+ hun "Cannot add a child row: a foreign key constraint fails"
+ ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
+ norwegian-ny "Cannot add a child row: a foreign key constraint fails"
+ por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou"
+ rus "Невозможно добавить или обновить дочернюю строку: проверка ограничений внешнего ключа не выполняется"
+ spa "No puede adicionar una línea hijo: falla de clave extranjera constraint"
+ swe "FOREIGN KEY-konflikt: Kan inte skriva barn"
+ER_ROW_IS_REFERENCED 23000
+ eng "Cannot delete or update a parent row: a foreign key constraint fails"
+ fre "Impossible de supprimer un enregistrement père : une constrainte externe l'empèche"
+ ger "Löschen oder Aktualisieren eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl"
+ greek "Cannot delete a parent row: a foreign key constraint fails"
+ hun "Cannot delete a parent row: a foreign key constraint fails"
+ ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
+ por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou"
+ rus "Невозможно удалить или обновить родительскую строку: проверка ограничений внешнего ключа не выполняется"
+ serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog ključa je neuspela"
+ spa "No puede deletar una línea padre: falla de clave extranjera constraint"
+ swe "FOREIGN KEY-konflikt: Kan inte radera fader"
+ER_CONNECT_TO_MASTER 08S01
+ nla "Fout bij opbouwen verbinding naar master: %-.128s"
+ eng "Error connecting to master: %-.128s"
+ ger "Fehler bei der Verbindung zum Master: %-.128s"
+ ita "Errore durante la connessione al master: %-.128s"
+ por "Erro conectando com o master: %-.128s"
+ rus "Ошибка соединения с головным сервером: %-.128s"
+ spa "Error de coneccion a master: %-.128s"
+ swe "Fick fel vid anslutning till master: %-.128s"
+ER_QUERY_ON_MASTER
+ nla "Fout bij uitvoeren query op master: %-.128s"
+ eng "Error running query on master: %-.128s"
+ ger "Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s"
+ ita "Errore eseguendo una query sul master: %-.128s"
+ por "Erro rodando consulta no master: %-.128s"
+ rus "Ошибка выполнения запроса на головном сервере: %-.128s"
+ spa "Error executando el query en master: %-.128s"
+ swe "Fick fel vid utförande av command på mastern: %-.128s"
+ER_ERROR_WHEN_EXECUTING_COMMAND
+ nla "Fout tijdens uitvoeren van commando %s: %-.128s"
+ eng "Error when executing command %s: %-.128s"
+ est "Viga käsu %s täitmisel: %-.128s"
+ ger "Fehler beim Ausführen des Befehls %s: %-.128s"
+ ita "Errore durante l'esecuzione del comando %s: %-.128s"
+ por "Erro quando executando comando %s: %-.128s"
+ rus "Ошибка при выполнении команды %s: %-.128s"
+ serbian "Greška pri izvršavanju komande %s: %-.128s"
+ spa "Error de %s: %-.128s"
+ swe "Fick fel vid utförande av %s: %-.128s"
+ER_WRONG_USAGE
+ nla "Foutief gebruik van %s en %s"
+ eng "Incorrect usage of %s and %s"
+ est "Vigane %s ja %s kasutus"
+ ger "Falsche Verwendung von %s und %s"
+ ita "Uso errato di %s e %s"
+ por "Uso errado de %s e %s"
+ rus "Неверное использование %s и %s"
+ serbian "Pogrešna upotreba %s i %s"
+ spa "Equivocado uso de %s y %s"
+ swe "Felaktig använding av %s and %s"
+ ukr "Wrong usage of %s and %s"
+ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000
+ nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen"
+ eng "The used SELECT statements have a different number of columns"
+ est "Tulpade arv kasutatud SELECT lausetes ei kattu"
+ ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurück"
+ ita "La SELECT utilizzata ha un numero di colonne differente"
+ por "Os comandos SELECT usados têm diferente número de colunas"
+ rus "Использованные операторы выборки (SELECT) дают разное количество столбцов"
+ serbian "Upotrebljene 'SELECT' komande adresiraju različit broj kolona"
+ spa "El comando SELECT usado tiene diferente número de columnas"
+ swe "SELECT-kommandona har olika antal kolumner"
+ER_CANT_UPDATE_WITH_READLOCK
+ nla "Kan de query niet uitvoeren vanwege een conflicterende read lock"
+ eng "Can't execute the query because you have a conflicting read lock"
+ est "Ei suuda täita päringut konfliktse luku tõttu"
+ ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgeführt werden"
+ ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura"
+ por "Não posso executar a consulta porque você tem um conflito de travamento de leitura"
+ rus "Невозможно исполнить запрос, поскольку у вас установлены конфликтующие блокировки чтения"
+ serbian "Ne mogu da izvršim upit zbog toga što imate zaključavanja čitanja podataka u konfliktu"
+ spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura"
+ swe "Kan inte utföra kommandot emedan du har ett READ-lås"
+ER_MIXING_NOT_ALLOWED
+ nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld."
+ eng "Mixing of transactional and non-transactional tables is disabled"
+ est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud"
+ ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert"
+ ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali"
+ por "Mistura de tabelas transacional e não-transacional está desabilitada"
+ rus "Использование транзакционных таблиц наряду с нетранзакционными запрещено"
+ serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je isključeno"
+ spa "Mezla de transancional y no-transancional tablas está deshabilitada"
+ swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat"
+ER_DUP_ARGUMENT
+ nla "Optie '%s' tweemaal gebruikt in opdracht"
+ eng "Option '%s' used twice in statement"
+ est "Määrangut '%s' on lauses kasutatud topelt"
+ ger "Option '%s' wird im Befehl zweimal verwendet"
+ ita "L'opzione '%s' e' stata usata due volte nel comando"
+ por "Opção '%s' usada duas vezes no comando"
+ rus "Опция '%s' дважды использована в выражении"
+ spa "Opción '%s' usada dos veces en el comando"
+ swe "Option '%s' användes två gånger"
+ER_USER_LIMIT_REACHED 42000
+ nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)"
+ eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)"
+ ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)"
+ ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)"
+ por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)"
+ rus "Пользователь '%-.64s' превысил использование ресурса '%s' (текущее значение: %ld)"
+ spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)"
+ swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)"
+ER_SPECIFIC_ACCESS_DENIED_ERROR 42000
+ nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie"
+ eng "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation"
+ ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt"
+ ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione"
+ por "Acesso negado. Você precisa o privilégio %-.128s para essa operação"
+ rus "В доступе отказано. Вам нужны привилегии %-.128s для этой операции"
+ spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación"
+ swe "Du har inte privlegiet '%-.128s' som behövs för denna operation"
+ ukr "Access denied. You need the %-.128s privilege for this operation"
+ER_LOCAL_VARIABLE
+ nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL"
+ eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL"
+ ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden"
+ ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL"
+ por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL"
+ rus "Переменная '%-.64s' является потоковой (SESSION) переменной и не может быть изменена с помощью SET GLOBAL"
+ spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL"
+ swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL"
+ER_GLOBAL_VARIABLE
+ nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL"
+ eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL"
+ ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden"
+ ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL"
+ por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL"
+ rus "Переменная '%-.64s' является глобальной (GLOBAL) переменной, и ее следует изменять с помощью SET GLOBAL"
+ spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL"
+ swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL"
+ER_NO_DEFAULT 42000
+ nla "Variabele '%-.64s' heeft geen standaard waarde"
+ eng "Variable '%-.64s' doesn't have a default value"
+ ger "Variable '%-.64s' hat keinen Vorgabewert"
+ ita "La variabile '%-.64s' non ha un valore di default"
+ por "Variável '%-.64s' não tem um valor padrão"
+ rus "Переменная '%-.64s' не имеет значения по умолчанию"
+ spa "Variable '%-.64s' no tiene un valor patrón"
+ swe "Variabel '%-.64s' har inte ett DEFAULT-värde"
+ER_WRONG_VALUE_FOR_VAR 42000
+ nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.200s'"
+ eng "Variable '%-.64s' can't be set to the value of '%-.200s'"
+ ger "Variable '%-.64s' kann nicht auf '%-.200s' gesetzt werden"
+ ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.200s'"
+ por "Variável '%-.64s' não pode ser configurada para o valor de '%-.200s'"
+ rus "Переменная '%-.64s' не может быть установлена в значение '%-.200s'"
+ spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.200s'"
+ swe "Variabel '%-.64s' kan inte sättas till '%-.200s'"
+ER_WRONG_TYPE_FOR_VAR 42000
+ nla "Foutief argumenttype voor variabele '%-.64s'"
+ eng "Incorrect argument type to variable '%-.64s'"
+ ger "Falscher Argumenttyp für Variable '%-.64s'"
+ ita "Tipo di valore errato per la variabile '%-.64s'"
+ por "Tipo errado de argumento para variável '%-.64s'"
+ rus "Неверный тип аргумента для переменной '%-.64s'"
+ spa "Tipo de argumento equivocado para variable '%-.64s'"
+ swe "Fel typ av argument till variabel '%-.64s'"
+ER_VAR_CANT_BE_READ
+ nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen"
+ eng "Variable '%-.64s' can only be set, not read"
+ ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden"
+ ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto"
+ por "Variável '%-.64s' somente pode ser configurada, não lida"
+ rus "Переменная '%-.64s' может быть только установлена, но не считана"
+ spa "Variable '%-.64s' solamente puede ser configurada, no leída"
+ swe "Variabeln '%-.64s' kan endast sättas, inte läsas"
+ER_CANT_USE_OPTION_HERE 42000
+ nla "Foutieve toepassing/plaatsing van '%s'"
+ eng "Incorrect usage/placement of '%s'"
+ ger "Falsche Verwendung oder Platzierung von '%s'"
+ ita "Uso/posizione di '%s' sbagliato"
+ por "Errado uso/colocação de '%s'"
+ rus "Неверное использование или в неверном месте указан '%s'"
+ spa "Equivocado uso/colocación de '%s'"
+ swe "Fel använding/placering av '%s'"
+ER_NOT_SUPPORTED_YET 42000
+ nla "Deze versie van MySQL ondersteunt nog geen '%s'"
+ eng "This version of MySQL doesn't yet support '%s'"
+ ger "Diese MySQL-Version unterstützt '%s' nicht"
+ ita "Questa versione di MySQL non supporta ancora '%s'"
+ por "Esta versão de MySQL não suporta ainda '%s'"
+ rus "Эта версия MySQL пока еще не поддерживает '%s'"
+ spa "Esta versión de MySQL no soporta todavia '%s'"
+ swe "Denna version av MySQL kan ännu inte utföra '%s'"
+ER_MASTER_FATAL_ERROR_READING_BINLOG
+ nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log"
+ eng "Got fatal error %d from master when reading data from binary log: '%-.128s'"
+ ger "Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs"
+ ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario"
+ por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log"
+ rus "Получена неисправимая ошибка %d: '%-.128s' от головного сервера в процессе выборки данных из двоичного журнала"
+ spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log"
+ swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen"
+ER_SLAVE_IGNORED_TABLE
+ eng "Slave SQL thread ignored the query because of replicate-*-table rules"
+ ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
+ nla "Slave SQL thread negeerde de query vanwege replicate-*-table opties"
+ por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela"
+ spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla"
+ swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel"
+ER_INCORRECT_GLOBAL_LOCAL_VAR
+ eng "Variable '%-.192s' is a %s variable"
+ serbian "Promenljiva '%-.192s' je %s promenljiva"
+ ger "Variable '%-.192s' ist eine %s-Variable"
+ nla "Variabele '%-.192s' is geen %s variabele"
+ spa "Variable '%-.192s' es una %s variable"
+ swe "Variabel '%-.192s' är av typ %s"
+ER_WRONG_FK_DEF 42000
+ eng "Incorrect foreign key definition for '%-.192s': %s"
+ ger "Falsche Fremdschlüssel-Definition für '%-.192s': %s"
+ nla "Incorrecte foreign key definitie voor '%-.192s': %s"
+ por "Definição errada da chave estrangeira para '%-.192s': %s"
+ spa "Equivocada definición de llave extranjera para '%-.192s': %s"
+ swe "Felaktig FOREIGN KEY-definition för '%-.192s': %s"
+ER_KEY_REF_DO_NOT_MATCH_TABLE_REF
+ eng "Key reference and table reference don't match"
+ ger "Schlüssel- und Tabellenverweis passen nicht zusammen"
+ nla "Sleutel- en tabelreferentie komen niet overeen"
+ por "Referência da chave e referência da tabela não coincidem"
+ spa "Referencia de llave y referencia de tabla no coinciden"
+ swe "Nyckelreferensen och tabellreferensen stämmer inte överens"
+ER_OPERAND_COLUMNS 21000
+ eng "Operand should contain %d column(s)"
+ ger "Operand sollte %d Spalte(n) enthalten"
+ nla "Operand behoort %d kolommen te bevatten"
+ rus "Операнд должен Ñодержать %d колонок"
+ spa "Operando debe tener %d columna(s)"
+ ukr "Операнд має ÑкладатиÑÑ Ð· %d Ñтовбців"
+ER_SUBQUERY_NO_1_ROW 21000
+ eng "Subquery returns more than 1 row"
+ ger "Unterabfrage lieferte mehr als einen Datensatz zurück"
+ nla "Subquery retourneert meer dan 1 rij"
+ por "Subconsulta retorna mais que 1 registro"
+ rus "ÐŸÐ¾Ð´Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð²Ð¾Ð·Ð²Ñ€Ð°Ñ‰Ð°ÐµÑ‚ более одной запиÑи"
+ spa "Subconsulta retorna mas que 1 línea"
+ swe "Subquery returnerade mer än 1 rad"
+ ukr "Підзапит повертає більш нiж 1 запиÑ"
+ER_UNKNOWN_STMT_HANDLER
+ dan "Unknown prepared statement handler (%.*s) given to %s"
+ eng "Unknown prepared statement handler (%.*s) given to %s"
+ ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben"
+ nla "Onebekende prepared statement handler (%.*s) voor %s aangegeven"
+ por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s"
+ spa "Desconocido preparado comando handler (%.*s) dado para %s"
+ swe "Okänd PREPARED STATEMENT id (%.*s) var given till %s"
+ ukr "Unknown prepared statement handler (%.*s) given to %s"
+ER_CORRUPT_HELP_DB
+ eng "Help database is corrupt or does not exist"
+ ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht"
+ nla "Help database is beschadigd of bestaat niet"
+ por "Banco de dado de ajuda corrupto ou não existente"
+ spa "Base de datos Help está corrupto o no existe"
+ swe "Hjälpdatabasen finns inte eller är skadad"
+ER_CYCLIC_REFERENCE
+ eng "Cyclic reference on subqueries"
+ ger "Zyklischer Verweis in Unterabfragen"
+ nla "Cyclische verwijzing in subqueries"
+ por "Referência cíclica em subconsultas"
+ rus "ЦикличеÑÐºÐ°Ñ ÑÑылка на подзапроÑ"
+ spa "Cíclica referencia en subconsultas"
+ swe "Cyklisk referens i subqueries"
+ ukr "Циклічне поÑÐ¸Ð»Ð°Ð½Ð½Ñ Ð½Ð° підзапит"
+ER_AUTO_CONVERT
+ eng "Converting column '%s' from %s to %s"
+ ger "Feld '%s' wird von %s nach %s umgewandelt"
+ nla "Veld '%s' wordt van %s naar %s geconverteerd"
+ por "Convertendo coluna '%s' de %s para %s"
+ rus "Преобразование Ð¿Ð¾Ð»Ñ '%s' из %s в %s"
+ spa "Convirtiendo columna '%s' de %s para %s"
+ swe "Konvertar kolumn '%s' från %s till %s"
+ ukr "ÐŸÐµÑ€ÐµÑ‚Ð²Ð¾Ñ€ÐµÐ½Ð½Ñ Ñтовбца '%s' з %s у %s"
+ER_ILLEGAL_REFERENCE 42S22
+ eng "Reference '%-.64s' not supported (%s)"
+ ger "Verweis '%-.64s' wird nicht unterstützt (%s)"
+ nla "Verwijzing '%-.64s' niet ondersteund (%s)"
+ por "Referência '%-.64s' não suportada (%s)"
+ rus "СÑылка '%-.64s' не поддерживаетÑÑ (%s)"
+ spa "Referencia '%-.64s' no soportada (%s)"
+ swe "Referens '%-.64s' stöds inte (%s)"
+ ukr "ПоÑÐ¸Ð»Ð°Ð½Ð½Ñ '%-.64s' не пiдтримуетÑÑ (%s)"
+ER_DERIVED_MUST_HAVE_ALIAS 42000
+ eng "Every derived table must have its own alias"
+ ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden"
+ nla "Voor elke afgeleide tabel moet een unieke alias worden gebruikt"
+ por "Cada tabela derivada deve ter seu próprio alias"
+ spa "Cada tabla derivada debe tener su propio alias"
+ swe "Varje 'derived table' måste ha sitt eget alias"
+ER_SELECT_REDUCED 01000
+ eng "Select %u was reduced during optimization"
+ ger "Select %u wurde während der Optimierung reduziert"
+ nla "Select %u werd geredureerd tijdens optimtalisatie"
+ por "Select %u foi reduzido durante otimização"
+ rus "Select %u был упразднен в процеÑÑе оптимизации"
+ spa "Select %u fué reducido durante optimización"
+ swe "Select %u reducerades vid optimiering"
+ ukr "Select %u was ÑкаÑовано при оптимiзацii"
+ER_TABLENAME_NOT_ALLOWED_HERE 42000
+ eng "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s"
+ ger "Tabelle '%-.192s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden"
+ nla "Tabel '%-.192s' uit een van de SELECTS kan niet in %-.32s gebruikt worden"
+ por "Tabela '%-.192s' de um dos SELECTs não pode ser usada em %-.32s"
+ spa "Tabla '%-.192s' de uno de los SELECT no puede ser usada en %-.32s"
+ swe "Tabell '%-.192s' från en SELECT kan inte användas i %-.32s"
+ER_NOT_SUPPORTED_AUTH_MODE 08004
+ eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client"
+ ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client"
+ nla "Client ondersteunt het door de server verwachtte authenticatieprotocol niet. Overweeg een nieuwere MySQL client te gebruiken"
+ por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL"
+ spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL"
+ swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet."
+ER_SPATIAL_CANT_HAVE_NULL 42000
+ eng "All parts of a SPATIAL index must be NOT NULL"
+ ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein"
+ nla "Alle delete van een SPATIAL index dienen als NOT NULL gedeclareerd te worden"
+ por "Todas as partes de uma SPATIAL index devem ser NOT NULL"
+ spa "Todas las partes de una SPATIAL index deben ser NOT NULL"
+ swe "Alla delar av en SPATIAL index måste vara NOT NULL"
+ER_COLLATION_CHARSET_MISMATCH 42000
+ eng "COLLATION '%s' is not valid for CHARACTER SET '%s'"
+ ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig"
+ nla "COLLATION '%s' is niet geldig voor CHARACTER SET '%s'"
+ por "COLLATION '%s' não é válida para CHARACTER SET '%s'"
+ spa "COLLATION '%s' no es válido para CHARACTER SET '%s'"
+ swe "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'"
+ER_SLAVE_WAS_RUNNING
+ eng "Slave is already running"
+ ger "Slave läuft bereits"
+ nla "Slave is reeds actief"
+ por "O slave já está rodando"
+ spa "Slave ya está funcionando"
+ swe "Slaven har redan startat"
+ER_SLAVE_WAS_NOT_RUNNING
+ eng "Slave already has been stopped"
+ ger "Slave wurde bereits angehalten"
+ nla "Slave is reeds gestopt"
+ por "O slave já está parado"
+ spa "Slave ya fué parado"
+ swe "Slaven har redan stoppat"
+ER_TOO_BIG_FOR_UNCOMPRESS
+ eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)"
+ ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
+ nla "Ongecomprimeerder data is te groot; de maximum lengte is %d (waarschijnlijk, de lengte van de gecomprimeerde data was beschadigd)"
+ por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
+ spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)"
+ER_ZLIB_Z_MEM_ERROR
+ eng "ZLIB: Not enough memory"
+ ger "ZLIB: Nicht genug Speicher"
+ nla "ZLIB: Onvoldoende geheugen"
+ por "ZLIB: Não suficiente memória disponível"
+ spa "Z_MEM_ERROR: No suficiente memoria para zlib"
+ER_ZLIB_Z_BUF_ERROR
+ eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)"
+ ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
+ nla "ZLIB: Onvoldoende ruimte in uitgaande buffer (waarschijnlijk, de lengte van de ongecomprimeerde data was beschadigd)"
+ por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
+ spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)"
+ER_ZLIB_Z_DATA_ERROR
+ eng "ZLIB: Input data corrupted"
+ ger "ZLIB: Eingabedaten beschädigt"
+ nla "ZLIB: Invoer data beschadigd"
+ por "ZLIB: Dados de entrada está corrupto"
+ spa "ZLIB: Dato de entrada fué corrompido para zlib"
+ER_CUT_VALUE_GROUP_CONCAT
+ eng "Row %u was cut by GROUP_CONCAT()"
+ER_WARN_TOO_FEW_RECORDS 01000
+ eng "Row %ld doesn't contain data for all columns"
+ ger "Zeile %ld enthält nicht für alle Felder Daten"
+ nla "Rij %ld bevat niet de data voor alle kolommen"
+ por "Conta de registro é menor que a conta de coluna na linha %ld"
+ spa "Línea %ld no contiene datos para todas las columnas"
+ER_WARN_TOO_MANY_RECORDS 01000
+ eng "Row %ld was truncated; it contained more data than there were input columns"
+ ger "Zeile %ld gekürzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt"
+ nla "Regel %ld ingekort, bevatte meer data dan invoer kolommen"
+ por "Conta de registro é maior que a conta de coluna na linha %ld"
+ spa "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada"
+ER_WARN_NULL_TO_NOTNULL 22004
+ eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld"
+ ger "Feld auf Vorgabewert gesetzt, da NULL für NOT-NULL-Feld '%s' in Zeile %ld angegeben"
+ por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld"
+ spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld"
+ER_WARN_DATA_OUT_OF_RANGE 22003
+ eng "Out of range value for column '%s' at row %ld"
+WARN_DATA_TRUNCATED 01000
+ eng "Data truncated for column '%s' at row %ld"
+ ger "Daten abgeschnitten für Feld '%s' in Zeile %ld"
+ por "Dado truncado para coluna '%s' na linha %ld"
+ spa "Datos truncados para columna '%s' en la línea %ld"
+ER_WARN_USING_OTHER_HANDLER
+ eng "Using storage engine %s for table '%s'"
+ ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt"
+ por "Usando engine de armazenamento %s para tabela '%s'"
+ spa "Usando motor de almacenamiento %s para tabla '%s'"
+ swe "Använder handler %s för tabell '%s'"
+ER_CANT_AGGREGATE_2COLLATIONS
+ eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
+ ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'"
+ por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'"
+ spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'"
+ER_DROP_USER
+ eng "Cannot drop one or more of the requested users"
+ ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen"
+ER_REVOKE_GRANTS
+ eng "Can't revoke all privileges for one or more of the requested users"
+ ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden"
+ por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos"
+ spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados"
+ER_CANT_AGGREGATE_3COLLATIONS
+ eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'"
+ ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'"
+ por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'"
+ spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'"
+ER_CANT_AGGREGATE_NCOLLATIONS
+ eng "Illegal mix of collations for operation '%s'"
+ ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'"
+ por "Ilegal combinação de collations para operação '%s'"
+ spa "Ilegal mezcla de collations para operación '%s'"
+ER_VARIABLE_IS_NOT_STRUCT
+ eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)"
+ ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)"
+ por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)"
+ spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)"
+ER_UNKNOWN_COLLATION
+ eng "Unknown collation: '%-.64s'"
+ ger "Unbekannte Sortierreihenfolge: '%-.64s'"
+ por "Collation desconhecida: '%-.64s'"
+ spa "Collation desconocida: '%-.64s'"
+ER_SLAVE_IGNORED_SSL_PARAMS
+ eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started"
+ ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MySQL-Slave mit SSL gestartet wird"
+ por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado."
+ spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado"
+ER_SERVER_IS_IN_SECURE_AUTH_MODE
+ eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"
+ ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern"
+ por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato"
+ rus "Сервер запущен в режиме --secure-auth (безопаÑной авторизации), но Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%s'@'%s' пароль Ñохранён в Ñтаром формате; необходимо обновить формат паролÑ"
+ spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato"
+ER_WARN_FIELD_RESOLVED
+ eng "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d"
+ ger "Feld oder Verweis '%-.192s%s%-.192s%s%-.192s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst"
+ por "Campo ou referência '%-.192s%s%-.192s%s%-.192s' de SELECT #%d foi resolvido em SELECT #%d"
+ rus "Поле или ÑÑылка '%-.192s%s%-.192s%s%-.192s' из SELECTа #%d была найдена в SELECTе #%d"
+ spa "Campo o referencia '%-.192s%s%-.192s%s%-.192s' de SELECT #%d fue resolvido en SELECT #%d"
+ ukr "Стовбець або поÑÐ¸Ð»Ð°Ð½Ð½Ñ '%-.192s%s%-.192s%s%-.192s' із SELECTу #%d було знайдене у SELECTÑ– #%d"
+ER_BAD_SLAVE_UNTIL_COND
+ eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL"
+ ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL"
+ por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL"
+ spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL"
+ER_MISSING_SKIP_SLAVE
+ eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart"
+ ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet"
+ por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo"
+ spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave"
+ER_UNTIL_COND_IGNORED
+ eng "SQL thread is not to be started so UNTIL options are ignored"
+ ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert"
+ por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas"
+ spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas"
+ER_WRONG_NAME_FOR_INDEX 42000
+ eng "Incorrect index name '%-.100s'"
+ ger "Falscher Indexname '%-.100s'"
+ por "Incorreto nome de índice '%-.100s'"
+ spa "Nombre de índice incorrecto '%-.100s'"
+ swe "Felaktigt index namn '%-.100s'"
+ER_WRONG_NAME_FOR_CATALOG 42000
+ eng "Incorrect catalog name '%-.100s'"
+ ger "Falscher Katalogname '%-.100s'"
+ por "Incorreto nome de catálogo '%-.100s'"
+ spa "Nombre de catalog incorrecto '%-.100s'"
+ swe "Felaktigt katalog namn '%-.100s'"
+ER_WARN_QC_RESIZE
+ eng "Query cache failed to set size %lu; new query cache size is %lu"
+ ger "Änderung der Query-Cache-Größe auf %lu fehlgeschlagen; neue Query-Cache-Größe ist %lu"
+ por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu"
+ rus "Кеш запроÑов не может уÑтановить размер %lu, новый размер кеша зпроÑов - %lu"
+ spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu"
+ swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu"
+ ukr "Кеш запитів неÑпроможен вÑтановити розмір %lu, новий розмір кеша запитів - %lu"
+ER_BAD_FT_COLUMN
+ eng "Column '%-.192s' cannot be part of FULLTEXT index"
+ ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein"
+ por "Coluna '%-.192s' não pode ser parte de índice FULLTEXT"
+ spa "Columna '%-.192s' no puede ser parte de FULLTEXT index"
+ swe "Kolumn '%-.192s' kan inte vara del av ett FULLTEXT index"
+ER_UNKNOWN_KEY_CACHE
+ eng "Unknown key cache '%-.100s'"
+ ger "Unbekannter Schlüssel-Cache '%-.100s'"
+ por "Key cache desconhecida '%-.100s'"
+ spa "Desconocida key cache '%-.100s'"
+ swe "Okänd nyckel cache '%-.100s'"
+ER_WARN_HOSTNAME_WONT_WORK
+ eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work"
+ ger "MySQL wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist"
+ por "MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar"
+ spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar"
+ER_UNKNOWN_STORAGE_ENGINE 42000
+ eng "Unknown table engine '%s'"
+ ger "Unbekannte Speicher-Engine '%s'"
+ por "Motor de tabela desconhecido '%s'"
+ spa "Desconocido motor de tabla '%s'"
+# When using this error code, use ER(ER_WARN_DEPRECATED_SYNTAX_WITH_VER)
+# for the message string. See, for example, code in mysql_priv.h.
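+# Illustration only (a hedged sketch, not part of the message catalog): one
+# plausible call pattern from the C++ server code combines the existing
+# push_warning_printf() API with the ER() macro as described above; the three
+# literal string arguments below are placeholders, not values taken from the
+# server sources:
+#   push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+#                       ER_WARN_DEPRECATED_SYNTAX,
+#                       ER(ER_WARN_DEPRECATED_SYNTAX_WITH_VER),
+#                       "old_syntax", "a future release", "new_syntax");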
+ER_WARN_DEPRECATED_SYNTAX
+ eng "'%s' is deprecated; use '%s' instead"
+ ger "'%s' ist veraltet. Bitte benutzen Sie '%s'"
+ por "'%s' é desatualizado. Use '%s' em seu lugar"
+ spa "'%s' está desaprobado, use '%s' en su lugar"
+ER_NON_UPDATABLE_TABLE
+ eng "The target table %-.100s of the %s is not updatable"
+ ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar"
+ por "A tabela destino %-.100s do %s não é atualizável"
+ rus "Таблица %-.100s в %s не может изменÑÑ‚ÑÑ"
+ spa "La tabla destino %-.100s del %s no es actualizable"
+ swe "Tabell %-.100s använd med '%s' är inte uppdateringsbar"
+ ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ %-.100s у %s не може оновлюватиÑÑŒ"
+ER_FEATURE_DISABLED
+ eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working"
+ ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MySQL mit '%s' übersetzen, damit es verfügbar ist"
+ por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando"
+ spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando"
+ swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad"
+ER_OPTION_PREVENTS_STATEMENT
+ eng "The MySQL server is running with the %s option so it cannot execute this statement"
+ ger "Der MySQL-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen"
+ por "O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando"
+ spa "El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando"
+ swe "MySQL är startad med %s. Pga av detta kan du inte använda detta kommando"
+ER_DUPLICATED_VALUE_IN_TYPE
+ eng "Column '%-.100s' has duplicated value '%-.64s' in %s"
+ ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s"
+ por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s"
+ spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s"
+ER_TRUNCATED_WRONG_VALUE 22007
+ eng "Truncated incorrect %-.32s value: '%-.128s'"
+ ger "Falscher %-.32s-Wert gekürzt: '%-.128s'"
+ por "Truncado errado %-.32s valor: '%-.128s'"
+ spa "Equivocado truncado %-.32s valor: '%-.128s'"
+ER_TOO_MUCH_AUTO_TIMESTAMP_COLS
+ eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
+ ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben"
+ por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula"
+ spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula"
+ER_INVALID_ON_UPDATE
+ eng "Invalid ON UPDATE clause for '%-.192s' column"
+ ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.192s'"
+ por "Inválida cláusula ON UPDATE para campo '%-.192s'"
+ spa "Inválido ON UPDATE cláusula para campo '%-.192s'"
+ER_UNSUPPORTED_PS
+ eng "This command is not supported in the prepared statement protocol yet"
+ ger "Dieser Befehl wird im Protokoll für vorbereitete Anweisungen noch nicht unterstützt"
+ER_GET_ERRMSG
+ dan "Modtog fejl %d '%-.100s' fra %s"
+ eng "Got error %d '%-.100s' from %s"
+ ger "Fehler %d '%-.100s' von %s"
+ nor "Mottok feil %d '%-.100s' fa %s"
+ norwegian-ny "Mottok feil %d '%-.100s' fra %s"
+ER_GET_TEMPORARY_ERRMSG
+ dan "Modtog temporary fejl %d '%-.100s' fra %s"
+ eng "Got temporary error %d '%-.100s' from %s"
+ ger "Temporärer Fehler %d '%-.100s' von %s"
+ nor "Mottok temporary feil %d '%-.100s' fra %s"
+ norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s"
+ER_UNKNOWN_TIME_ZONE
+ eng "Unknown or incorrect time zone: '%-.64s'"
+ ger "Unbekannte oder falsche Zeitzone: '%-.64s'"
+ER_WARN_INVALID_TIMESTAMP
+ eng "Invalid TIMESTAMP value in column '%s' at row %ld"
+ ger "Ungültiger TIMESTAMP-Wert in Feld '%s', Zeile %ld"
+ER_INVALID_CHARACTER_STRING
+ eng "Invalid %s character string: '%.64s'"
+ ger "Ungültiger %s-Zeichen-String: '%.64s'"
+ER_WARN_ALLOWED_PACKET_OVERFLOWED
+ eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated"
+ ger "Ergebnis von %s() war größer als max_allowed_packet (%ld) Bytes und wurde deshalb gekürzt"
+ER_CONFLICTING_DECLARATIONS
+ eng "Conflicting declarations: '%s%s' and '%s%s'"
+ ger "Widersprüchliche Deklarationen: '%s%s' und '%s%s'"
+ER_SP_NO_RECURSIVE_CREATE 2F003
+ eng "Can't create a %s from within another stored routine"
+ ger "Kann kein %s innerhalb einer anderen gespeicherten Routine erzeugen"
+ER_SP_ALREADY_EXISTS 42000
+ eng "%s %s already exists"
+ ger "%s %s existiert bereits"
+ER_SP_DOES_NOT_EXIST 42000
+ eng "%s %s does not exist"
+ ger "%s %s existiert nicht"
+ER_SP_DROP_FAILED
+ eng "Failed to DROP %s %s"
+ ger "DROP %s %s ist fehlgeschlagen"
+ER_SP_STORE_FAILED
+ eng "Failed to CREATE %s %s"
+ ger "CREATE %s %s ist fehlgeschlagen"
+ER_SP_LILABEL_MISMATCH 42000
+ eng "%s with no matching label: %s"
+ ger "%s ohne passende Marke: %s"
+ER_SP_LABEL_REDEFINE 42000
+ eng "Redefining label %s"
+ ger "Neudefinition der Marke %s"
+ER_SP_LABEL_MISMATCH 42000
+ eng "End-label %s without match"
+ ger "Ende-Marke %s ohne zugehörigen Anfang"
+ER_SP_UNINIT_VAR 01000
+ eng "Referring to uninitialized variable %s"
+ ger "Zugriff auf nichtinitialisierte Variable %s"
+ER_SP_BADSELECT 0A000
+ eng "PROCEDURE %s can't return a result set in the given context"
+ ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurückgeben"
+ER_SP_BADRETURN 42000
+ eng "RETURN is only allowed in a FUNCTION"
+ ger "RETURN ist nur innerhalb einer FUNCTION erlaubt"
+ER_SP_BADSTATEMENT 0A000
+ eng "%s is not allowed in stored procedures"
+ ger "%s ist in gespeicherten Prozeduren nicht erlaubt"
+ER_UPDATE_LOG_DEPRECATED_IGNORED 42000
+ eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored"
+ ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert"
+ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000
+ eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN"
+ ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt"
+ER_QUERY_INTERRUPTED 70100
+ eng "Query execution was interrupted"
+ ger "Ausführung der Abfrage wurde unterbrochen"
+ER_SP_WRONG_NO_OF_ARGS 42000
+ eng "Incorrect number of arguments for %s %s; expected %u, got %u"
+ ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u"
+ER_SP_COND_MISMATCH 42000
+ eng "Undefined CONDITION: %s"
+ ger "Undefinierte CONDITION: %s"
+ER_SP_NORETURN 42000
+ eng "No RETURN found in FUNCTION %s"
+ ger "Kein RETURN in FUNCTION %s gefunden"
+ER_SP_NORETURNEND 2F005
+ eng "FUNCTION %s ended without RETURN"
+ ger "FUNCTION %s endete ohne RETURN"
+ER_SP_BAD_CURSOR_QUERY 42000
+ eng "Cursor statement must be a SELECT"
+ ger "Cursor-Anweisung muss ein SELECT sein"
+ER_SP_BAD_CURSOR_SELECT 42000
+ eng "Cursor SELECT must not have INTO"
+ ger "Cursor-SELECT darf kein INTO haben"
+ER_SP_CURSOR_MISMATCH 42000
+ eng "Undefined CURSOR: %s"
+ ger "Undefinierter CURSOR: %s"
+ER_SP_CURSOR_ALREADY_OPEN 24000
+ eng "Cursor is already open"
+ ger "Cursor ist schon geöffnet"
+ER_SP_CURSOR_NOT_OPEN 24000
+ eng "Cursor is not open"
+ ger "Cursor ist nicht geöffnet"
+ER_SP_UNDECLARED_VAR 42000
+ eng "Undeclared variable: %s"
+ ger "Nicht deklarierte Variable: %s"
+ER_SP_WRONG_NO_OF_FETCH_ARGS
+ eng "Incorrect number of FETCH variables"
+ ger "Falsche Anzahl von FETCH-Variablen"
+ER_SP_FETCH_NO_DATA 02000
+ eng "No data - zero rows fetched, selected, or processed"
+ ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet"
+ER_SP_DUP_PARAM 42000
+ eng "Duplicate parameter: %s"
+ ger "Doppelter Parameter: %s"
+ER_SP_DUP_VAR 42000
+ eng "Duplicate variable: %s"
+ ger "Doppelte Variable: %s"
+ER_SP_DUP_COND 42000
+ eng "Duplicate condition: %s"
+ ger "Doppelte Bedingung: %s"
+ER_SP_DUP_CURS 42000
+ eng "Duplicate cursor: %s"
+ ger "Doppelter Cursor: %s"
+ER_SP_CANT_ALTER
+ eng "Failed to ALTER %s %s"
+ ger "ALTER %s %s fehlgeschlagen"
+ER_SP_SUBSELECT_NYI 0A000
+ eng "Subquery value not supported"
+ ger "Subquery-Wert wird nicht unterstützt"
+ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG 0A000
+ eng "%s is not allowed in stored function or trigger"
+ ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
+ER_SP_VARCOND_AFTER_CURSHNDLR 42000
+ eng "Variable or condition declaration after cursor or handler declaration"
+ ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers"
+ER_SP_CURSOR_AFTER_HANDLER 42000
+ eng "Cursor declaration after handler declaration"
+ ger "Deklaration eines Cursors nach der Deklaration eines Handlers"
+ER_SP_CASE_NOT_FOUND 20000
+ eng "Case not found for CASE statement"
+ ger "Fall für CASE-Anweisung nicht gefunden"
+ER_FPARSER_TOO_BIG_FILE
+ eng "Configuration file '%-.192s' is too big"
+ ger "Konfigurationsdatei '%-.192s' ist zu groß"
+ rus "Слишком большой конфигурационный файл '%-.192s'"
+ ukr "Занадто великий конфігураційний файл '%-.192s'"
+ER_FPARSER_BAD_HEADER
+ eng "Malformed file type header in file '%-.192s'"
+ ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.192s'"
+ rus "Ðеверный заголовок типа файла '%-.192s'"
+ ukr "Ðевірний заголовок типу у файлі '%-.192s'"
+ER_FPARSER_EOF_IN_COMMENT
+ eng "Unexpected end of file while parsing comment '%-.200s'"
+ ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.200s'"
+ rus "Ðеожиданный конец файла в коментарии '%-.200s'"
+ ukr "ÐеÑподіванний кінець файлу у коментарі '%-.200s'"
+ER_FPARSER_ERROR_IN_PARAMETER
+ eng "Error while parsing parameter '%-.192s' (line: '%-.192s')"
+ ger "Fehler beim Parsen des Parameters '%-.192s' (Zeile: '%-.192s')"
+ rus "Ошибка при раÑпознавании параметра '%-.192s' (Ñтрока: '%-.192s')"
+ ukr "Помилка в роÑпізнаванні параметру '%-.192s' (Ñ€Ñдок: '%-.192s')"
+ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER
+ eng "Unexpected end of file while skipping unknown parameter '%-.192s'"
+ ger "Unerwartetes Dateiende beim Ãœberspringen des unbekannten Parameters '%-.192s'"
+ rus "Ðеожиданный конец файла при пропуÑке неизвеÑтного параметра '%-.192s'"
+ ukr "ÐеÑподіванний кінець файлу у Ñпробі проминути невідомий параметр '%-.192s'"
+ER_VIEW_NO_EXPLAIN
+ eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table"
+ ger "EXPLAIN/SHOW kann nicht verlangt werden. Rechte für zugrunde liegende Tabelle fehlen"
+ rus "EXPLAIN/SHOW не может быть выполненно; недоÑтаточно прав на такблицы запроÑа"
+ ukr "EXPLAIN/SHOW не може бути віконано; немає прав на тиблиці запиту"
+ER_FRM_UNKNOWN_TYPE
+ eng "File '%-.192s' has unknown type '%-.64s' in its header"
+ ger "Datei '%-.192s' hat unbekannten Typ '%-.64s' im Header"
+ rus "Файл '%-.192s' Ñодержит неизвеÑтный тип '%-.64s' в заголовке"
+ ukr "Файл '%-.192s' має невідомий тип '%-.64s' у заголовку"
+ER_WRONG_OBJECT
+ eng "'%-.192s.%-.192s' is not %s"
+ ger "'%-.192s.%-.192s' ist nicht %s"
+ rus "'%-.192s.%-.192s' - не %s"
+ ukr "'%-.192s.%-.192s' не є %s"
+ER_NONUPDATEABLE_COLUMN
+ eng "Column '%-.192s' is not updatable"
+ ger "Feld '%-.192s' ist nicht aktualisierbar"
+ rus "Столбец '%-.192s' не обновлÑемый"
+ ukr "Стовбець '%-.192s' не може бути зминений"
+ER_VIEW_SELECT_DERIVED
+ eng "View's SELECT contains a subquery in the FROM clause"
+ ger "SELECT der View enthält eine Subquery in der FROM-Klausel"
+ rus "View SELECT Ñодержит Ð¿Ð¾Ð´Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð² конÑтрукции FROM"
+ ukr "View SELECT має підзапит у конÑтрукції FROM"
+ER_VIEW_SELECT_CLAUSE
+ eng "View's SELECT contains a '%s' clause"
+ ger "SELECT der View enthält eine '%s'-Klausel"
+ rus "View SELECT Ñодержит конÑтрукцию '%s'"
+ ukr "View SELECT має конÑтрукцію '%s'"
+ER_VIEW_SELECT_VARIABLE
+ eng "View's SELECT contains a variable or parameter"
+ ger "SELECT der View enthält eine Variable oder einen Parameter"
+ rus "View SELECT Ñодержит переменную или параметр"
+ ukr "View SELECT має зминну або параметер"
+ER_VIEW_SELECT_TMPTABLE
+ eng "View's SELECT refers to a temporary table '%-.192s'"
+ ger "SELECT der View verweist auf eine temporäre Tabelle '%-.192s'"
+ rus "View SELECT Ñодержит ÑÑылку на временную таблицу '%-.192s'"
+ ukr "View SELECT викориÑтовує тимчаÑову таблицю '%-.192s'"
+ER_VIEW_WRONG_LIST
+ eng "View's SELECT and view's field list have different column counts"
+ ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten"
+ rus "View SELECT и ÑпиÑок полей view имеют разное количеÑтво Ñтолбцов"
+ ukr "View SELECT Ñ– перелік Ñтовбців view мають різну кількіÑÑ‚ÑŒ Ñковбців"
+ER_WARN_VIEW_MERGE
+ eng "View merge algorithm can't be used here for now (assumed undefined algorithm)"
+ ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)"
+ rus "Ðлгоритм ÑлиÑÐ½Ð¸Ñ view не может быть иÑпользован ÑÐµÐ¹Ñ‡Ð°Ñ (алгоритм будет неопеределенным)"
+ ukr "Ðлгоритм Ð·Ð»Ð¸Ð²Ð°Ð½Ð½Ñ view не може бути викориÑтаний зараз (алгоритм буде невизначений)"
+ER_WARN_VIEW_WITHOUT_KEY
+ eng "View being updated does not have complete key of underlying table in it"
+ ger "Die aktualisierte View enthält nicht den vollständigen Schlüssel der zugrunde liegenden Tabelle"
+ rus "ОбновлÑемый view не Ñодержит ключа иÑпользованных(ой) в нем таблиц(Ñ‹)"
+ ukr "View, що оновлюетьÑÑ, не міÑтить повного ключа таблиці(ÑŒ), що викоріÑтана в ньюому"
+ER_VIEW_INVALID
+ eng "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them"
+ER_SP_NO_DROP_SP
+ eng "Can't drop or alter a %s from within another stored routine"
+ ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine löschen oder ändern"
+ER_SP_GOTO_IN_HNDLR
+ eng "GOTO is not allowed in a stored procedure handler"
+ ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt"
+ER_TRG_ALREADY_EXISTS
+ eng "Trigger already exists"
+ ger "Trigger existiert bereits"
+ER_TRG_DOES_NOT_EXIST
+ eng "Trigger does not exist"
+ ger "Trigger existiert nicht"
+ER_TRG_ON_VIEW_OR_TEMP_TABLE
+ eng "Trigger's '%-.192s' is view or temporary table"
+ ger "'%-.192s' des Triggers ist View oder temporäre Tabelle"
+ER_TRG_CANT_CHANGE_ROW
+ eng "Updating of %s row is not allowed in %strigger"
+ ger "Aktualisieren einer %s-Zeile ist in einem %s-Trigger nicht erlaubt"
+ER_TRG_NO_SUCH_ROW_IN_TRG
+ eng "There is no %s row in %s trigger"
+ ger "Es gibt keine %s-Zeile im %s-Trigger"
+ER_NO_DEFAULT_FOR_FIELD
+ eng "Field '%-.192s' doesn't have a default value"
+ ger "Feld '%-.192s' hat keinen Vorgabewert"
+ER_DIVISION_BY_ZERO 22012
+ eng "Division by 0"
+ ger "Division durch 0"
+ER_TRUNCATED_WRONG_VALUE_FOR_FIELD
+ eng "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld"
+ ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.192s' in Zeile %ld"
+ER_ILLEGAL_VALUE_FOR_TYPE 22007
+ eng "Illegal %s '%-.192s' value found during parsing"
+ ger "Nicht zulässiger %s-Wert '%-.192s' beim Parsen gefunden"
+ER_VIEW_NONUPD_CHECK
+ eng "CHECK OPTION on non-updatable view '%-.192s.%-.192s'"
+ ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.192s.%-.192s'"
+ rus "CHECK OPTION Ð´Ð»Ñ Ð½ÐµÐ¾Ð±Ð½Ð¾Ð²Ð»Ñемого VIEW '%-.192s.%-.192s'"
+ ukr "CHECK OPTION Ð´Ð»Ñ VIEW '%-.192s.%-.192s' що не може бути оновленним"
+ER_VIEW_CHECK_FAILED
+ eng "CHECK OPTION failed '%-.192s.%-.192s'"
+ ger "CHECK OPTION fehlgeschlagen: '%-.192s.%-.192s'"
+ rus "проверка CHECK OPTION Ð´Ð»Ñ VIEW '%-.192s.%-.192s' провалилаÑÑŒ"
+ ukr "Перевірка CHECK OPTION Ð´Ð»Ñ VIEW '%-.192s.%-.192s' не пройшла"
+ER_PROCACCESS_DENIED_ERROR 42000
+ eng "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'"
+ ger "Befehl %-.16s nicht zulässig für Benutzer '%-.48s'@'%-.64s' in Routine '%-.192s'"
+ER_RELAY_LOG_FAIL
+ eng "Failed purging old relay logs: %s"
+ ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s"
+ER_PASSWD_LENGTH
+ eng "Password hash should be a %d-digit hexadecimal number"
+ ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein"
+ER_UNKNOWN_TARGET_BINLOG
+ eng "Target log not found in binlog index"
+ ger "Ziel-Log im Binlog-Index nicht gefunden"
+ER_IO_ERR_LOG_INDEX_READ
+ eng "I/O error reading log index file"
+ ger "Fehler beim Lesen der Log-Index-Datei"
+ER_BINLOG_PURGE_PROHIBITED
+ eng "Server configuration does not permit binlog purge"
+ ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung"
+ER_FSEEK_FAIL
+ eng "Failed on fseek()"
+ ger "fseek() fehlgeschlagen"
+ER_BINLOG_PURGE_FATAL_ERR
+ eng "Fatal error during log purge"
+ ger "Schwerwiegender Fehler bei der Log-Bereinigung"
+ER_LOG_IN_USE
+ eng "A purgeable log is in use, will not purge"
+ ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung"
+ER_LOG_PURGE_UNKNOWN_ERR
+ eng "Unknown error during log purge"
+ ger "Unbekannter Fehler bei Log-Bereinigung"
+ER_RELAY_LOG_INIT
+ eng "Failed initializing relay log position: %s"
+ ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s"
+ER_NO_BINARY_LOGGING
+ eng "You are not using binary logging"
+ ger "Sie verwenden keine Binärlogs"
+ER_RESERVED_SYNTAX
+ eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server"
+ ger "Die Schreibweise '%-.64s' ist für interne Zwecke des MySQL-Servers reserviert"
+ER_WSAS_FAILED
+ eng "WSAStartup Failed"
+ ger "WSAStartup fehlgeschlagen"
+ER_DIFF_GROUPS_PROC
+ eng "Can't handle procedures with different groups yet"
+ ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten"
+ER_NO_GROUP_FOR_PROC
+ eng "Select must have a group with this procedure"
+ ger "SELECT muss bei dieser Prozedur ein GROUP BY haben"
+ER_ORDER_WITH_PROC
+ eng "Can't use ORDER clause with this procedure"
+ ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden"
+ER_LOGGING_PROHIBIT_CHANGING_OF
+ eng "Binary logging and replication forbid changing the global server %s"
+ ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s"
+ER_NO_FILE_MAPPING
+ eng "Can't map file: %-.200s, errno: %d"
+ ger "Kann Datei nicht abbilden: %-.200s, Fehler: %d"
+ER_WRONG_MAGIC
+ eng "Wrong magic in %-.64s"
+ ger "Falsche magische Zahlen in %-.64s"
+ER_PS_MANY_PARAM
+ eng "Prepared statement contains too many placeholders"
+ ger "Vorbereitete Anweisung enthält zu viele Platzhalter"
+ER_KEY_PART_0
+ eng "Key part '%-.192s' length cannot be 0"
+ ger "Länge des Schlüsselteils '%-.192s' kann nicht 0 sein"
+ER_VIEW_CHECKSUM
+ eng "View text checksum failed"
+ ger "View-Text-Prüfsumme fehlgeschlagen"
+ rus "Проверка контрольной Ñуммы текÑта VIEW провалилаÑÑŒ"
+ ukr "Перевірка контрольної Ñуми текÑту VIEW не пройшла"
+ER_VIEW_MULTIUPDATE
+ eng "Can not modify more than one base table through a join view '%-.192s.%-.192s'"
+ ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.192s.%-.192s' ändern"
+ rus "ÐÐµÐ»ÑŒÐ·Ñ Ð¸Ð·Ð¼ÐµÐ½Ð¸Ñ‚ÑŒ больше чем одну базовую таблицу иÑÐ¿Ð¾Ð»ÑŒÐ·ÑƒÑ Ð¼Ð½Ð¾Ð³Ð¾Ñ‚Ð°Ð±Ð»Ð¸Ñ‡Ð½Ñ‹Ð¹ VIEW '%-.192s.%-.192s'"
+ ukr "Ðеможливо оновити більш ниж одну базову таблицю выкориÑтовуючи VIEW '%-.192s.%-.192s', що міÑÑ‚Ñ–Ñ‚ÑŒ декілька таблиць"
+ER_VIEW_NO_INSERT_FIELD_LIST
+ eng "Can not insert into join view '%-.192s.%-.192s' without fields list"
+ ger "Kann nicht ohne Feldliste in Join-View '%-.192s.%-.192s' einfügen"
+ rus "ÐÐµÐ»ÑŒÐ·Ñ Ð²ÑтавлÑÑ‚ÑŒ запиÑи в многотабличный VIEW '%-.192s.%-.192s' без ÑпиÑка полей"
+ ukr "Ðеможливо уÑтавити Ñ€Ñдки у VIEW '%-.192s.%-.192s', що міÑтить декілька таблиць, без ÑпиÑку Ñтовбців"
+ER_VIEW_DELETE_MERGE_VIEW
+ eng "Can not delete from join view '%-.192s.%-.192s'"
+ ger "Kann nicht aus Join-View '%-.192s.%-.192s' löschen"
+ rus "ÐÐµÐ»ÑŒÐ·Ñ ÑƒÐ´Ð°Ð»ÑÑ‚ÑŒ из многотабличного VIEW '%-.192s.%-.192s'"
+ ukr "Ðеможливо видалити Ñ€Ñдки у VIEW '%-.192s.%-.192s', що міÑтить декілька таблиць"
+ER_CANNOT_USER
+ eng "Operation %s failed for %.256s"
+ ger "Operation %s schlug fehl für %.256s"
+ norwegian-ny "Operation %s failed for '%.256s'"
+ER_XAER_NOTA XAE04
+ eng "XAER_NOTA: Unknown XID"
+ ger "XAER_NOTA: Unbekannte XID"
+ER_XAER_INVAL XAE05
+ eng "XAER_INVAL: Invalid arguments (or unsupported command)"
+ ger "XAER_INVAL: Ungültige Argumente (oder nicht unterstützter Befehl)"
+ER_XAER_RMFAIL XAE07
+ eng "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state"
+ ger "XAER_RMFAIL: DEr Befehl kann nicht ausgeführt werden, wenn die globale Transaktion im Zustand %.64s ist"
+ rus "XAER_RMFAIL: Ñту команду Ð½ÐµÐ»ÑŒÐ·Ñ Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÑÑ‚ÑŒ когда Ð³Ð»Ð¾Ð±Ð°Ð»ÑŒÐ½Ð°Ñ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ð¸Ñ Ð½Ð°Ñ…Ð¾Ð´Ð¸Ñ‚ÑÑ Ð² ÑоÑтоÑнии '%.64s'"
+ER_XAER_OUTSIDE XAE09
+ eng "XAER_OUTSIDE: Some work is done outside global transaction"
+ ger "XAER_OUTSIDE: Einige Arbeiten werden außerhalb der globalen Transaktion verrichtet"
+ER_XAER_RMERR XAE03
+ eng "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency"
+ ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prüfen Sie Ihre Daten auf Konsistenz"
+ER_XA_RBROLLBACK XA100
+ eng "XA_RBROLLBACK: Transaction branch was rolled back"
+ ger "XA_RBROLLBACK: Transaktionszweig wurde zurückgerollt"
+ER_NONEXISTING_PROC_GRANT 42000
+ eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'"
+ ger "Es gibt diese Berechtigung für Benutzer '%-.48s' auf Host '%-.64s' für Routine '%-.192s' nicht"
+ER_PROC_AUTO_GRANT_FAIL
+ eng "Failed to grant EXECUTE and ALTER ROUTINE privileges"
+ ger "Gewährung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen"
+ER_PROC_AUTO_REVOKE_FAIL
+ eng "Failed to revoke all privileges to dropped routine"
+ ger "Rücknahme aller Rechte für die gelöschte Routine fehlgeschlagen"
+ER_DATA_TOO_LONG 22001
+ eng "Data too long for column '%s' at row %ld"
+ ger "Daten zu lang für Feld '%s' in Zeile %ld"
+ER_SP_BAD_SQLSTATE 42000
+ eng "Bad SQLSTATE: '%s'"
+ ger "Ungültiger SQLSTATE: '%s'"
+ER_STARTUP
+ eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s"
+ ger "%s: bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s"
+ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR
+ eng "Can't load value from file with fixed size rows to variable"
+ ger "Kann Wert aus Datei mit Zeilen fester Größe nicht in Variable laden"
+ER_CANT_CREATE_USER_WITH_GRANT 42000
+ eng "You are not allowed to create a user with GRANT"
+ ger "Sie dürfen keinen Benutzer mit GRANT anlegen"
+ER_WRONG_VALUE_FOR_TYPE
+ eng "Incorrect %-.32s value: '%-.128s' for function %-.32s"
+ ger "Falscher %-.32s-Wert: '%-.128s' für Funktion %-.32s"
+ER_TABLE_DEF_CHANGED
+ eng "Table definition has changed, please retry transaction"
+ ger "Tabellendefinition wurde geändert, bitte starten Sie die Transaktion neu"
+ER_SP_DUP_HANDLER 42000
+ eng "Duplicate handler declared in the same block"
+ ger "Doppelter Handler im selben Block deklariert"
+ER_SP_NOT_VAR_ARG 42000
+ eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger"
+ ger "OUT- oder INOUT-Argument %d für Routine %s ist keine Variable"
+ER_SP_NO_RETSET 0A000
+ eng "Not allowed to return a result set from a %s"
+ ger "Rückgabe einer Ergebnismenge aus einer %s ist nicht erlaubt"
+ER_CANT_CREATE_GEOMETRY_OBJECT 22003
+ eng "Cannot get geometry object from data you send to the GEOMETRY field"
+ ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld übergeben haben"
+ER_FAILED_ROUTINE_BREAK_BINLOG
+ eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes"
+ ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binärlogging ist aktiv. Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthält das Binärlog ihre Änderungen nicht"
+ER_BINLOG_UNSAFE_ROUTINE
+ eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
+ ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)"
+ER_BINLOG_CREATE_ROUTINE_NEED_SUPER
+ eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)"
+ ger "Sie haben keine SUPER-Berechtigung und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)"
+ER_EXEC_STMT_WITH_OPEN_CURSOR
+ eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it."
+ ger "Sie können keine vorbereitete Anweisung ausführen, die mit einem geöffneten Cursor verknüpft ist. Setzen Sie die Anweisung zurück, um sie neu auszuführen"
+ER_STMT_HAS_NO_OPEN_CURSOR
+ eng "The statement (%lu) has no open cursor."
+ ger "Die Anweisung (%lu) hat keinen geöffneten Cursor"
+ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG
+ eng "Explicit or implicit commit is not allowed in stored function or trigger."
+ ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt"
+ER_NO_DEFAULT_FOR_VIEW_FIELD
+ eng "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value"
+ ger "Ein Feld der dem View '%-.192s.%-.192s' zugrundeliegenden Tabelle hat keinen Vorgabewert"
+ER_SP_NO_RECURSION
+ eng "Recursive stored functions and triggers are not allowed."
+ ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt"
+ER_TOO_BIG_SCALE 42000 S1009
+ eng "Too big scale %d specified for column '%-.192s'. Maximum is %lu."
+ ger "Zu großer Skalierungsfaktor %d für Feld '%-.192s' angegeben. Maximum ist %lu"
+ER_TOO_BIG_PRECISION 42000 S1009
+ eng "Too big precision %d specified for column '%-.192s'. Maximum is %lu."
+ ger "Zu große Genauigkeit %d für Feld '%-.192s' angegeben. Maximum ist %lu"
+ER_M_BIGGER_THAN_D 42000 S1009
+ eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s')."
+ ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.192s')"
+ER_WRONG_LOCK_OF_SYSTEM_TABLE
+ eng "You can't combine write-locking of system tables with other tables or lock types"
+ ger "Sie können Schreibsperren auf der Systemtabelle nicht mit anderen Tabellen kombinieren"
+ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+ eng "Unable to connect to foreign data source: %.64s"
+ ger "Kann nicht mit Fremddatenquelle verbinden: %.64s"
+ER_QUERY_ON_FOREIGN_DATA_SOURCE
+ eng "There was a problem processing the query on the foreign data source. Data source error: %-.64s"
+ ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s"
+ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST
+ eng "The foreign data source you are trying to reference does not exist. Data source error: %-.64s"
+ ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
+ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE
+ eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format"
+ ger "Kann föderierte Tabelle nicht erzeugen. Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format"
+ER_FOREIGN_DATA_STRING_INVALID
+ eng "The data source connection string '%-.64s' is not in the correct format"
+ ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format"
+ER_CANT_CREATE_FEDERATED_TABLE
+ eng "Can't create federated table. Foreign data src error: %-.64s"
+ ger "Kann föderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s"
+ER_TRG_IN_WRONG_SCHEMA
+ eng "Trigger in wrong schema"
+ ger "Trigger im falschen Schema"
+ER_STACK_OVERRUN_NEED_MORE
+ eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack."
+ ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld -O thread_stack=#', um einen größeren Stack anzugeben"
+ER_TOO_LONG_BODY 42000 S1009
+ eng "Routine body for '%-.100s' is too long"
+ ger "Routinen-Body für '%-.100s' ist zu lang"
+ER_WARN_CANT_DROP_DEFAULT_KEYCACHE
+ eng "Cannot drop default keycache"
+ ger "Der vorgabemäßige Schlüssel-Cache kann nicht gelöscht werden"
+ER_TOO_BIG_DISPLAYWIDTH 42000 S1009
+ eng "Display width out of range for column '%-.192s' (max = %lu)"
+ ger "Anzeigebreite außerhalb des zulässigen Bereichs für Spalte '%-.192s' (Maximum: %lu)"
+ER_XAER_DUPID XAE08
+ eng "XAER_DUPID: The XID already exists"
+ ger "XAER_DUPID: Die XID existiert bereits"
+ER_DATETIME_FUNCTION_OVERFLOW 22008
+ eng "Datetime function: %-.32s field overflow"
+ ger "Datetime-Funktion: %-.32s Feldüberlauf"
+ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG
+ eng "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger."
+ ger "Kann Tabelle '%-.192s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief"
+ER_VIEW_PREVENT_UPDATE
+ eng "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'."
+ ger "Die Definition der Tabelle '%-.192s' verhindert die Operation %.192s auf Tabelle '%-.192s'"
+ER_PS_NO_RECURSION
+ eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner"
+ ger "Die vorbereitete Anweisung enthält einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszuführen"
+ER_SP_CANT_SET_AUTOCOMMIT
+ eng "Not allowed to set autocommit from a stored function or trigger"
+ ger "Es ist nicht erlaubt, innerhalb einer gespeicherten Funktion oder eines Triggers AUTOCOMMIT zu setzen"
+ER_MALFORMED_DEFINER
+ eng "Definer is not fully qualified"
+ ger "Definierer des View ist nicht vollständig spezifiziert"
+ER_VIEW_FRM_NO_USER
+ eng "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!"
+ ger "View '%-.192s'.'%-.192s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu"
+ER_VIEW_OTHER_USER
+ eng "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer"
+ ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.192s'@'%-.192s' zu erzeugen"
+ER_NO_SUCH_USER
+ eng "The user specified as a definer ('%-.64s'@'%-.64s') does not exist"
+ER_FORBID_SCHEMA_CHANGE
+ eng "Changing schema from '%-.192s' to '%-.192s' is not allowed."
+ ger "Wechsel des Schemas von '%-.192s' auf '%-.192s' ist nicht erlaubt"
+ER_ROW_IS_REFERENCED_2 23000
+ eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)"
+ ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
+ER_NO_REFERENCED_ROW_2 23000
+ eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)"
+ ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)"
+ER_SP_BAD_VAR_SHADOW 42000
+ eng "Variable '%-.64s' must be quoted with `...`, or renamed"
+ ger "Variable '%-.64s' muss mit `...` geschützt oder aber umbenannt werden"
+ER_TRG_NO_DEFINER
+ eng "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger."
+ ger "Kein Definierer-Attribut für Trigger '%-.192s'.'%-.192s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an."
+ER_OLD_FILE_FORMAT
+ eng "'%-.192s' has an old format, you should re-create the '%s' object(s)"
+ ger "'%-.192s' hat altes Format, Sie sollten die '%s'-Objekt(e) neu erzeugen"
+ER_SP_RECURSION_LIMIT
+ eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.192s"
+ ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.192s überschritten"
+ER_SP_PROC_TABLE_CORRUPT
+ eng "Failed to load routine %-.192s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
+ ger "Routine %-.192s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)"
+ER_SP_WRONG_NAME 42000
+ eng "Incorrect routine name '%-.192s'"
+ ger "Ungültiger Routinenname '%-.192s'"
+ER_TABLE_NEEDS_UPGRADE
+ eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" or dump/reload to fix it!"
+ ger "Tabellenaktualisierung erforderlich. Bitte zum Reparieren \"REPAIR TABLE `%-.32s`\" eingeben!"
+ER_SP_NO_AGGREGATE 42000
+ eng "AGGREGATE is not supported for stored functions"
+ ger "AGGREGATE wird bei gespeicherten Funktionen nicht unterstützt"
+ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
+ eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
+ ger "Kann nicht mehr Anweisungen als max_prepared_stmt_count erzeugen (aktueller Wert: %lu)"
+ER_VIEW_RECURSIVE
+ eng "`%-.192s`.`%-.192s` contains view recursion"
+ ger "`%-.192s`.`%-.192s` enthält View-Rekursion"
+ER_NON_GROUPING_FIELD_USED 42000
+ eng "non-grouping field '%-.192s' is used in %-.64s clause"
+ ger "In der %-.192s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet"
+ER_TABLE_CANT_HANDLE_SPKEYS
+ eng "The used table type doesn't support SPATIAL indexes"
+ ger "Der verwendete Tabellentyp unterstützt keine SPATIAL-Indizes"
+ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
+ eng "Triggers can not be created on system tables"
+ ger "Trigger können nicht auf Systemtabellen erzeugt werden"
+ER_REMOVED_SPACES
+ eng "Leading spaces are removed from name '%s'"
+ ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt"
+ER_AUTOINC_READ_FAILED
+ eng "Failed to read auto-increment value from storage engine"
+ ger "Lesen des Autoincrement-Werts von der Speicher-Engine fehlgeschlagen"
+ER_USERNAME
+ eng "user name"
+ ger "Benutzername"
+ER_HOSTNAME
+ eng "host name"
+ ger "Hostname"
+ER_WRONG_STRING_LENGTH
+ eng "String '%-.70s' is too long for %s (should be no longer than %d)"
+ ger "String '%-.70s' ist zu lang für %s (sollte nicht länger sein als %d)"
+ER_NON_INSERTABLE_TABLE
+ eng "The target table %-.100s of the %s is not insertable-into"
+ ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar"
+ER_ADMIN_WRONG_MRG_TABLE
+ eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
+ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT
+ eng "Too high level of nesting for select"
+ER_NAME_BECOMES_EMPTY
+ eng "Name '%-.64s' has become ''"
+ER_AMBIGUOUS_FIELD_TERM
+ eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY"
+ER_FOREIGN_SERVER_EXISTS
+ eng "The foreign server, %s, you are trying to create already exists."
+ER_FOREIGN_SERVER_DOESNT_EXIST
+ eng "The foreign server name you are trying to reference does not exist. Data source error: %-.64s"
+ ger "Die externe Verbindung, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s"
+ER_ILLEGAL_HA_CREATE_OPTION
+ eng "Table storage engine '%-.64s' does not support the create option '%.64s'"
+ ger "Speicher-Engine '%-.64s' der Tabelle unterstützt die Option '%.64s' nicht"
+ER_PARTITION_REQUIRES_VALUES_ERROR
+ eng "Syntax error: %-.64s PARTITIONING requires definition of VALUES %-.64s for each partition"
+ ger "Fehler in der SQL-Syntax: %-.64s-PARTITIONierung erfordert Definition von VALUES %-.64s für jede Partition"
+ swe "Syntaxfel: %-.64s PARTITIONering kräver definition av VALUES %-.64s för varje partition"
+ER_PARTITION_WRONG_VALUES_ERROR
+ eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition"
+ ger "Nur %-.64s-PARTITIONierung kann VALUES %-.64s in der Partitionsdefinition verwenden"
+ swe "Endast %-.64s partitionering kan använda VALUES %-.64s i definition av partitionen"
+ER_PARTITION_MAXVALUE_ERROR
+ eng "MAXVALUE can only be used in last partition definition"
+ ger "MAXVALUE kann nur für die Definition der letzten Partition verwendet werden"
+ swe "MAXVALUE kan bara användas i definitionen av den sista partitionen"
+ER_PARTITION_SUBPARTITION_ERROR
+ eng "Subpartitions can only be hash partitions and by key"
+ ger "Unterpartitionen dürfen nur HASH- oder KEY-Partitionen sein"
+ swe "Subpartitioner kan bara vara hash och key partitioner"
+ER_PARTITION_SUBPART_MIX_ERROR
+ eng "Must define subpartitions on all partitions if on one partition"
+ ger "Unterpartitionen können nur Hash- oder Key-Partitionen sein"
+ swe "Subpartitioner måste definieras på alla partitioner om på en"
+ER_PARTITION_WRONG_NO_PART_ERROR
+ eng "Wrong number of partitions defined, mismatch with previous setting"
+ ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen überein"
+ swe "Antal partitioner definierade och antal partitioner är inte lika"
+ER_PARTITION_WRONG_NO_SUBPART_ERROR
+ eng "Wrong number of subpartitions defined, mismatch with previous setting"
+ ger "Falsche Anzahl von Unterpartitionen definiert, stimmt nicht mit vorherigen Einstellungen überein"
+ swe "Antal subpartitioner definierade och antal subpartitioner är inte lika"
+ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR
+ eng "Constant/Random expression in (sub)partitioning function is not allowed"
+ ger "Konstante oder Random-Ausdrücke in (Unter-)Partitionsfunktionen sind nicht erlaubt"
+ swe "Konstanta uttryck eller slumpmässiga uttryck är inte tillåtna (sub)partitioneringsfunktioner"
+ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR
+ eng "Expression in RANGE/LIST VALUES must be constant"
+ ger "Ausdrücke in RANGE/LIST VALUES müssen konstant sein"
+ swe "Uttryck i RANGE/LIST VALUES måste vara ett konstant uttryck"
+ER_FIELD_NOT_FOUND_PART_ERROR
+ eng "Field in list of fields for partition function not found in table"
+ ger "Felder in der Feldliste der Partitionierungsfunktion wurden in der Tabelle nicht gefunden"
+ swe "Fält i listan av fält för partitionering med key inte funnen i tabellen"
+ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR
+ eng "List of fields is only allowed in KEY partitions"
+ ger "Eine Feldliste ist nur in KEY-Partitionen erlaubt"
+ swe "En lista av fält är endast tillåtet för KEY partitioner"
+ER_INCONSISTENT_PARTITION_INFO_ERROR
+ eng "The partition info in the frm file is not consistent with what can be written into the frm file"
+ ger "Die Partitionierungsinformationen in der frm-Datei stimmen nicht mit dem überein, was in die frm-Datei geschrieben werden kann"
+ swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen"
+ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
+ eng "The %-.192s function returns the wrong type"
+ ger "Die %-.192s-Funktion gibt einen falschen Typ zurück"
+ swe "%-.192s-funktionen returnerar felaktig typ"
+ER_PARTITIONS_MUST_BE_DEFINED_ERROR
+ eng "For %-.64s partitions each partition must be defined"
+ ger "Für %-.64s-Partitionen muss jede Partition definiert sein"
+ swe "För %-.64s partitionering så måste varje partition definieras"
+ER_RANGE_NOT_INCREASING_ERROR
+ eng "VALUES LESS THAN value must be strictly increasing for each partition"
+ ger "Werte in VALUES LESS THAN müssen für jede Partition strikt aufsteigend sein"
+ swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition"
+ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR
+ eng "VALUES value must be of same type as partition function"
+ ger "VALUES-Werte müssen vom selben Typ wie die Partitionierungsfunktion sein"
+ swe "Värden i VALUES måste vara av samma typ som partitioneringsfunktionen"
+ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR
+ eng "Multiple definition of same constant in list partitioning"
+ ger "Mehrfachdefinition derselben Konstante bei Listen-Partitionierung"
+ swe "Multipel definition av samma konstant i list partitionering"
+ER_PARTITION_ENTRY_ERROR
+ eng "Partitioning can not be used stand-alone in query"
+ ger "Partitionierung kann in einer Abfrage nicht alleinstehend benutzt werden"
+ swe "Partitioneringssyntax kan inte användas på egen hand i en SQL-fråga"
+ER_MIX_HANDLER_ERROR
+ eng "The mix of handlers in the partitions is not allowed in this version of MySQL"
+ ger "Das Vermischen von Handlern in Partitionen ist in dieser Version von MySQL nicht erlaubt"
+ swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL"
+ER_PARTITION_NOT_DEFINED_ERROR
+ eng "For the partitioned engine it is necessary to define all %-.64s"
+ ger "Für die partitionierte Engine müssen alle %-.64s definiert sein"
+ swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s"
+ER_TOO_MANY_PARTITIONS_ERROR
+ eng "Too many partitions (including subpartitions) were defined"
+ ger "Es wurden zu vielen Partitionen (einschließlich Unterpartitionen) definiert"
+ swe "För många partitioner (inkluderande subpartitioner) definierades"
+ER_SUBPARTITION_ERROR
+ eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning"
+ ger "RANGE/LIST-Partitionierung kann bei Unterpartitionen nur zusammen mit HASH/KEY-Partitionierung verwendet werden"
+ swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering"
+ER_CANT_CREATE_HANDLER_FILE
+ eng "Failed to create specific handler file"
+ ger "Erzeugen einer spezifischen Handler-Datei fehlgeschlagen"
+ swe "Misslyckades med att skapa specifik fil i lagringsmotor"
+ER_BLOB_FIELD_IN_PART_FUNC_ERROR
+ eng "A BLOB field is not allowed in partition function"
+ ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt"
+ swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
+ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
+ eng "A %-.192s must include all columns in the table's partitioning function"
+ER_NO_PARTS_ERROR
+ eng "Number of %-.64s = 0 is not an allowed value"
+ ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert"
+ swe "Antal %-.64s = 0 är inte ett tillåten värde"
+ER_PARTITION_MGMT_ON_NONPARTITIONED
+ eng "Partition management on a not partitioned table is not possible"
+ ger "Partitionsverwaltung einer nicht partitionierten Tabelle ist nicht möglich"
+ swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt"
+ER_FOREIGN_KEY_ON_PARTITIONED
+ eng "Foreign key clause is not yet supported in conjunction with partitioning"
+ ger "Fremdschlüssel-Beschränkungen sind im Zusammenhang mit Partitionierung nicht zulässig"
+ swe "Foreign key klausul är inte ännu implementerad i kombination med partitionering"
+ER_DROP_PARTITION_NON_EXISTENT
+ eng "Error in list of partitions to %-.64s"
+ ger "Fehler in der Partitionsliste bei %-.64s"
+ swe "Fel i listan av partitioner att %-.64s"
+ER_DROP_LAST_PARTITION
+ eng "Cannot remove all partitions, use DROP TABLE instead"
+ ger "Es lassen sich nicht sämtliche Partitionen löschen, benutzen Sie statt dessen DROP TABLE"
+ swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället"
+ER_COALESCE_ONLY_ON_HASH_PARTITION
+ eng "COALESCE PARTITION can only be used on HASH/KEY partitions"
+ ger "COALESCE PARTITION kann nur auf HASH- oder KEY-Partitionen benutzt werden"
+ swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner"
+ER_REORG_HASH_ONLY_ON_SAME_NO
+ eng "REORGANIZE PARTITION can only be used to reorganize partitions not to change their numbers"
+ ger "REORGANIZE PARTITION kann nur zur Reorganisation von Partitionen verwendet werden, nicht, um ihre Nummern zu ändern"
+ swe "REORGANIZE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal"
+ER_REORG_NO_PARAM_ERROR
+ eng "REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs"
+ ger "REORGANIZE PARTITION ohne Parameter kann nur für auto-partitionierte Tabellen verwendet werden, die HASH-Partitionierung benutzen"
+ swe "REORGANIZE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering"
+ER_ONLY_ON_RANGE_LIST_PARTITION
+ eng "%-.64s PARTITION can only be used on RANGE/LIST partitions"
+ ger "%-.64s PARTITION kann nur für RANGE- oder LIST-Partitionen verwendet werden"
+ swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner"
+ER_ADD_PARTITION_SUBPART_ERROR
+ eng "Trying to Add partition(s) with wrong number of subpartitions"
+ ger "Es wurde versucht, eine oder mehrere Partitionen mit der falschen Anzahl von Unterpartitionen hinzuzufügen"
+ swe "ADD PARTITION med fel antal subpartitioner"
+ER_ADD_PARTITION_NO_NEW_PARTITION
+ eng "At least one partition must be added"
+ ger "Es muss zumindest eine Partition hinzugefügt werden"
+ swe "Åtminstone en partition måste läggas till vid ADD PARTITION"
+ER_COALESCE_PARTITION_NO_PARTITION
+ eng "At least one partition must be coalesced"
+ ger "Zumindest eine Partition muss mit COALESCE PARTITION zusammengefügt werden"
+ swe "Åtminstone en partition måste slås ihop vid COALESCE PARTITION"
+ER_REORG_PARTITION_NOT_EXIST
+ eng "More partitions to reorganize than there are partitions"
+ ger "Es wurde versucht, mehr Partitionen als vorhanden zu reorganisieren"
+ swe "Fler partitioner att reorganisera än det finns partitioner"
+ER_SAME_NAME_PARTITION
+ eng "Duplicate partition name %-.192s"
+ ger "Doppelter Partitionsname: %-.192s"
+ swe "Duplicerat partitionsnamn %-.192s"
+ER_NO_BINLOG_ERROR
+ eng "It is not allowed to shut off binlog on this command"
+ ger "Es es nicht erlaubt, bei diesem Befehl binlog abzuschalten"
+ swe "Det är inte tillåtet att stänga av binlog på detta kommando"
+ER_CONSECUTIVE_REORG_PARTITIONS
+ eng "When reorganizing a set of partitions they must be in consecutive order"
+ ger "Bei der Reorganisation eines Satzes von Partitionen müssen diese in geordneter Reihenfolge vorliegen"
+ swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning"
+ER_REORG_OUTSIDE_RANGE
+ eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range"
+ ger "Die Reorganisation von RANGE-Partitionen kann Gesamtbereiche nicht verändern, mit Ausnahme der letzten Partition, die den Bereich erweitern kann"
+ swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas"
+ER_PARTITION_FUNCTION_FAILURE
+ eng "Partition function not supported in this version for this handler"
+ ger "Partitionsfunktion in dieser Version dieses Handlers nicht unterstützt"
+ER_PART_STATE_ERROR
+ eng "Partition state cannot be defined from CREATE/ALTER TABLE"
+ ger "Partitionszustand kann nicht von CREATE oder ALTER TABLE aus definiert werden"
+ swe "Partition state kan inte definieras från CREATE/ALTER TABLE"
+ER_LIMITED_PART_RANGE
+ eng "The %-.64s handler only supports 32 bit integers in VALUES"
+ ger "Der Handler %-.64s unterstützt in VALUES nur 32-Bit-Integers"
+ swe "%-.64s stödjer endast 32 bitar i integers i VALUES"
+ER_PLUGIN_IS_NOT_LOADED
+ eng "Plugin '%-.192s' is not loaded"
+ ger "Plugin '%-.192s' ist nicht geladen"
+ER_WRONG_VALUE
+ eng "Incorrect %-.32s value: '%-.128s'"
+ ger "Falscher %-.32s-Wert: '%-.128s'"
+ER_NO_PARTITION_FOR_GIVEN_VALUE
+ eng "Table has no partition for value %-.64s"
+ ger "Tabelle hat für den Wert %-.64s keine Partition"
+ER_FILEGROUP_OPTION_ONLY_ONCE
+ eng "It is not allowed to specify %s more than once"
+ ger "%s darf nicht mehr als einmal angegegeben werden"
+ER_CREATE_FILEGROUP_FAILED
+ eng "Failed to create %s"
+ ger "Anlegen von %s fehlgeschlagen"
+ER_DROP_FILEGROUP_FAILED
+ eng "Failed to drop %s"
+ ger "Löschen (drop) von %s fehlgeschlagen"
+ER_TABLESPACE_AUTO_EXTEND_ERROR
+ eng "The handler doesn't support autoextend of tablespaces"
+ ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces"
+ER_WRONG_SIZE_NUMBER
+ eng "A size parameter was incorrectly specified, either number or on the form 10M"
+ ger "Ein Größen-Parameter wurde unkorrekt angegeben, muss entweder Zahl sein oder im Format 10M"
+ER_SIZE_OVERFLOW_ERROR
+ eng "The size number was correct but we don't allow the digit part to be more than 2 billion"
+ ger "Die Zahl für die Größe war korrekt, aber der Zahlanteil darf nicht größer als 2 Milliarden sein"
+ER_ALTER_FILEGROUP_FAILED
+ eng "Failed to alter: %s"
+ ger "Änderung von %s fehlgeschlagen"
+ER_BINLOG_ROW_LOGGING_FAILED
+ eng "Writing one row to the row-based binary log failed"
+ ger "Schreiben einer Zeilen ins zeilenbasierte Binärlog fehlgeschlagen"
+ER_BINLOG_ROW_WRONG_TABLE_DEF
+ eng "Table definition on master and slave does not match: %s"
+ ger "Tabellendefinition auf Master und Slave stimmt nicht überein: %s"
+ER_BINLOG_ROW_RBR_TO_SBR
+ eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events"
+ ger "Slave, die mit --log-slave-updates laufen, müssen zeilenbasiertes Loggen verwenden, um zeilenbasierte Binärlog-Ereignisse loggen zu können"
+ER_EVENT_ALREADY_EXISTS
+ eng "Event '%-.192s' already exists"
+ ger "Event '%-.192s' existiert bereits"
+ER_EVENT_STORE_FAILED
+ eng "Failed to store event %s. Error code %d from storage engine."
+ ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %d"
+ER_EVENT_DOES_NOT_EXIST
+ eng "Unknown event '%-.192s'"
+ ger "Unbekanntes Event '%-.192s'"
+ER_EVENT_CANT_ALTER
+ eng "Failed to alter event '%-.192s'"
+ ger "Ändern des Events '%-.192s' fehlgeschlagen"
+ER_EVENT_DROP_FAILED
+ eng "Failed to drop %s"
+ ger "Löschen von %s fehlgeschlagen"
+ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG
+ eng "INTERVAL is either not positive or too big"
+ ger "INTERVAL ist entweder nicht positiv oder zu groß"
+ER_EVENT_ENDS_BEFORE_STARTS
+ eng "ENDS is either invalid or before STARTS"
+ ger "ENDS ist entweder ungültig oder liegt vor STARTS"
+ER_EVENT_EXEC_TIME_IN_THE_PAST
+ eng "Event execution time is in the past. Event has been disabled"
+ER_EVENT_OPEN_TABLE_FAILED
+ eng "Failed to open mysql.event"
+ ger "Öffnen von mysql.event fehlgeschlagen"
+ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
+ eng "No datetime expression provided"
+ ger "Kein DATETIME-Ausdruck angegeben"
+ER_COL_COUNT_DOESNT_MATCH_CORRUPTED
+ eng "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted"
+ ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt"
+ER_CANNOT_LOAD_FROM_TABLE
+ eng "Cannot load from mysql.%s. The table is probably corrupted"
+ ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt"
+ER_EVENT_CANNOT_DELETE
+ eng "Failed to delete the event from mysql.event"
+ ger "Löschen des Events aus mysql.event fehlgeschlagen"
+ER_EVENT_COMPILE_ERROR
+ eng "Error during compilation of event's body"
+ ger "Fehler beim Kompilieren des Event-Bodys"
+ER_EVENT_SAME_NAME
+ eng "Same old and new event name"
+ ger "Alter und neuer Event-Name sind gleich"
+ER_EVENT_DATA_TOO_LONG
+ eng "Data for column '%s' too long"
+ ger "Daten der Spalte '%s' zu lang"
+ER_DROP_INDEX_FK
+ eng "Cannot drop index '%-.192s': needed in a foreign key constraint"
+ ger "Kann Index '%-.192s' nicht löschen: wird für einen Fremdschlüssel benötigt"
+# When using this error message, use the ER_WARN_DEPRECATED_SYNTAX error
+# code. See, for example, code in mysql_priv.h.
+ER_WARN_DEPRECATED_SYNTAX_WITH_VER
+ eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead"
+ ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt. Bitte benutzen Sie statt dessen %s"
+ER_CANT_WRITE_LOCK_LOG_TABLE
+ eng "You can't write-lock a log table. Only read access is possible"
+ ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich"
+ER_CANT_LOCK_LOG_TABLE
+ eng "You can't use locks with log tables."
+ ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. Verwenden Sie statt dessen READ LOCAL"
+ER_FOREIGN_DUPLICATE_KEY 23000 S1009
+ eng "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry"
+ ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen"
+ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
+ eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error."
+ ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
+ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
+ eng "Cannot switch out of the row-based binary log format when the session has open temporary tables"
+ ger "Kann nicht aus dem zeilenbasierten Binärlog-Format herauswechseln, wenn die Sitzung offene temporäre Tabellen hat"
+ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
+ eng "Cannot change the binary logging format inside a stored function or trigger"
+ ger "Das Binärlog-Format kann innerhalb einer gespeicherten Funktion oder eines Triggers nicht geändert werden"
+ER_NDB_CANT_SWITCH_BINLOG_FORMAT
+ eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
+ ger "Die Speicher-Engine NDB Cluster unterstützt das Ändern des Binärlog-Formats zur Laufzeit noch nicht"
+ER_PARTITION_NO_TEMPORARY
+ eng "Cannot create temporary table with partitions"
+ ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich"
+ER_PARTITION_CONST_DOMAIN_ERROR
+ eng "Partition constant is out of partition function domain"
+ ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne"
+ swe "Partitionskonstanten är utanför partitioneringsfunktionens domän"
+ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
+ eng "This partition function is not allowed"
+ ger "Diese Partitionierungsfunktion ist nicht erlaubt"
+ swe "Denna partitioneringsfunktion är inte tillåten"
+ER_DDL_LOG_ERROR
+ eng "Error in DDL log"
+ ger "Fehler im DDL-Log"
+ER_NULL_IN_VALUES_LESS_THAN
+ eng "Not allowed to use NULL value in VALUES LESS THAN"
+ ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden"
+ swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN"
+ER_WRONG_PARTITION_NAME
+ eng "Incorrect partition name"
+ ger "Falscher Partitionsname"
+ swe "Felaktigt partitionsnamn"
+ER_CANT_CHANGE_TX_ISOLATION 25001
+ eng "Transaction isolation level can't be changed while a transaction is in progress"
+ ger "Transaktionsisolationsebene kann während einer laufenden Transaktion nicht geändert werden"
+ER_DUP_ENTRY_AUTOINCREMENT_CASE
+ eng "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.192s' for key '%-.192s'"
+ ger "ALTER TABLE führt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.192s' für Schlüssel '%-.192s' auftritt"
+ER_EVENT_MODIFY_QUEUE_ERROR
+ eng "Internal scheduler error %d"
+ ger "Interner Scheduler-Fehler %d"
+ER_EVENT_SET_VAR_ERROR
+ eng "Error during starting/stopping of the scheduler. Error code %u"
+ ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %u"
+ER_PARTITION_MERGE_ERROR
+ eng "Engine cannot be used in partitioned tables"
+ ger "Engine kann in partitionierten Tabellen nicht verwendet werden"
+ swe "Engine inte användas i en partitionerad tabell"
+ER_CANT_ACTIVATE_LOG
+ eng "Cannot activate '%-.64s' log"
+ ger "Kann Logdatei '%-.64s' nicht aktivieren"
+ER_RBR_NOT_AVAILABLE
+ eng "The server was not built with row-based replication"
+ ger "Der Server hat keine zeilenbasierte Replikation"
+ER_BASE64_DECODE_ERROR
+ eng "Decoding of base64 string failed"
+ swe "Avkodning av base64 sträng misslyckades"
+ER_EVENT_RECURSION_FORBIDDEN
+ eng "Recursion of EVENT DDL statements is forbidden when body is present"
+ ger "Rekursivität von EVENT-DDL-Anweisungen ist unzulässig wenn ein Hauptteil (Body) existiert"
+ER_EVENTS_DB_ERROR
+ eng "Cannot proceed because system tables used by Event Scheduler were found damaged at server start"
+ ger "Kann nicht weitermachen, weil die Tabellen, die von Events verwendet werden, beim Serverstart als beschädigt markiert wurden"
+ER_ONLY_INTEGERS_ALLOWED
+ eng "Only integers allowed as number here"
+ ger "An dieser Stelle sind nur Ganzzahlen zulässig"
+ER_UNSUPORTED_LOG_ENGINE
+ eng "This storage engine cannot be used for log tables""
+ ger "Diese Speicher-Engine kann für Logtabellen nicht verwendet werden"
+ER_BAD_LOG_STATEMENT
+ eng "You cannot '%s' a log table if logging is enabled"
+ ger "Sie können eine Logtabelle nicht '%s', wenn Loggen angeschaltet ist"
+ER_CANT_RENAME_LOG_TABLE
+ eng "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'"
+ ger "Kann '%s' nicht umbenennen. Wenn Loggen angeschaltet ist, müssen beim Umbenennen zu/von einer Logtabelle zwei Tabellen angegeben werden: die Logtabelle zu einer Archivtabelle und eine weitere Tabelle zurück zu '%s'"
+ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT 42000
+ eng "Incorrect parameter count in the call to native function '%-.192s'"
+ ger "Falsche Anzahl von Parametern beim Aufruf der nativen Funktion '%-.192s'"
+ER_WRONG_PARAMETERS_TO_NATIVE_FCT 42000
+ eng "Incorrect parameters in the call to native function '%-.192s'"
+ ger "Falscher Parameter beim Aufruf der nativen Funktion '%-.192s'"
+ER_WRONG_PARAMETERS_TO_STORED_FCT 42000
+ eng "Incorrect parameters in the call to stored function '%-.192s'"
+ ger "Falsche Parameter beim Aufruf der gespeicherten Funktion '%-.192s'"
+ER_NATIVE_FCT_NAME_COLLISION
+ eng "This function '%-.192s' has the same name as a native function"
+ ger "Die Funktion '%-.192s' hat denselben Namen wie eine native Funktion"
+# When using this error message, use the ER_DUP_ENTRY error code. See, for
+# example, code in handler.cc.
+ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009
+ cze "Zvojen-Bý klÃ­Ä '%-.64s' (Äíslo klíÄe '%-.192s')"
+ dan "Ens værdier '%-.64s' for indeks '%-.192s'"
+ nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.192s'"
+ eng "Duplicate entry '%-.64s' for key '%-.192s'"
+ jps "'%-.64s' 㯠key '%-.192s' ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™",
+ est "Kattuv väärtus '%-.64s' võtmele '%-.192s'"
+ fre "Duplicata du champ '%-.64s' pour la clef '%-.192s'"
+ ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.192s'"
+ greek "Διπλή εγγÏαφή '%-.64s' για το κλειδί '%-.192s'"
+ hun "Duplikalt bejegyzes '%-.64s' a '%-.192s' kulcs szerint."
+ ita "Valore duplicato '%-.64s' per la chiave '%-.192s'"
+ jpn "'%-.64s' 㯠key '%-.192s' ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™"
+ kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’ '%-.64s': key '%-.192s'"
+ nor "Like verdier '%-.64s' for nøkkel '%-.192s'"
+ norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.192s'"
+ pol "Powtórzone wyst?pienie '%-.64s' dla klucza '%-.192s'"
+ por "Entrada '%-.64s' duplicada para a chave '%-.192s'"
+ rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.192s'"
+ rus "ДублирующаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ '%-.64s' по ключу '%-.192s'"
+ serbian "Dupliran unos '%-.64s' za kljuÄ '%-.192s'"
+ slo "Opakovaný kÄ¾ÃºÄ '%-.64s' (Äíslo kľúÄa '%-.192s')"
+ spa "Entrada duplicada '%-.64s' para la clave '%-.192s'"
+ swe "Dubbel nyckel '%-.64s' för nyckel '%-.192s'"
+ ukr "Дублюючий Ð·Ð°Ð¿Ð¸Ñ '%-.64s' Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð° '%-.192s'"
+ER_BINLOG_PURGE_EMFILE
+ eng "Too many files opened, please execute the command again"
+ ger "Zu viele offene Dateien, bitte führen Sie den Befehl noch einmal aus"
+ER_EVENT_CANNOT_CREATE_IN_THE_PAST
+ eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
+ER_EVENT_CANNOT_ALTER_IN_THE_PAST
+ eng "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation."
+ER_SLAVE_INCIDENT
+ eng "The incident %s occured on the master. Message: %-.64s"
+ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT
+ eng "Table has no partition for some existing values"
+ER_BINLOG_UNSAFE_STATEMENT
+ eng "Statement may not be safe to log in statement format."
+ swe "Detta är inte säkert att logga i statement-format."
+ER_SLAVE_FATAL_ERROR
+ eng "Fatal error: %s"
+ER_SLAVE_RELAY_LOG_READ_FAILURE
+ eng "Relay log read failure: %s"
+ER_SLAVE_RELAY_LOG_WRITE_FAILURE
+ eng "Relay log write failure: %s"
+ER_SLAVE_CREATE_EVENT_FAILURE
+ eng "Failed to create %s"
+ER_SLAVE_MASTER_COM_FAILURE
+ eng "Master command %s failed: %s"
+ER_BINLOG_LOGGING_IMPOSSIBLE
+ eng "Binary logging not possible. Message: %s"
+
+ER_VIEW_NO_CREATION_CTX
+ eng "View `%-.64s`.`%-.64s` has no creation context"
+ER_VIEW_INVALID_CREATION_CTX
+ eng "Creation context of view `%-.64s`.`%-.64s' is invalid"
+
+ER_SR_INVALID_CREATION_CTX
+ eng "Creation context of stored routine `%-.64s`.`%-.64s` is invalid"
+
+ER_TRG_CORRUPTED_FILE
+ eng "Corrupted TRG file for table `%-.64s`.`%-.64s`"
+ER_TRG_NO_CREATION_CTX
+ eng "Triggers for table `%-.64s`.`%-.64s` have no creation context"
+ER_TRG_INVALID_CREATION_CTX
+ eng "Trigger creation context of table `%-.64s`.`%-.64s` is invalid"
+
+ER_EVENT_INVALID_CREATION_CTX
+ eng "Creation context of event `%-.64s`.`%-.64s` is invalid"
+
+ER_TRG_CANT_OPEN_TABLE
+ eng "Cannot open table for trigger `%-.64s`.`%-.64s`"
+
+ER_CANT_CREATE_SROUTINE
+ eng "Cannot create stored routine `%-.64s`. Check warnings"
+ER_SLAVE_AMBIGOUS_EXEC_MODE
+ eng "Ambiguous slave modes combination. %s"
+
+ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT
+ eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement."
+ER_SLAVE_CORRUPT_EVENT
+ eng "Corrupted replication event was detected"
+
+ER_LOAD_DATA_INVALID_COLUMN
+ eng "Invalid column reference (%-.64s) in LOAD DATA"
+
+ER_LOG_PURGE_NO_FILE
+ eng "Being purged log %s was not found"
+
+ER_XA_RBTIMEOUT XA106
+ eng "XA_RBTIMEOUT: Transaction branch was rolled back: took too long"
+
+ER_XA_RBDEADLOCK XA102
+ eng "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected"
+
+ER_NEED_REPREPARE
+ eng "Prepared statement needs to be re-prepared"
+
+ER_DELAYED_NOT_SUPPORTED
+ eng "DELAYED option not supported for table '%-.192s'"
+
+WARN_NO_MASTER_INFO
+ eng "The master info structure does not exist"
+
+WARN_OPTION_IGNORED
+ eng "<%-.64s> option ignored"
+
+WARN_PLUGIN_DELETE_BUILTIN
+ eng "Built-in plugins cannot be deleted"
+
+WARN_PLUGIN_BUSY
+ eng "Plugin is busy and will be uninstalled on shutdown"
+
+ER_VARIABLE_IS_READONLY
+ eng "%s variable '%s' is read-only. Use SET %s to assign the value"
+
+ER_WARN_ENGINE_TRANSACTION_ROLLBACK
+ eng "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted"
+
+ER_SLAVE_HEARTBEAT_FAILURE
+ eng "Unexpected master's heartbeat data: %s"
+ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE
+ eng "The requested value for the heartbeat period %s %s"
+
+ER_NDB_REPLICATION_SCHEMA_ERROR
+ eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
+ER_CONFLICT_FN_PARSE_ERROR
+ eng "Error in parsing conflict function. Message: %-.64s"
+ER_EXCEPTIONS_WRITE_ERROR
+ eng "Write to exceptions table failed. Message: %-.128s""
+
+ER_TOO_LONG_TABLE_COMMENT
+ eng "Comment for table '%-.64s' is too long (max = %lu)"
+ por "Comentário para a tabela '%-.64s' é longo demais (max = %lu)"
+
+ER_TOO_LONG_FIELD_COMMENT
+ eng "Comment for field '%-.64s' is too long (max = %lu)"
+ por "Comentário para o campo '%-.64s' é longo demais (max = %lu)"
+
+ER_FUNC_INEXISTENT_NAME_COLLISION 42000
+ eng "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual"
+
+# When updating these, please update EXPLAIN_FILENAME_MAX_EXTRA_LENGTH in
+# mysql_priv.h with the new maximal additional length for explain_filename.
+ER_DATABASE_NAME
+ eng "Database"
+ swe "Databas"
+ER_TABLE_NAME
+ eng "Table"
+ swe "Tabell"
+ER_PARTITION_NAME
+ eng "Partition"
+ swe "Partition"
+ER_SUBPARTITION_NAME
+ eng "Subpartition"
+ swe "Subpartition"
+ER_TEMPORARY_NAME
+ eng "Temporary"
+ swe "Temporär"
+ER_RENAMED_NAME
+ eng "Renamed"
+ swe "Namnändrad"
+ER_TOO_MANY_CONCURRENT_TRXS
+ eng "Too many active concurrent transactions"
+
+WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED
+ eng "Non-ASCII separator arguments are not fully supported"
+
+ER_DEBUG_SYNC_TIMEOUT
+ eng "debug sync point wait timed out"
+ ger "Debug Sync Point Wartezeit überschritten"
+ER_DEBUG_SYNC_HIT_LIMIT
+ eng "debug sync point hit limit reached"
+ ger "Debug Sync Point Hit Limit erreicht"
+
+ER_DUP_SIGNAL_SET 42000
+ eng "Duplicate condition information item '%s'"
+
+# Note that the SQLSTATE is not 01000, it is provided by SIGNAL/RESIGNAL
+ER_SIGNAL_WARN 01000
+ eng "Unhandled user-defined warning condition"
+
+# Note that the SQLSTATE is not 02000, it is provided by SIGNAL/RESIGNAL
+ER_SIGNAL_NOT_FOUND 02000
+ eng "Unhandled user-defined not found condition"
+
+# Note that the SQLSTATE is not HY000, it is provided by SIGNAL/RESIGNAL
+ER_SIGNAL_EXCEPTION HY000
+ eng "Unhandled user-defined exception condition"
+
+ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER 0K000
+ eng "RESIGNAL when handler not active"
+
+ER_SIGNAL_BAD_CONDITION_TYPE
+ eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE"
+
+WARN_COND_ITEM_TRUNCATED
+ eng "Data truncated for condition item '%s'"
+
+ER_COND_ITEM_TOO_LONG
+ eng "Data too long for condition item '%s'"
+
+ER_UNKNOWN_LOCALE
+ eng "Unknown locale: '%-.64s'"
+
+ER_SLAVE_IGNORE_SERVER_IDS
+ eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id"
+ER_QUERY_CACHE_DISABLED
+ eng "Query cache is disabled; restart the server with query_cache_type=1 to enable it"
+ER_SAME_NAME_PARTITION_FIELD
+ eng "Duplicate partition field name '%-.192s'"
+ER_PARTITION_COLUMN_LIST_ERROR
+ eng "Inconsistency in usage of column lists for partitioning"
+ER_WRONG_TYPE_COLUMN_VALUE_ERROR
+ eng "Partition column values of incorrect type"
+ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR
+ eng "Too many fields in '%-.192s'"
+ER_MAXVALUE_IN_VALUES_IN
+ eng "Cannot use MAXVALUE as value in VALUES IN"
+ER_TOO_MANY_VALUES_ERROR
+ eng "Cannot have more than one value for this type of %-.64s partitioning"
+ER_ROW_SINGLE_PARTITION_FIELD_ERROR
+ eng "Row expressions in VALUES IN only allowed for multi-field column partitioning"
+ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD
+ eng "Field '%-.192s' is of a not allowed type for this type of partitioning"
+ER_PARTITION_FIELDS_TOO_LONG
+ eng "The total length of the partitioning fields is too large"
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index a17ad94ba82..034f987e0e7 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -4620,7 +4620,7 @@ ER_USER_LIMIT_REACHED 42000
swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)"
ER_SPECIFIC_ACCESS_DENIED_ERROR 42000
nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie"
- eng "Access denied; you need the %-.128s privilege for this operation"
+ eng "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation"
ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt"
ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione"
por "Acesso negado. Você precisa o privilégio %-.128s para essa operação"
@@ -4879,13 +4879,7 @@ ER_ZLIB_Z_DATA_ERROR
por "ZLIB: Dados de entrada está corrupto"
spa "ZLIB: Dato de entrada fué corrompido para zlib"
ER_CUT_VALUE_GROUP_CONCAT
- eng "%d line(s) were cut by GROUP_CONCAT()"
- ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten"
- nla "%d regel(s) door GROUP_CONCAT() ingekort"
- por "%d linha(s) foram cortada(s) por GROUP_CONCAT()"
- spa "%d línea(s) fue(fueron) cortadas por group_concat()"
- swe "%d rad(er) kapades av group_concat()"
- ukr "%d line(s) was(were) cut by group_concat()"
+ eng "Row %u was cut by GROUP_CONCAT()"
ER_WARN_TOO_FEW_RECORDS 01000
eng "Row %ld doesn't contain data for all columns"
ger "Zeile %ld enthält nicht für alle Felder Daten"
@@ -6213,3 +6207,55 @@ ER_DEBUG_SYNC_TIMEOUT
ER_DEBUG_SYNC_HIT_LIMIT
eng "debug sync point hit limit reached"
ger "Debug Sync Point Hit Limit erreicht"
+
+ER_DUP_SIGNAL_SET 42000
+ eng "Duplicate condition information item '%s'"
+
+# Note that the SQLSTATE is not 01000, it is provided by SIGNAL/RESIGNAL
+ER_SIGNAL_WARN 01000
+ eng "Unhandled user-defined warning condition"
+
+# Note that the SQLSTATE is not 02000, it is provided by SIGNAL/RESIGNAL
+ER_SIGNAL_NOT_FOUND 02000
+ eng "Unhandled user-defined not found condition"
+
+# Note that the SQLSTATE is not HY000, it is provided by SIGNAL/RESIGNAL
+ER_SIGNAL_EXCEPTION HY000
+ eng "Unhandled user-defined exception condition"
+
+ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER 0K000
+ eng "RESIGNAL when handler not active"
+
+ER_SIGNAL_BAD_CONDITION_TYPE
+ eng "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE"
+
+WARN_COND_ITEM_TRUNCATED
+ eng "Data truncated for condition item '%s'"
+
+ER_COND_ITEM_TOO_LONG
+ eng "Data too long for condition item '%s'"
+
+ER_UNKNOWN_LOCALE
+ eng "Unknown locale: '%-.64s'"
+
+ER_SLAVE_IGNORE_SERVER_IDS
+ eng "The requested server id %d clashes with the slave startup option --replicate-same-server-id"
+
+ER_SAME_NAME_PARTITION_FIELD
+ eng "Duplicate partition field name '%-.192s'"
+ER_PARTITION_COLUMN_LIST_ERROR
+ eng "Inconsistency in usage of column lists for partitioning"
+ER_WRONG_TYPE_COLUMN_VALUE_ERROR
+ eng "Partition column values of incorrect type"
+ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR
+ eng "Too many fields in '%-.192s'"
+ER_MAXVALUE_IN_VALUES_IN
+ eng "Cannot use MAXVALUE as value in VALUES IN"
+ER_TOO_MANY_VALUES_ERROR
+ eng "Cannot have more than one value for this type of %-.64s partitioning"
+ER_ROW_SINGLE_PARTITION_FIELD_ERROR
+ eng "Row expressions in VALUES IN only allowed for multi-field column partitioning"
+ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD
+ eng "Field '%-.192s' is of a not allowed type for this type of partitioning"
+ER_PARTITION_FIELDS_TOO_LONG
+ eng "The total length of the partitioning fields is too large"
diff --git a/sql/slave.cc b/sql/slave.cc
index f6660e5a5c8..8be17860c61 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -40,6 +40,7 @@
#include <errmsg.h>
#include <mysqld_error.h>
#include <mysys_err.h>
+#include "rpl_handler.h"
#ifdef HAVE_REPLICATION
@@ -48,6 +49,10 @@
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
#define MAX_SLAVE_RETRY_PAUSE 5
+/*
+ the grace period (in seconds) that sql_slave_killed() waits before accepting the killed status
+*/
+#define SLAVE_WAIT_GROUP_DONE 60
bool use_slave_mask = 0;
MY_BITMAP slave_error_mask;
char slave_skip_error_names[SHOW_VAR_FUNC_BUFF_SIZE];
@@ -69,6 +74,8 @@ ulonglong relay_log_space_limit = 0;
int disconnect_slave_event_count = 0, abort_slave_event_count = 0;
int events_till_abort = -1;
+static pthread_key(Master_info*, RPL_MASTER_INFO);
+
enum enum_slave_reconnect_actions
{
SLAVE_RECON_ACT_REG= 0,
@@ -220,6 +227,7 @@ void unlock_slave_threads(Master_info* mi)
int init_slave()
{
DBUG_ENTER("init_slave");
+ int error= 0;
/*
This is called when mysqld starts. Before client connections are
@@ -231,7 +239,10 @@ int init_slave()
TODO: re-write this to interate through the list of files
for multi-master
*/
- active_mi= new Master_info;
+ active_mi= new Master_info(relay_log_recovery);
+
+ if (pthread_key_create(&RPL_MASTER_INFO, NULL))
+ goto err;
/*
If --slave-skip-errors=... was not used, the string value for the
@@ -250,6 +261,7 @@ int init_slave()
if (!active_mi)
{
sql_print_error("Failed to allocate memory for the master info structure");
+ error= 1;
goto err;
}
@@ -257,6 +269,7 @@ int init_slave()
!master_host, (SLAVE_IO | SLAVE_SQL)))
{
sql_print_error("Failed to initialize the master info structure");
+ error= 1;
goto err;
}
@@ -275,18 +288,69 @@ int init_slave()
SLAVE_IO | SLAVE_SQL))
{
sql_print_error("Failed to create slave threads");
+ error= 1;
goto err;
}
}
- pthread_mutex_unlock(&LOCK_active_mi);
- DBUG_RETURN(0);
err:
pthread_mutex_unlock(&LOCK_active_mi);
- DBUG_RETURN(1);
+ DBUG_RETURN(error);
}
+/*
+ Updates the master info based on the information stored in the
+ relay info and ignores relay logs previously retrieved by the IO
+ thread, which thus starts fetching again based on the
+ group_master_log_pos and group_master_log_name. Eventually, the old
+ relay logs will be purged by the normal purge mechanism.
+
+ In the future, we should improve this routine to avoid throwing
+ away logs that are safely stored on disk. Note also that this recovery
+ routine relies on the correctness of the relay-log.info and only tolerates
+ coordinate problems in master.info.
+
+ In this function, there is no need for a mutex as the caller
+ (i.e. init_slave) already has one acquired.
+
+ Specifically, the following structures are updated:
+
+ 1 - mi->master_log_pos <-- rli->group_master_log_pos
+ 2 - mi->master_log_name <-- rli->group_master_log_name
+ 3 - It moves the relay log to the new relay log file, by
+ rli->group_relay_log_pos <-- BIN_LOG_HEADER_SIZE;
+ rli->event_relay_log_pos <-- BIN_LOG_HEADER_SIZE;
+ rli->group_relay_log_name <-- rli->relay_log.get_log_fname();
+ rli->event_relay_log_name <-- rli->relay_log.get_log_fname();
+
+ If there is an error, it returns (1), otherwise returns (0).
+ */
+int init_recovery(Master_info* mi, const char** errmsg)
+{
+ DBUG_ENTER("init_recovery");
+
+ Relay_log_info *rli= &mi->rli;
+ if (rli->group_master_log_name[0])
+ {
+ mi->master_log_pos= max(BIN_LOG_HEADER_SIZE,
+ rli->group_master_log_pos);
+ strmake(mi->master_log_name, rli->group_master_log_name,
+ sizeof(mi->master_log_name)-1);
+
+ sql_print_warning("Recovery from master pos %ld and file %s.",
+ (ulong) mi->master_log_pos, mi->master_log_name);
+
+ strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
+ sizeof(rli->group_relay_log_name)-1);
+ strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
+ sizeof(mi->rli.event_relay_log_name)-1);
+
+ rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
+ }
+ DBUG_RETURN(0);
+}
+
/**
Convert slave skip errors bitmap into a printable string.
*/
@@ -519,7 +583,7 @@ terminate_slave_thread(THD *thd,
EINVAL: invalid signal number (can't happen)
ESRCH: thread already killed (can happen, should be ignored)
*/
- IF_DBUG(int err= ) pthread_kill(thd->real_id, thr_client_alarm);
+ int err __attribute__((unused))= pthread_kill(thd->real_id, thr_client_alarm);
DBUG_ASSERT(err != EINVAL);
#endif
thd->awake(THD::NOT_KILLED);
@@ -730,44 +794,92 @@ static bool io_slave_killed(THD* thd, Master_info* mi)
DBUG_RETURN(mi->abort_slave || abort_loop || thd->killed);
}
+/**
+ The function analyzes a possible killed status and decides
+ whether to accept it or not.
+ Normally, once the killed status is accepted, the SQL thread shuts down.
+ If the decision is deferred, the @c rli->last_event_start_time waiting
+ timer is set to force the killed status to be accepted when it expires.
+
+ @param thd pointer to a THD instance
+ @param rli pointer to Relay_log_info instance
+ @return TRUE the killed status is recognized, FALSE a possible killed
+ status is deferred.
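+
+ For example (illustrative, based on the code below): if @c rli->abort_slave
+ is raised while a group that updated a non-transactional table is only half
+ applied, the first calls return FALSE and only report a warning; once
+ SLAVE_WAIT_GROUP_DONE (60 seconds) has elapsed since
+ @c last_event_start_time, the killed status is accepted and TRUE is
+ returned.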
+*/
static bool sql_slave_killed(THD* thd, Relay_log_info* rli)
{
+ bool ret= FALSE;
DBUG_ENTER("sql_slave_killed");
DBUG_ASSERT(rli->sql_thd == thd);
DBUG_ASSERT(rli->slave_running == 1);// tracking buffer overrun
if (abort_loop || thd->killed || rli->abort_slave)
{
- if (rli->abort_slave && rli->is_in_group() &&
- thd->transaction.all.modified_non_trans_table)
- DBUG_RETURN(0);
- /*
- If we are in an unsafe situation (stopping could corrupt replication),
- we give one minute to the slave SQL thread of grace before really
- terminating, in the hope that it will be able to read more events and
- the unsafe situation will soon be left. Note that this one minute starts
- from the last time anything happened in the slave SQL thread. So it's
- really one minute of idleness, we don't timeout if the slave SQL thread
- is actively working.
- */
- if (rli->last_event_start_time == 0)
- DBUG_RETURN(1);
- DBUG_PRINT("info", ("Slave SQL thread is in an unsafe situation, giving "
- "it some grace period"));
- if (difftime(time(0), rli->last_event_start_time) > 60)
+ if (thd->transaction.all.modified_non_trans_table && rli->is_in_group())
{
- rli->report(ERROR_LEVEL, 0,
- "SQL thread had to stop in an unsafe situation, in "
- "the middle of applying updates to a "
- "non-transactional table without any primary key. "
- "There is a risk of duplicate updates when the slave "
- "SQL thread is restarted. Please check your tables' "
- "contents after restart.");
- DBUG_RETURN(1);
+ char msg_stopped[]=
+ "... The slave SQL is stopped, leaving the current group "
+ "of events unfinished with a non-transaction table changed. "
+ "If the group consists solely of Row-based events, you can try "
+ "restarting the slave with --slave-exec-mode=IDEMPOTENT, which "
+ "ignores duplicate key, key not found, and similar errors (see "
+ "documentation for details).";
+
+ if (rli->abort_slave)
+ {
+ DBUG_PRINT("info", ("Slave SQL thread is being stopped in the middle of"
+ " a group having updated a non-trans table, giving"
+ " it some grace period"));
+
+ /*
+ Shutting down the slave SQL thread in the middle of a group that has
+ modified a non-transactional table is handled via a timer. The slave
+ may eventually give up on completing the current group, and in that
+ case there might be issues at the subsequent slave restart; see the
+ error message. WL#2975 offers a robust solution that requires storing
+ the last executed event's coordinates along with the group's
+ coordinates, instead of relying on the @c last_event_start_time timer.
+ */
+
+ if (rli->last_event_start_time == 0)
+ rli->last_event_start_time= my_time(0);
+ ret= difftime(my_time(0), rli->last_event_start_time) <=
+ SLAVE_WAIT_GROUP_DONE ? FALSE : TRUE;
+
+ DBUG_EXECUTE_IF("stop_slave_middle_group",
+ DBUG_EXECUTE_IF("incomplete_group_in_relay_log",
+ ret= TRUE;);); // time is over
+
+ if (ret == 0)
+ {
+ rli->report(WARNING_LEVEL, 0,
+ "slave SQL thread is being stopped in the middle "
+ "of applying of a group having updated a non-transaction "
+ "table; waiting for the group completion ... ");
+ }
+ else
+ {
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ ER(ER_SLAVE_FATAL_ERROR), msg_stopped);
+ }
+ }
+ else
+ {
+ ret= TRUE;
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, ER(ER_SLAVE_FATAL_ERROR),
+ msg_stopped);
+ }
+ }
+ else
+ {
+ ret= TRUE;
}
}
- DBUG_RETURN(0);
+ if (ret)
+ rli->last_event_start_time= 0;
+
+ DBUG_RETURN(ret);
}
@@ -860,6 +972,126 @@ int init_intvar_from_file(int* var, IO_CACHE* f, int default_val)
DBUG_RETURN(1);
}
+int init_floatvar_from_file(float* var, IO_CACHE* f, float default_val)
+{
+ char buf[16];
+ DBUG_ENTER("init_floatvar_from_file");
+
+
+ if (my_b_gets(f, buf, sizeof(buf)))
+ {
+ if (sscanf(buf, "%f", var) != 1)
+ DBUG_RETURN(1);
+ else
+ DBUG_RETURN(0);
+ }
+ else if (default_val != 0.0)
+ {
+ *var = default_val;
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(1);
+}
+
+
+/**
+ A master info read method
+
+ This function is called from @c init_master_info() along with
+ relatives to restore some of @c active_mi members.
+ Particularly, this function is responsible for restoring
+ IGNORE_SERVER_IDS list of servers whose events the slave is
+ going to ignore (to not log them in the relay log).
+ Items being read are supposed to be decimal output of values of a
+ type shorter or equal of @c long and separated by the single space.
+
+ @param arr @c DYNAMIC_ARRAY pointer to storage for servers id
+ @param f @c IO_CACHE pointer to the source file
+
+ @retval 0 All OK
+ @retval non-zero An error
+*/
+
+int init_dynarray_intvar_from_file(DYNAMIC_ARRAY* arr, IO_CACHE* f)
+{
+ int ret= 0;
+ char buf[16 * (sizeof(long)*4 + 1)]; // stack buffer, sufficient most of the time
+ char *buf_act= buf; // actual buffer; may be heap-allocated if the stack buffer is too short
+ char *token, *last;
+ uint num_items; // number of items of `arr'
+ size_t read_size;
+ DBUG_ENTER("init_dynarray_intvar_from_file");
+
+ if ((read_size= my_b_gets(f, buf_act, sizeof(buf))) == 0)
+ {
+ DBUG_RETURN(0); // no line in master.info
+ }
+ if (read_size + 1 == sizeof(buf) && buf[sizeof(buf) - 2] != '\n')
+ {
+ /*
+ short read happened; allocate sufficient memory and do a second read
+ */
+ char buf_work[(sizeof(long)*3 + 1)*16];
+ memcpy(buf_work, buf, sizeof(buf_work));
+ num_items= atoi(strtok_r(buf_work, " ", &last));
+ size_t snd_size;
+ /*
+ an approximate estimate of the maximum size needed, based on the formula:
+ (the number-of-items field + the items themselves) *
+ (decimal size + space) - 1 + `\n' + '\0'
+ */
+ size_t max_size= (1 + num_items) * (sizeof(long)*3 + 1) + 1;
+ buf_act= (char*) my_malloc(max_size, MYF(MY_WME));
+ memcpy(buf_act, buf, read_size);
+ snd_size= my_b_gets(f, buf_act + read_size, max_size - read_size);
+ if (snd_size == 0 ||
+ ((snd_size + 1 == max_size - read_size) && buf_act[max_size - 2] != '\n'))
+ {
+ /*
+ failure to make the 2nd read or short read again
+ */
+ ret= 1;
+ goto err;
+ }
+ }
+ token= strtok_r(buf_act, " ", &last);
+ if (token == NULL)
+ {
+ ret= 1;
+ goto err;
+ }
+ num_items= atoi(token);
+ for (uint i=0; i < num_items; i++)
+ {
+ token= strtok_r(NULL, " ", &last);
+ if (token == NULL)
+ {
+ ret= 1;
+ goto err;
+ }
+ else
+ {
+ ulong val= atol(token);
+ insert_dynamic(arr, (uchar *) &val);
+ }
+ }
+err:
+ if (buf_act != buf)
+ my_free(buf_act, MYF(0));
+ DBUG_RETURN(ret);
+}
+
+
+static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info)
+{
+ if (io_slave_killed(thd, mi))
+ {
+ if (info && global_system_variables.log_warnings)
+ sql_print_information("%s", info);
+ return TRUE;
+ }
+ return FALSE;
+}
/*
Check if the error is caused by network.
@@ -1028,7 +1260,7 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
(master_res= mysql_store_result(mysql)) &&
(master_row= mysql_fetch_row(master_res)))
{
- if ((::server_id == strtoul(master_row[1], 0, 10)) &&
+ if ((::server_id == (mi->master_id= strtoul(master_row[1], 0, 10))) &&
!mi->rli.replicate_same_server_id)
{
errmsg= "The slave I/O thread stops because master and slave have equal \
@@ -1066,6 +1298,13 @@ maybe it is a *VERY OLD MASTER*.");
mysql_free_result(master_res);
master_res= NULL;
}
+ if (mi->master_id == 0 && mi->ignore_server_ids.elements > 0)
+ {
+ errmsg= "Slave configured with server id filtering could not detect the master server id.";
+ err_code= ER_SLAVE_FATAL_ERROR;
+ sprintf(err_buff, ER(err_code), errmsg);
+ goto err;
+ }
/*
Check that the master's global character_set_server and ours are the same.
@@ -1189,6 +1428,31 @@ when it try to get the value of TIME_ZONE global variable from master.";
}
}
+ if (mi->heartbeat_period != 0.0)
+ {
+ char llbuf[22];
+ const char query_format[]= "SET @master_heartbeat_period= %s";
+ char query[sizeof(query_format) - 2 + sizeof(llbuf)];
+ /*
+ the period is an ulonglong of nano-secs.
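+ (for example, a heartbeat_period of 0.5 seconds is sent to the master
+ as the string "500000000"; the figure is illustrative only)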
+ */
+ llstr((ulonglong) (mi->heartbeat_period*1000000000UL), llbuf);
+ my_sprintf(query, (query, query_format, llbuf));
+
+ if (mysql_real_query(mysql, query, strlen(query))
+ && !check_io_slave_killed(mi->io_thd, mi, NULL))
+ {
+ errmsg= "The slave I/O thread stops because SET @master_heartbeat_period "
+ "on master failed.";
+ err_code= ER_SLAVE_FATAL_ERROR;
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ mysql_free_result(mysql_store_result(mysql));
+ goto err;
+ }
+ mysql_free_result(mysql_store_result(mysql));
+ }
+
+
err:
if (errmsg)
{
@@ -1274,7 +1538,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
else
{
/* Clear the OK result of mysql_rm_table(). */
- thd->main_da.reset_diagnostics_area();
+ thd->stmt_da->reset_diagnostics_area();
}
}
@@ -1298,7 +1562,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
goto err; // mysql_parse took care of the error send
thd_proc_info(thd, "Opening master dump table");
- thd->main_da.reset_diagnostics_area(); /* cleanup from CREATE_TABLE */
+ thd->stmt_da->reset_diagnostics_area(); /* cleanup from CREATE_TABLE */
/*
Note: If this function starts to fail for MERGE tables,
change the next two lines to these:
@@ -1605,8 +1869,12 @@ bool show_master_info(THD* thd, Master_info* mi)
field_list.push_back(new Item_empty_string("Last_IO_Error", 20));
field_list.push_back(new Item_return_int("Last_SQL_Errno", 4, MYSQL_TYPE_LONG));
field_list.push_back(new Item_empty_string("Last_SQL_Error", 20));
+ field_list.push_back(new Item_empty_string("Replicate_Ignore_Server_Ids",
+ FN_REFLEN));
+ field_list.push_back(new Item_return_int("Master_Server_Id", sizeof(ulong),
+ MYSQL_TYPE_LONG));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -1640,7 +1908,8 @@ bool show_master_info(THD* thd, Master_info* mi)
protocol->store((ulonglong) mi->rli.group_relay_log_pos);
protocol->store(mi->rli.group_master_log_name, &my_charset_bin);
protocol->store(mi->slave_running == MYSQL_SLAVE_RUN_CONNECT ?
- "Yes" : "No", &my_charset_bin);
+ "Yes" : (mi->slave_running == MYSQL_SLAVE_RUN_NOT_CONNECT ?
+ "Connecting" : "No"), &my_charset_bin);
protocol->store(mi->rli.slave_running ? "Yes":"No", &my_charset_bin);
protocol->store(rpl_filter->get_do_db());
protocol->store(rpl_filter->get_ignore_db());
@@ -1726,6 +1995,32 @@ bool show_master_info(THD* thd, Master_info* mi)
protocol->store(mi->rli.last_error().number);
// Last_SQL_Error
protocol->store(mi->rli.last_error().message, &my_charset_bin);
+ // Replicate_Ignore_Server_Ids
+ {
+ char buff[FN_REFLEN];
+ ulong i, cur_len;
+ for (i= 0, buff[0]= 0, cur_len= 0;
+ i < mi->ignore_server_ids.elements; i++)
+ {
+ ulong s_id, slen;
+ char sbuff[FN_REFLEN];
+ get_dynamic(&mi->ignore_server_ids, (uchar*) &s_id, i);
+ slen= my_sprintf(sbuff, (sbuff, (i==0? "%lu" : ", %lu"), s_id));
+ if (cur_len + slen + 4 > FN_REFLEN)
+ {
+ /*
+ break the loop whenever remained space could not fit
+ ellipses on the next cycle
+ */
+ my_sprintf(buff + cur_len, (buff + cur_len, "..."));
+ break;
+ }
+ cur_len += my_sprintf(buff + cur_len, (buff + cur_len, "%s", sbuff));
+ }
+ protocol->store(buff, &my_charset_bin);
+ }
+ // Master_Server_id
+ protocol->store((uint32) mi->master_id);
pthread_mutex_unlock(&mi->rli.err_lock);
pthread_mutex_unlock(&mi->err_lock);
@@ -1869,17 +2164,22 @@ static int safe_sleep(THD* thd, int sec, CHECK_KILLED_FUNC thread_killed,
}
-static int request_dump(MYSQL* mysql, Master_info* mi,
- bool *suppress_warnings)
+static int request_dump(THD *thd, MYSQL* mysql, Master_info* mi,
+ bool *suppress_warnings)
{
uchar buf[FN_REFLEN + 10];
int len;
- int binlog_flags = 0; // for now
+ ushort binlog_flags = 0; // for now
char* logname = mi->master_log_name;
DBUG_ENTER("request_dump");
*suppress_warnings= FALSE;
+ if (RUN_HOOK(binlog_relay_io,
+ before_request_transmit,
+ (thd, mi, binlog_flags)))
+ DBUG_RETURN(1);
+
// TODO if big log files: Change next to int8store()
int4store(buf, (ulong) mi->master_log_pos);
int2store(buf + 4, binlog_flags);
@@ -2012,7 +2312,7 @@ static int has_temporary_error(THD *thd)
DBUG_ENTER("has_temporary_error");
DBUG_EXECUTE_IF("all_errors_are_temporary_errors",
- if (thd->main_da.is_error())
+ if (thd->stmt_da->is_error())
{
thd->clear_error();
my_error(ER_LOCK_DEADLOCK, MYF(0));
@@ -2031,20 +2331,21 @@ static int has_temporary_error(THD *thd)
currently, InnoDB deadlock detected by InnoDB or lock
wait timeout (innodb_lock_wait_timeout exceeded
*/
- if (thd->main_da.sql_errno() == ER_LOCK_DEADLOCK ||
- thd->main_da.sql_errno() == ER_LOCK_WAIT_TIMEOUT)
+ if (thd->stmt_da->sql_errno() == ER_LOCK_DEADLOCK ||
+ thd->stmt_da->sql_errno() == ER_LOCK_WAIT_TIMEOUT)
DBUG_RETURN(1);
#ifdef HAVE_NDB_BINLOG
/*
currently temporary error set in ndbcluster
*/
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
MYSQL_ERROR *err;
while ((err= it++))
{
- DBUG_PRINT("info", ("has warning %d %s", err->code, err->msg));
- switch (err->code)
+ DBUG_PRINT("info", ("has condition %d %s", err->get_sql_errno(),
+ err->get_message_text()));
+ switch (err->get_sql_errno())
{
case ER_GET_TEMPORARY_ERRMSG:
DBUG_RETURN(1);
@@ -2274,6 +2575,27 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
delete ev;
DBUG_RETURN(1);
}
+
+ { /**
+ The following failure injection works in cooperation with tests
+ setting @@global.debug= 'd,incomplete_group_in_relay_log'.
+ Xid or Commit events are not executed, forcing the slave SQL thread
+ to hang reading if the relay log has no more events.
+ */
+ DBUG_EXECUTE_IF("incomplete_group_in_relay_log",
+ if ((ev->get_type_code() == XID_EVENT) ||
+ ((ev->get_type_code() == QUERY_EVENT) &&
+ strcmp("COMMIT", ((Query_log_event *) ev)->query) == 0))
+ {
+ DBUG_ASSERT(thd->transaction.all.modified_non_trans_table);
+ rli->abort_slave= 1;
+ pthread_mutex_unlock(&rli->data_lock);
+ delete ev;
+ rli->inc_event_relay_log_pos();
+ DBUG_RETURN(0);
+ };);
+ }
+
exec_res= apply_event_and_update_pos(ev, thd, rli);
/*
@@ -2377,18 +2699,6 @@ on this slave.\
}
-static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info)
-{
- if (io_slave_killed(thd, mi))
- {
- if (info && global_system_variables.log_warnings)
- sql_print_information("%s", info);
- return TRUE;
- }
- return FALSE;
-}
-
-
/**
@brief Try to reconnect slave IO thread.
@@ -2528,6 +2838,16 @@ pthread_handler_t handle_slave_io(void *arg)
mi->master_log_name,
llstr(mi->master_log_pos,llbuff)));
+ /* This must be called before running any binlog_relay_io hooks */
+ my_pthread_setspecific_ptr(RPL_MASTER_INFO, mi);
+
+ if (RUN_HOOK(binlog_relay_io, thread_start, (thd, mi)))
+ {
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ ER(ER_SLAVE_FATAL_ERROR), "Failed to run 'thread_start' hook");
+ goto err;
+ }
+
if (!(mi->mysql = mysql = mysql_init(NULL)))
{
mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
@@ -2617,7 +2937,7 @@ connected:
while (!io_slave_killed(thd,mi))
{
thd_proc_info(thd, "Requesting binlog dump");
- if (request_dump(mysql, mi, &suppress_warnings))
+ if (request_dump(thd, mysql, mi, &suppress_warnings))
{
sql_print_error("Failed on request_dump()");
if (check_io_slave_killed(thd, mi, "Slave I/O thread killed while \
@@ -2637,6 +2957,7 @@ requesting master dump") ||
goto err;
goto connected;
});
+ const char *event_buf;
DBUG_ASSERT(mi->last_error().number == 0);
while (!io_slave_killed(thd,mi))
@@ -2697,14 +3018,37 @@ Stopping slave I/O thread due to out-of-memory error from master");
retry_count=0; // ok event, reset retry counter
thd_proc_info(thd, "Queueing master event to the relay log");
- if (queue_event(mi,(const char*)mysql->net.read_pos + 1,
- event_len))
+ event_buf= (const char*)mysql->net.read_pos + 1;
+ if (RUN_HOOK(binlog_relay_io, after_read_event,
+ (thd, mi,(const char*)mysql->net.read_pos + 1,
+ event_len, &event_buf, &event_len)))
+ {
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ ER(ER_SLAVE_FATAL_ERROR),
+ "Failed to run 'after_read_event' hook");
+ goto err;
+ }
+
+ /* XXX: 'synced' should be updated by queue_event to indicate
+ whether event has been synced to disk */
+ bool synced= 0;
+ if (queue_event(mi, event_buf, event_len))
{
mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE,
ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
"could not queue event from master");
goto err;
}
+
+ if (RUN_HOOK(binlog_relay_io, after_queue_event,
+ (thd, mi, event_buf, event_len, synced)))
+ {
+ mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ ER(ER_SLAVE_FATAL_ERROR),
+ "Failed to run 'after_queue_event' hook");
+ goto err;
+ }
+
if (flush_master_info(mi, 1))
{
sql_print_error("Failed to flush master info file");
@@ -2750,6 +3094,7 @@ err:
// print the current replication position
sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
+ RUN_HOOK(binlog_relay_io, thread_stop, (thd, mi));
thd->set_query(NULL, 0);
thd->reset_db(NULL, 0);
if (mysql)
@@ -2979,9 +3324,9 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
if (check_temp_dir(rli->slave_patternload_file))
{
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(),
+ rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
"Unable to use slave's temporary directory %s - %s",
- slave_load_tmpdir, thd->main_da.message());
+ slave_load_tmpdir, thd->stmt_da->message());
goto err;
}
@@ -2991,7 +3336,7 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
execute_init_command(thd, &sys_init_slave, &LOCK_sys_init_slave);
if (thd->is_slave_error)
{
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(),
+ rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
"Slave SQL thread aborted. Can't execute init_slave query");
goto err;
}
@@ -3035,20 +3380,20 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
if (thd->is_error())
{
- char const *const errmsg= thd->main_da.message();
+ char const *const errmsg= thd->stmt_da->message();
DBUG_PRINT("info",
- ("thd->main_da.sql_errno()=%d; rli->last_error.number=%d",
- thd->main_da.sql_errno(), last_errno));
+ ("thd->stmt_da->sql_errno()=%d; rli->last_error.number=%d",
+ thd->stmt_da->sql_errno(), last_errno));
if (last_errno == 0)
{
/*
This function is reporting an error which was not reported
while executing exec_relay_log_event().
*/
- rli->report(ERROR_LEVEL, thd->main_da.sql_errno(), "%s", errmsg);
+ rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), "%s", errmsg);
}
- else if (last_errno != thd->main_da.sql_errno())
+ else if (last_errno != thd->stmt_da->sql_errno())
{
/*
* An error was reported while executing exec_relay_log_event()
@@ -3057,12 +3402,12 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
* what caused the problem.
*/
sql_print_error("Slave (additional info): %s Error_code: %d",
- errmsg, thd->main_da.sql_errno());
+ errmsg, thd->stmt_da->sql_errno());
}
}
/* Print any warnings issued */
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
MYSQL_ERROR *err;
/*
Added controlled slave thread cancel for replication
@@ -3071,9 +3416,9 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
bool udf_error = false;
while ((err= it++))
{
- if (err->code == ER_CANT_OPEN_LIBRARY)
+ if (err->get_sql_errno() == ER_CANT_OPEN_LIBRARY)
udf_error = true;
- sql_print_warning("Slave: %s Error_code: %d",err->msg, err->code);
+ sql_print_warning("Slave: %s Error_code: %d", err->get_message_text(), err->get_sql_errno());
}
if (udf_error)
sql_print_error("Error loading user-defined library, slave SQL "
@@ -3555,9 +3900,11 @@ static int queue_old_event(Master_info *mi, const char *buf,
static int queue_event(Master_info* mi,const char* buf, ulong event_len)
{
int error= 0;
+ String error_msg;
ulong inc_pos;
Relay_log_info *rli= &mi->rli;
pthread_mutex_t *log_lock= rli->relay_log.get_log_lock();
+ ulong s_id;
DBUG_ENTER("queue_event");
LINT_INIT(inc_pos);
@@ -3589,7 +3936,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue);
if (unlikely(process_io_rotate(mi,&rev)))
{
- error= 1;
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
goto err;
}
/*
@@ -3616,7 +3963,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
Log_event::read_log_event(buf, event_len, &errmsg,
mi->rli.relay_log.description_event_for_queue)))
{
- error= 2;
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
goto err;
}
delete mi->rli.relay_log.description_event_for_queue;
@@ -3635,6 +3982,56 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
}
break;
+
+ case HEARTBEAT_LOG_EVENT:
+ {
+ /*
+ HB (heartbeat) cannot come before RL (Relay)
+ */
+ char llbuf[22];
+ Heartbeat_log_event hb(buf, event_len, mi->rli.relay_log.description_event_for_queue);
+ if (!hb.is_valid())
+ {
+ error= ER_SLAVE_HEARTBEAT_FAILURE;
+ error_msg.append(STRING_WITH_LEN("inconsistent heartbeat event content;"));
+ error_msg.append(STRING_WITH_LEN("the event's data: log_file_name "));
+ error_msg.append(hb.get_log_ident(), (uint) strlen(hb.get_log_ident()));
+ error_msg.append(STRING_WITH_LEN(" log_pos "));
+ llstr(hb.log_pos, llbuf);
+ error_msg.append(llbuf, strlen(llbuf));
+ goto err;
+ }
+ mi->received_heartbeats++;
+      /*
+        Compare the local and the event's versions of log_file, log_pos.
+
+        A heartbeat is sent only after an event corresponding to the
+        coordinates the heartbeat carries.
+        The slave cannot have a difference in coordinates except in the one
+        special case when mi->master_log_name, master_log_pos have never
+        been updated by a Rotate event, i.e. when the slave does not have
+        any history with the master (and therefore mi->master_log_pos is NULL).
+
+        TODO: handle `when' for SHOW SLAVE STATUS' Seconds_Behind_Master
+      */
+ if ((memcmp(mi->master_log_name, hb.get_log_ident(), hb.get_ident_len())
+ && mi->master_log_name != NULL)
+ || mi->master_log_pos != hb.log_pos)
+ {
+ /* missed events of heartbeat from the past */
+ error= ER_SLAVE_HEARTBEAT_FAILURE;
+ error_msg.append(STRING_WITH_LEN("heartbeat is not compatible with local info;"));
+ error_msg.append(STRING_WITH_LEN("the event's data: log_file_name "));
+ error_msg.append(hb.get_log_ident(), (uint) strlen(hb.get_log_ident()));
+ error_msg.append(STRING_WITH_LEN(" log_pos "));
+ llstr(hb.log_pos, llbuf);
+ error_msg.append(llbuf, strlen(llbuf));
+ goto err;
+ }
+ goto skip_relay_logging;
+ }
+ break;
+
default:
inc_pos= event_len;
break;
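The HEARTBEAT_LOG_EVENT branch above never writes to the relay log; it only verifies that the heartbeat carries exactly the coordinates the slave already has, since the master sends a heartbeat only when it has nothing newer than those coordinates. Condensed into a standalone predicate -- illustrative only, the helper and its parameter names are not part of the patch:

    #include <cstddef>
    #include <cstring>

    /* Illustrative restatement of the coordinate check done for
       HEARTBEAT_LOG_EVENT in queue_event(). */
    static bool heartbeat_matches_position(const char *local_log_name,
                                           unsigned long long local_log_pos,
                                           const char *hb_log_name,
                                           size_t hb_ident_len,
                                           unsigned long long hb_log_pos)
    {
      return memcmp(local_log_name, hb_log_name, hb_ident_len) == 0 &&
             local_log_pos == hb_log_pos;
    }

A mismatch is reported as ER_SLAVE_HEARTBEAT_FAILURE, because it means regular events between the two positions were missed.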
@@ -3654,9 +4051,20 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
*/
pthread_mutex_lock(log_lock);
-
- if ((uint4korr(buf + SERVER_ID_OFFSET) == ::server_id) &&
- !mi->rli.replicate_same_server_id)
+ s_id= uint4korr(buf + SERVER_ID_OFFSET);
+ if ((s_id == ::server_id && !mi->rli.replicate_same_server_id) ||
+ /*
+       The following conjunction deals with IGNORE_SERVER_IDS, if set.
+       Even if the master itself is on the ignore list, Format_description
+       and Rotate log events still have to be executed.
+ */
+ (mi->ignore_server_ids.elements > 0 &&
+ mi->shall_ignore_server_id(s_id) &&
+ /* everything is filtered out from non-master */
+ (s_id != mi->master_id ||
+        /* for the master, meta information is still necessary */
+ buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
+ buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT)))
{
/*
Do not write it to the relay log.
@@ -3671,10 +4079,14 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
But events which were generated by this slave and which do not exist in
the master's binlog (i.e. Format_desc, Rotate & Stop) should not increment
mi->master_log_pos.
+    If the event originated remotely and is being filtered out by
+    IGNORE_SERVER_IDS, it still increments mi->master_log_pos
+    as well as rli->group_relay_log_pos.
*/
- if (buf[EVENT_TYPE_OFFSET]!=FORMAT_DESCRIPTION_EVENT &&
- buf[EVENT_TYPE_OFFSET]!=ROTATE_EVENT &&
- buf[EVENT_TYPE_OFFSET]!=STOP_EVENT)
+ if (!(s_id == ::server_id && !mi->rli.replicate_same_server_id) ||
+ buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT &&
+ buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT &&
+ buf[EVENT_TYPE_OFFSET] != STOP_EVENT)
{
mi->master_log_pos+= inc_pos;
memcpy(rli->ign_master_log_name_end, mi->master_log_name, FN_REFLEN);
@@ -3682,8 +4094,8 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
rli->ign_master_log_pos_end= mi->master_log_pos;
}
rli->relay_log.signal_update(); // the slave SQL thread needs to re-check
- DBUG_PRINT("info", ("master_log_pos: %lu, event originating from the same server, ignored",
- (ulong) mi->master_log_pos));
+ DBUG_PRINT("info", ("master_log_pos: %lu, event originating from %u server, ignored",
+ (ulong) mi->master_log_pos, uint4korr(buf + SERVER_ID_OFFSET)));
}
else
{
@@ -3695,15 +4107,23 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len)
rli->relay_log.harvest_bytes_written(&rli->log_space_total);
}
else
- error= 3;
+ {
+ error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
+ }
rli->ign_master_log_name_end[0]= 0; // last event is not ignored
}
pthread_mutex_unlock(log_lock);
-
+skip_relay_logging:
+
err:
pthread_mutex_unlock(&mi->data_lock);
DBUG_PRINT("info", ("error: %d", error));
+ if (error)
+ mi->report(ERROR_LEVEL, error, ER(error),
+ (error == ER_SLAVE_RELAY_LOG_WRITE_FAILURE)?
+ "could not queue event from master" :
+ error_msg.ptr());
DBUG_RETURN(error);
}
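The rewritten skip condition in queue_event() leans on && binding tighter than ||. Spelled out with explicit parentheses -- an illustration of the intended grouping, not code from the patch (it assumes the Log_event type constants from log_event.h) -- an incoming event is dropped from the relay log when:

    /* Illustrative restatement of the skip condition; the parameter names
       are invented, only the grouping mirrors the code above. */
    static bool should_skip_event(unsigned int s_id,
                                  unsigned int own_server_id,
                                  bool replicate_same_server_id,
                                  bool id_is_ignored,   /* IGNORE_SERVER_IDS hit */
                                  bool id_is_master,
                                  unsigned char event_type)
    {
      bool own_event= (s_id == own_server_id) && !replicate_same_server_id;
      bool ignored_foreign_event=
        id_is_ignored &&
        (!id_is_master ||                            /* non-master: drop all   */
         (event_type != FORMAT_DESCRIPTION_EVENT &&  /* master: keep meta only */
          event_type != ROTATE_EVENT));
      return own_event || ignored_foreign_event;
    }

Only in the own_event case does the position bookkeeping skip Format_description, Rotate and Stop events; events filtered purely by IGNORE_SERVER_IDS still advance mi->master_log_pos, as the comment inside the hunk above explains.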
@@ -3909,6 +4329,71 @@ static int safe_reconnect(THD* thd, MYSQL* mysql, Master_info* mi,
}
+MYSQL *rpl_connect_master(MYSQL *mysql)
+{
+ THD *thd= current_thd;
+ Master_info *mi= my_pthread_getspecific_ptr(Master_info*, RPL_MASTER_INFO);
+ if (!mi)
+ {
+ sql_print_error("'rpl_connect_master' must be called in slave I/O thread context.");
+ return NULL;
+ }
+
+ bool allocated= false;
+
+ if (!mysql)
+ {
+ if(!(mysql= mysql_init(NULL)))
+ {
+ sql_print_error("rpl_connect_master: failed in mysql_init()");
+ return NULL;
+ }
+ allocated= true;
+ }
+
+ /*
+    XXX: copied from connect_to_master; this function should not
+    change the slave status, so we cannot use connect_to_master
+    directly.
+
+    TODO: factor this part out into a separate function to eliminate duplication
+ */
+ mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout);
+ mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout);
+
+#ifdef HAVE_OPENSSL
+ if (mi->ssl)
+ {
+ mysql_ssl_set(mysql,
+ mi->ssl_key[0]?mi->ssl_key:0,
+ mi->ssl_cert[0]?mi->ssl_cert:0,
+ mi->ssl_ca[0]?mi->ssl_ca:0,
+ mi->ssl_capath[0]?mi->ssl_capath:0,
+ mi->ssl_cipher[0]?mi->ssl_cipher:0);
+ mysql_options(mysql, MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
+ &mi->ssl_verify_server_cert);
+ }
+#endif
+
+ mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset_info->csname);
+ /* This one is not strictly needed but we have it here for completeness */
+ mysql_options(mysql, MYSQL_SET_CHARSET_DIR, (char *) charsets_dir);
+
+ if (io_slave_killed(thd, mi)
+ || !mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0,
+ mi->port, 0, 0))
+ {
+ if (!io_slave_killed(thd, mi))
+ sql_print_error("rpl_connect_master: error connecting to master: %s (server_error: %d)",
+ mysql_error(mysql), mysql_errno(mysql));
+
+ if (allocated)
+ mysql_close(mysql); // this will free the object
+ return NULL;
+ }
+ return mysql;
+}
+
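rpl_connect_master() gives plugin code running inside the slave I/O thread a way to open its own connection to the same master (reusing the configured host, credentials and SSL settings) without touching the Master_info status. A hedged usage sketch; the calling code shown here is hypothetical:

    /* Hypothetical caller executing in the slave I/O thread context. */
    MYSQL *side_channel= rpl_connect_master(NULL); /* NULL: allocate a handle */
    if (side_channel != NULL)
    {
      /* ... issue whatever statements the plugin needs on this connection ... */
      mysql_close(side_channel);                   /* caller owns the handle */
    }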
/*
Store the file and position where the slave SQL (execute) thread is in the
relay log.
@@ -3962,7 +4447,14 @@ bool flush_relay_log_info(Relay_log_info* rli)
error=1;
if (flush_io_cache(file))
error=1;
-
+ if (sync_relayloginfo_period &&
+ !error &&
+ ++(rli->sync_counter) >= sync_relayloginfo_period)
+ {
+ if (my_sync(rli->info_fd, MYF(MY_WME)))
+ error=1;
+ rli->sync_counter= 0;
+ }
/* Flushing the relay log is done by the slave I/O thread */
DBUG_RETURN(error);
}
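flush_relay_log_info() now syncs relay-log.info to disk only on every sync_relayloginfo_period-th successful flush, the same counter trick used for the other sync_* options. The pattern in isolation -- a sketch, not the server code, which calls my_sync() on rli->info_fd:

    #include <unistd.h>

    /* Generic "fsync every Nth successful flush" pattern; period 0 disables
       the forced sync entirely. */
    static int maybe_sync(int fd, unsigned int period, unsigned int *counter)
    {
      if (period == 0 || ++*counter < period)
        return 0;
      *counter= 0;
      return fsync(fd);
    }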
@@ -4211,8 +4703,8 @@ static Log_event* next_event(Relay_log_info* rli)
*/
pthread_mutex_unlock(&rli->log_space_lock);
pthread_cond_broadcast(&rli->log_space_cond);
- // Note that wait_for_update unlocks lock_log !
- rli->relay_log.wait_for_update(rli->sql_thd, 1);
+ // Note that wait_for_update_relay_log unlocks lock_log !
+ rli->relay_log.wait_for_update_relay_log(rli->sql_thd);
// re-acquire data lock since we released it earlier
pthread_mutex_lock(&rli->data_lock);
rli->last_master_timestamp= save_timestamp;
@@ -4369,6 +4861,8 @@ void rotate_relay_log(Master_info* mi)
DBUG_ENTER("rotate_relay_log");
Relay_log_info* rli= &mi->rli;
+ DBUG_EXECUTE_IF("crash_before_rotate_relaylog", abort(););
+
/* We don't lock rli->run_lock. This would lead to deadlocks. */
pthread_mutex_lock(&mi->run_lock);
diff --git a/sql/slave.h b/sql/slave.h
index f356d28b626..eff0fa49f61 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -22,6 +22,17 @@
@file
*/
+
+/**
+  Some of these defines are needed by the parser even when replication is
+  not compiled in (e.g. for embedded builds).
+*/
+
+/**
+  The maximum is defined as (ULONG_MAX/1000) for a 4-byte ulong
+*/
+#define SLAVE_MAX_HEARTBEAT_PERIOD 4294967
+
#ifdef HAVE_REPLICATION
#include "log.h"
@@ -33,7 +44,6 @@
#define MAX_SLAVE_ERROR 2000
-
// Forward declarations
class Relay_log_info;
class Master_info;
@@ -134,6 +144,7 @@ extern ulonglong relay_log_space_limit;
#define SLAVE_FORCE_ALL 4
int init_slave();
+int init_recovery(Master_info* mi, const char** errmsg);
void init_slave_skip_errors(const char* arg);
bool flush_relay_log_info(Relay_log_info* rli);
int register_slave_on_master(MYSQL* mysql);
diff --git a/sql/sp.cc b/sql/sp.cc
index d3c5dfb96d0..b5f4aec4a61 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -158,7 +158,7 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] =
},
{
{ C_STRING_WITH_LEN("comment") },
- { C_STRING_WITH_LEN("char(64)") },
+ { C_STRING_WITH_LEN("text") },
{ C_STRING_WITH_LEN("utf8") }
},
{
@@ -690,16 +690,24 @@ db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp)
struct Silence_deprecated_warning : public Internal_error_handler
{
public:
- virtual bool handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
};
bool
-Silence_deprecated_warning::handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd)
+Silence_deprecated_warning::handle_condition(
+ THD *,
+ uint sql_errno,
+ const char*,
+ MYSQL_ERROR::enum_warning_level level,
+ const char*,
+ MYSQL_ERROR ** cond_hdl)
{
+ *cond_hdl= NULL;
if (sql_errno == ER_WARN_DEPRECATED_SYNTAX &&
level == MYSQL_ERROR::WARN_LEVEL_WARN)
return TRUE;
@@ -1504,7 +1512,7 @@ sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any)
&thd->sp_proc_cache, FALSE) != NULL ||
sp_find_routine(thd, TYPE_ENUM_FUNCTION, name,
&thd->sp_func_cache, FALSE) != NULL;
- mysql_reset_errors(thd, TRUE);
+ thd->warning_info->clear_warning_info(thd->query_id);
if (sp_object_found)
{
if (any)
@@ -1660,11 +1668,11 @@ static bool add_used_routine(LEX *lex, Query_arena *arena,
const LEX_STRING *key,
TABLE_LIST *belong_to_view)
{
- hash_init_opt(&lex->sroutines, system_charset_info,
- Query_tables_list::START_SROUTINES_HASH_SIZE,
- 0, 0, sp_sroutine_key, 0, 0);
+ my_hash_init_opt(&lex->sroutines, system_charset_info,
+ Query_tables_list::START_SROUTINES_HASH_SIZE,
+ 0, 0, sp_sroutine_key, 0, 0);
- if (!hash_search(&lex->sroutines, (uchar *)key->str, key->length))
+ if (!my_hash_search(&lex->sroutines, (uchar *)key->str, key->length))
{
Sroutine_hash_entry *rn=
(Sroutine_hash_entry *)arena->alloc(sizeof(Sroutine_hash_entry) +
@@ -1730,7 +1738,7 @@ void sp_remove_not_own_routines(LEX *lex)
but we want to be more future-proof.
*/
next_rt= not_own_rt->next;
- hash_delete(&lex->sroutines, (uchar *)not_own_rt);
+ my_hash_delete(&lex->sroutines, (uchar *)not_own_rt);
}
*(Sroutine_hash_entry **)lex->sroutines_list_own_last= NULL;
@@ -1763,8 +1771,8 @@ bool sp_update_sp_used_routines(HASH *dst, HASH *src)
{
for (uint i=0 ; i < src->records ; i++)
{
- Sroutine_hash_entry *rt= (Sroutine_hash_entry *)hash_element(src, i);
- if (!hash_search(dst, (uchar *)rt->key.str, rt->key.length))
+ Sroutine_hash_entry *rt= (Sroutine_hash_entry *)my_hash_element(src, i);
+ if (!my_hash_search(dst, (uchar *)rt->key.str, rt->key.length))
{
if (my_hash_insert(dst, (uchar *)rt))
return TRUE;
@@ -1794,7 +1802,7 @@ sp_update_stmt_used_routines(THD *thd, LEX *lex, HASH *src,
{
for (uint i=0 ; i < src->records ; i++)
{
- Sroutine_hash_entry *rt= (Sroutine_hash_entry *)hash_element(src, i);
+ Sroutine_hash_entry *rt= (Sroutine_hash_entry *)my_hash_element(src, i);
(void)add_used_routine(lex, thd->stmt_arena, &rt->key, belong_to_view);
}
}
diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc
index b8209a373a2..0da5e44b846 100644
--- a/sql/sp_cache.cc
+++ b/sql/sp_cache.cc
@@ -50,7 +50,8 @@ public:
inline sp_head *lookup(char *name, uint namelen)
{
- return (sp_head *)hash_search(&m_hashtable, (const uchar *)name, namelen);
+ return (sp_head *) my_hash_search(&m_hashtable, (const uchar *)name,
+ namelen);
}
#ifdef NOT_USED
@@ -261,15 +262,15 @@ sp_cache::sp_cache()
sp_cache::~sp_cache()
{
- hash_free(&m_hashtable);
+ my_hash_free(&m_hashtable);
}
void
sp_cache::init()
{
- hash_init(&m_hashtable, system_charset_info, 0, 0, 0,
- hash_get_key_for_sp_head, hash_free_sp_head, 0);
+ my_hash_init(&m_hashtable, system_charset_info, 0, 0, 0,
+ hash_get_key_for_sp_head, hash_free_sp_head, 0);
version= 0;
}
@@ -277,5 +278,5 @@ sp_cache::init()
void
sp_cache::cleanup()
{
- hash_free(&m_hashtable);
+ my_hash_free(&m_hashtable);
}
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 78539034677..7644cc8e92d 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -14,6 +14,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "mysql_priv.h"
+#include "sql_prepare.h"
#include "probes_mysql.h"
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation
@@ -175,9 +176,9 @@ sp_get_flags_for_command(LEX *lex)
case SQLCOM_SHOW_AUTHORS:
case SQLCOM_SHOW_BINLOGS:
case SQLCOM_SHOW_BINLOG_EVENTS:
+ case SQLCOM_SHOW_RELAYLOG_EVENTS:
case SQLCOM_SHOW_CHARSETS:
case SQLCOM_SHOW_COLLATIONS:
- case SQLCOM_SHOW_COLUMN_TYPES:
case SQLCOM_SHOW_CONTRIBUTORS:
case SQLCOM_SHOW_CREATE:
case SQLCOM_SHOW_CREATE_DB:
@@ -534,8 +535,9 @@ sp_head::sp_head()
m_backpatch.empty();
m_cont_backpatch.empty();
m_lex.empty();
- hash_init(&m_sptabs, system_charset_info, 0, 0, 0, sp_table_key, 0, 0);
- hash_init(&m_sroutines, system_charset_info, 0, 0, 0, sp_sroutine_key, 0, 0);
+ my_hash_init(&m_sptabs, system_charset_info, 0, 0, 0, sp_table_key, 0, 0);
+ my_hash_init(&m_sroutines, system_charset_info, 0, 0, 0, sp_sroutine_key,
+ 0, 0);
m_body_utf8.str= NULL;
m_body_utf8.length= 0;
@@ -784,8 +786,8 @@ sp_head::destroy()
m_thd->lex= lex;
}
- hash_free(&m_sptabs);
- hash_free(&m_sroutines);
+ my_hash_free(&m_sptabs);
+ my_hash_free(&m_sroutines);
DBUG_VOID_RETURN;
}
@@ -1086,6 +1088,7 @@ sp_head::execute(THD *thd)
Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer;
Object_creation_ctx *saved_creation_ctx;
+ Warning_info *saved_warning_info, warning_info(thd->warning_info->warn_id());
/* Use some extra margin for possible SP recursion and functions */
if (check_stack_overrun(thd, 8 * STACK_MIN_SIZE, (uchar*)&old_packet))
@@ -1134,6 +1137,11 @@ sp_head::execute(THD *thd)
thd->is_slave_error= 0;
old_arena= thd->stmt_arena;
+ /* Push a new warning information area. */
+ warning_info.append_warning_info(thd, thd->warning_info);
+ saved_warning_info= thd->warning_info;
+ thd->warning_info= &warning_info;
+
/*
Switch query context. This has to be done early as this is sometimes
allocated trough sql_alloc
@@ -1204,7 +1212,7 @@ sp_head::execute(THD *thd)
*/
thd->spcont->callers_arena= &backup_arena;
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
/* Discard the initial part of executing routines. */
thd->profiling.discard_current_query();
#endif
@@ -1213,7 +1221,7 @@ sp_head::execute(THD *thd)
sp_instr *i;
uint hip; // Handler ip
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
/*
Treat each "instr" of a routine as discrete unit that could be profiled.
Profiling only records information for segments of code that set the
@@ -1226,7 +1234,7 @@ sp_head::execute(THD *thd)
i = get_instr(ip); // Returns NULL when we're done.
if (i == NULL)
{
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.discard_current_query();
#endif
break;
@@ -1281,31 +1289,35 @@ sp_head::execute(THD *thd)
*/
if (ctx)
{
- uint hf;
+ uint handler_index;
- switch (ctx->found_handler(&hip, &hf)) {
+ switch (ctx->found_handler(& hip, & handler_index)) {
case SP_HANDLER_NONE:
break;
case SP_HANDLER_CONTINUE:
thd->restore_active_arena(&execute_arena, &backup_arena);
thd->set_n_backup_active_arena(&execute_arena, &backup_arena);
ctx->push_hstack(i->get_cont_dest());
- // Fall through
+ /* Fall through */
default:
+ if (ctx->end_partial_result_set)
+ thd->protocol->end_partial_result_set(thd);
ip= hip;
err_status= FALSE;
ctx->clear_handler();
- ctx->enter_handler(hip);
+ ctx->enter_handler(hip, handler_index);
thd->clear_error();
thd->is_fatal_error= 0;
thd->killed= THD::NOT_KILLED;
thd->mysys_var->abort= 0;
continue;
}
+
+ ctx->end_partial_result_set= FALSE;
}
- } while (!err_status && !thd->killed);
+ } while (!err_status && !thd->killed && !thd->is_fatal_error);
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.finish_current_query();
thd->profiling.start_new_query("tail end of routine");
#endif
@@ -1337,6 +1349,10 @@ sp_head::execute(THD *thd)
thd->stmt_arena= old_arena;
state= EXECUTED;
+ /* Restore the caller's original warning information area. */
+ saved_warning_info->merge_with_routine_info(thd, thd->warning_info);
+ thd->warning_info= saved_warning_info;
+
done:
DBUG_PRINT("info", ("err_status: %d killed: %d is_slave_error: %d report_error: %d",
err_status, thd->killed, thd->is_slave_error,
@@ -1946,15 +1962,19 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
}
}
- /*
- Okay, got values for all arguments. Close tables that might be used by
- arguments evaluation. If arguments evaluation required prelocking mode,
+ /*
+ Okay, got values for all arguments. Close tables that might be used by
+ arguments evaluation. If arguments evaluation required prelocking mode,
we'll leave it here.
*/
if (!thd->in_sub_stmt)
{
thd->lex->unit.cleanup();
- close_thread_tables(thd);
+
+ thd_proc_info(thd, "closing tables");
+ close_thread_tables(thd);
+ thd_proc_info(thd, 0);
+
thd->rollback_item_tree_changes();
}
@@ -2027,6 +2047,16 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
err_status= TRUE;
break;
}
+
+ Send_field *out_param_info= new (thd->mem_root) Send_field();
+ nctx->get_item(i)->make_field(out_param_info);
+ out_param_info->db_name= m_db.str;
+ out_param_info->table_name= m_name.str;
+ out_param_info->org_table_name= m_name.str;
+ out_param_info->col_name= spvar->name.str;
+ out_param_info->org_col_name= spvar->name.str;
+
+ srp->set_out_param_info(out_param_info);
}
}
@@ -2362,7 +2392,8 @@ bool check_show_routine_access(THD *thd, sp_head *sp, bool *full_access)
bzero((char*) &tables,sizeof(tables));
tables.db= (char*) "mysql";
tables.table_name= tables.alias= (char*) "proc";
- *full_access= (!check_table_access(thd, SELECT_ACL, &tables, 1, TRUE) ||
+ *full_access= (!check_table_access(thd, SELECT_ACL, &tables, FALSE,
+ 1, TRUE) ||
(!strcmp(sp->m_definer_user.str,
thd->security_ctx->priv_user) &&
!strcmp(sp->m_definer_host.str,
@@ -2445,7 +2476,7 @@ sp_head::show_create_routine(THD *thd, int type)
fields.push_back(new Item_empty_string("Database Collation",
MY_CS_NAME_SIZE));
- if (protocol->send_fields(&fields,
+ if (protocol->send_result_set_metadata(&fields,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
{
DBUG_RETURN(TRUE);
@@ -2537,7 +2568,8 @@ void sp_head::optimize()
else
{
if (src != dst)
- { // Move the instruction and update prev. jumps
+ {
+ /* Move the instruction and update prev. jumps */
sp_instr *ibp;
List_iterator_fast<sp_instr> li(bp);
@@ -2630,8 +2662,8 @@ sp_head::show_routine_code(THD *thd)
field_list.push_back(new Item_uint("Pos", 9));
// 1024 is used so as not to confuse old clients
field_list.push_back(new Item_empty_string("Instruction",
- max(buffer.length(), 1024)));
- if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
+ max(buffer.length(), 1024)));
+ if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF))
DBUG_RETURN(1);
@@ -2807,7 +2839,7 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables)
Check whether we have access to the tables for this statement
and open and lock them before executing the instruction's core function.
*/
- if (check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE)
+ if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)
|| open_and_lock_tables(thd, tables))
result= -1;
else
@@ -2843,7 +2875,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
query= thd->query();
query_length= thd->query_length();
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
/* This s-p instr is profilable and will be captured. */
thd->profiling.set_query_source(m_query.str, m_query.length);
#endif
@@ -2863,8 +2895,8 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
{
res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this);
- if (thd->main_da.is_eof())
- net_end_statement(thd);
+ if (thd->stmt_da->is_eof())
+ thd->protocol->end_statement();
query_cache_end_of_result(thd);
@@ -2877,7 +2909,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
thd->query_name_consts= 0;
if (!thd->is_error())
- thd->main_da.reset_diagnostics_area();
+ thd->stmt_da->reset_diagnostics_area();
}
DBUG_RETURN(res || thd->is_error());
}
@@ -3253,7 +3285,7 @@ sp_instr_hpush_jump::execute(THD *thd, uint *nextp)
sp_cond_type_t *p;
while ((p= li++))
- thd->spcont->push_handler(p, m_ip+1, m_type, m_frame);
+ thd->spcont->push_handler(p, m_ip+1, m_type);
*nextp= m_dest;
DBUG_RETURN(0);
@@ -3820,7 +3852,7 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
for (uint i= 0 ; i < m_sptabs.records ; i++)
{
- tab= (SP_TABLE *)hash_element(&m_sptabs, i);
+ tab= (SP_TABLE*) my_hash_element(&m_sptabs, i);
tab->query_lock_count= 0;
}
@@ -3854,8 +3886,8 @@ sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check)
(and therefore should not be prelocked). Otherwise we will erroneously
treat table with same name but with different alias as non-temporary.
*/
- if ((tab= (SP_TABLE *)hash_search(&m_sptabs, (uchar *)tname, tlen)) ||
- ((tab= (SP_TABLE *)hash_search(&m_sptabs, (uchar *)tname,
+ if ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname, tlen)) ||
+ ((tab= (SP_TABLE*) my_hash_search(&m_sptabs, (uchar *)tname,
tlen - alen - 1)) &&
tab->temp))
{
@@ -3941,7 +3973,7 @@ sp_head::add_used_tables_to_table_list(THD *thd,
{
char *tab_buff, *key_buff;
TABLE_LIST *table;
- SP_TABLE *stab= (SP_TABLE *)hash_element(&m_sptabs, i);
+ SP_TABLE *stab= (SP_TABLE*) my_hash_element(&m_sptabs, i);
if (stab->temp)
continue;
diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc
index 302faf3f681..31c307ebe74 100644
--- a/sql/sp_pcontext.cc
+++ b/sql/sp_pcontext.cc
@@ -51,7 +51,8 @@ sp_cond_check(LEX_STRING *sqlstate)
(c < 'A' || 'Z' < c))
return FALSE;
}
- if (strcmp(sqlstate->str, "00000") == 0)
+ /* SQLSTATE class '00' : completion condition */
+ if (strncmp(sqlstate->str, "00", 2) == 0)
return FALSE;
return TRUE;
}
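sp_cond_check() now rejects every SQLSTATE in class '00' (successful completion), not just the literal '00000', so no handler can be declared for a completion condition. The two asserts below simply restate the strncmp check:

    #include <cassert>
    #include <cstring>

    int main()
    {
      /* Class "00" (completion) is rejected; class "01" (warning) is still
         a valid condition value for a handler. */
      assert(strncmp("00001", "00", 2) == 0);   /* now rejected, like "00000" */
      assert(strncmp("01000", "00", 2) != 0);   /* still accepted */
      return 0;
    }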
diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h
index 3145ba2fea4..75e55880e60 100644
--- a/sql/sp_pcontext.h
+++ b/sql/sp_pcontext.h
@@ -71,7 +71,7 @@ typedef struct sp_label
typedef struct sp_cond_type
{
enum { number, state, warning, notfound, exception } type;
- char sqlstate[6];
+ char sqlstate[SQLSTATE_LENGTH+1];
uint mysqlerr;
} sp_cond_type_t;
diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc
index be8f705a53e..d966de03b4e 100644
--- a/sql/sp_rcontext.cc
+++ b/sql/sp_rcontext.cc
@@ -32,7 +32,8 @@
sp_rcontext::sp_rcontext(sp_pcontext *root_parsing_ctx,
Field *return_value_fld,
sp_rcontext *prev_runtime_ctx)
- :m_root_parsing_ctx(root_parsing_ctx),
+ :end_partial_result_set(FALSE),
+ m_root_parsing_ctx(root_parsing_ctx),
m_var_table(0),
m_var_items(0),
m_return_value_fld(return_value_fld),
@@ -68,21 +69,28 @@ sp_rcontext::~sp_rcontext()
bool sp_rcontext::init(THD *thd)
{
+ uint handler_count= m_root_parsing_ctx->max_handler_index();
+ uint i;
+
in_sub_stmt= thd->in_sub_stmt;
if (init_var_table(thd) || init_var_items())
return TRUE;
+ if (!(m_raised_conditions= new (thd->mem_root) MYSQL_ERROR[handler_count]))
+ return TRUE;
+
+ for (i= 0; i<handler_count; i++)
+ m_raised_conditions[i].init(thd->mem_root);
+
return
!(m_handler=
- (sp_handler_t*)thd->alloc(m_root_parsing_ctx->max_handler_index() *
- sizeof(sp_handler_t))) ||
+ (sp_handler_t*)thd->alloc(handler_count * sizeof(sp_handler_t))) ||
!(m_hstack=
- (uint*)thd->alloc(m_root_parsing_ctx->max_handler_index() *
- sizeof(uint))) ||
+ (uint*)thd->alloc(handler_count * sizeof(uint))) ||
!(m_in_handler=
- (uint*)thd->alloc(m_root_parsing_ctx->max_handler_index() *
- sizeof(uint))) ||
+ (sp_active_handler_t*)thd->alloc(handler_count *
+ sizeof(sp_active_handler_t))) ||
!(m_cstack=
(sp_cursor**)thd->alloc(m_root_parsing_ctx->max_cursor_index() *
sizeof(sp_cursor*))) ||
@@ -194,13 +202,19 @@ sp_rcontext::set_return_value(THD *thd, Item **return_value_item)
*/
bool
-sp_rcontext::find_handler(THD *thd, uint sql_errno,
- MYSQL_ERROR::enum_warning_level level)
+sp_rcontext::find_handler(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
{
if (m_hfound >= 0)
- return 1; // Already got one
+ {
+ *cond_hdl= NULL;
+ return TRUE; // Already got one
+ }
- const char *sqlstate= mysql_errno_to_sqlstate(sql_errno);
int i= m_hcount, found= -1;
/*
@@ -220,7 +234,7 @@ sp_rcontext::find_handler(THD *thd, uint sql_errno,
/* Check active handlers, to avoid invoking one recursively */
while (j--)
- if (m_in_handler[j] == m_handler[i].handler)
+ if (m_in_handler[j].ip == m_handler[i].handler)
break;
if (j >= 0)
continue; // Already executing this handler
@@ -264,10 +278,26 @@ sp_rcontext::find_handler(THD *thd, uint sql_errno,
*/
if (m_prev_runtime_ctx && IS_EXCEPTION_CONDITION(sqlstate) &&
level == MYSQL_ERROR::WARN_LEVEL_ERROR)
- return m_prev_runtime_ctx->find_handler(thd, sql_errno, level);
+ return m_prev_runtime_ctx->find_handler(thd,
+ sql_errno,
+ sqlstate,
+ level,
+ msg,
+ cond_hdl);
+ *cond_hdl= NULL;
return FALSE;
}
+
m_hfound= found;
+
+ MYSQL_ERROR *raised= NULL;
+ DBUG_ASSERT(m_hfound >= 0);
+ DBUG_ASSERT((uint) m_hfound < m_root_parsing_ctx->max_handler_index());
+ raised= & m_raised_conditions[m_hfound];
+ raised->clear();
+ raised->set(sql_errno, sqlstate, level, msg);
+
+ *cond_hdl= raised;
return TRUE;
}
@@ -293,9 +323,12 @@ sp_rcontext::find_handler(THD *thd, uint sql_errno,
FALSE if no handler was found.
*/
bool
-sp_rcontext::handle_error(uint sql_errno,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd)
+sp_rcontext::handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
{
MYSQL_ERROR::enum_warning_level elevated_level= level;
@@ -308,7 +341,7 @@ sp_rcontext::handle_error(uint sql_errno,
elevated_level= MYSQL_ERROR::WARN_LEVEL_ERROR;
}
- return find_handler(thd, sql_errno, elevated_level);
+ return find_handler(thd, sql_errno, sqlstate, elevated_level, msg, cond_hdl);
}
void
@@ -335,7 +368,7 @@ sp_rcontext::pop_cursors(uint count)
}
void
-sp_rcontext::push_handler(struct sp_cond_type *cond, uint h, int type, uint f)
+sp_rcontext::push_handler(struct sp_cond_type *cond, uint h, int type)
{
DBUG_ENTER("sp_rcontext::push_handler");
DBUG_ASSERT(m_hcount < m_root_parsing_ctx->max_handler_index());
@@ -343,7 +376,6 @@ sp_rcontext::push_handler(struct sp_cond_type *cond, uint h, int type, uint f)
m_handler[m_hcount].cond= cond;
m_handler[m_hcount].handler= h;
m_handler[m_hcount].type= type;
- m_handler[m_hcount].foffset= f;
m_hcount+= 1;
DBUG_PRINT("info", ("m_hcount: %d", m_hcount));
@@ -382,11 +414,13 @@ sp_rcontext::pop_hstack()
}
void
-sp_rcontext::enter_handler(int hid)
+sp_rcontext::enter_handler(uint hip, uint hindex)
{
DBUG_ENTER("sp_rcontext::enter_handler");
DBUG_ASSERT(m_ihsp < m_root_parsing_ctx->max_handler_index());
- m_in_handler[m_ihsp++]= hid;
+ m_in_handler[m_ihsp].ip= hip;
+ m_in_handler[m_ihsp].index= hindex;
+ m_ihsp++;
DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp));
DBUG_VOID_RETURN;
}
@@ -396,11 +430,29 @@ sp_rcontext::exit_handler()
{
DBUG_ENTER("sp_rcontext::exit_handler");
DBUG_ASSERT(m_ihsp);
+ uint hindex= m_in_handler[m_ihsp-1].index;
+ m_raised_conditions[hindex].clear();
m_ihsp-= 1;
DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp));
DBUG_VOID_RETURN;
}
+MYSQL_ERROR*
+sp_rcontext::raised_condition() const
+{
+ if (m_ihsp > 0)
+ {
+ uint hindex= m_in_handler[m_ihsp - 1].index;
+ MYSQL_ERROR *raised= & m_raised_conditions[hindex];
+ return raised;
+ }
+
+ if (m_prev_runtime_ctx)
+ return m_prev_runtime_ctx->raised_condition();
+
+ return NULL;
+}
+
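raised_condition() exposes the MYSQL_ERROR that fired the innermost active handler, falling back to the caller's runtime context, which is exactly what a RESIGNAL-style statement needs in order to re-raise or modify the caught condition. A hedged sketch of a consumer -- the helper below is illustrative, not part of this patch:

    /* Illustrative consumer of the new sp_rcontext::raised_condition(). */
    static bool reraise_current_condition(THD *thd)
    {
      MYSQL_ERROR *cond= thd->spcont ? thd->spcont->raised_condition() : NULL;
      if (cond == NULL)
        return TRUE;              /* not inside an active SQL handler */
      my_message(cond->get_sql_errno(), cond->get_message_text(), MYF(0));
      return FALSE;
    }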
int
sp_rcontext::set_variable(THD *thd, uint var_idx, Item **value)
diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h
index 368a017da21..2af96cf64dd 100644
--- a/sql/sp_rcontext.h
+++ b/sql/sp_rcontext.h
@@ -34,12 +34,21 @@ class sp_instr_cpush;
typedef struct
{
+ /** Condition caught by this HANDLER. */
struct sp_cond_type *cond;
- uint handler; // Location of handler
+ /** Location (instruction pointer) of the handler code. */
+ uint handler;
+ /** Handler type (EXIT, CONTINUE). */
int type;
- uint foffset; // Frame offset for the handlers declare level
} sp_handler_t;
+typedef struct
+{
+ /** Instruction pointer of the active handler. */
+ uint ip;
+ /** Handler index of the active handler. */
+ uint index;
+} sp_active_handler_t;
/*
This class is a runtime context of a Stored Routine. It is used in an
@@ -75,6 +84,13 @@ class sp_rcontext : public Sql_alloc
*/
Query_arena *callers_arena;
+ /*
+    End an open result set before starting to execute a CONTINUE/EXIT
+    handler, if one is found; otherwise the client will hang due to
+    a violation of the client/server protocol.
+ */
+ bool end_partial_result_set;
+
#ifndef DBUG_OFF
/*
The routine for which this runtime context is created. Used for checking
@@ -107,31 +123,41 @@ class sp_rcontext : public Sql_alloc
return m_return_value_set;
}
- void push_handler(struct sp_cond_type *cond, uint h, int type, uint f);
+ void push_handler(struct sp_cond_type *cond, uint h, int type);
void pop_handlers(uint count);
// Returns 1 if a handler was found, 0 otherwise.
bool
- find_handler(THD *thd, uint sql_errno,MYSQL_ERROR::enum_warning_level level);
+ find_handler(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
// If there is an error handler for this error, handle it and return TRUE.
bool
- handle_error(uint sql_errno,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
// Returns handler type and sets *ip to location if one was found
inline int
- found_handler(uint *ip, uint *fp)
+ found_handler(uint *ip, uint *index)
{
if (m_hfound < 0)
return SP_HANDLER_NONE;
*ip= m_handler[m_hfound].handler;
- *fp= m_handler[m_hfound].foffset;
+ *index= m_hfound;
return m_handler[m_hfound].type;
}
+ MYSQL_ERROR* raised_condition() const;
+
// Returns true if we found a handler in this context
inline bool
found_handler_here()
@@ -150,7 +176,12 @@ class sp_rcontext : public Sql_alloc
uint pop_hstack();
- void enter_handler(int hid);
+ /**
+ Enter a SQL exception handler.
+ @param hip the handler instruction pointer
+ @param index the handler index
+ */
+ void enter_handler(uint hip, uint index);
void exit_handler();
@@ -214,10 +245,18 @@ private:
bool in_sub_stmt;
sp_handler_t *m_handler; // Visible handlers
+
+ /**
+ SQL conditions caught by each handler.
+ This is an array indexed by handler index.
+ */
+ MYSQL_ERROR *m_raised_conditions;
+
uint m_hcount; // Stack pointer for m_handler
uint *m_hstack; // Return stack for continue handlers
uint m_hsp; // Stack pointer for m_hstack
- uint *m_in_handler; // Active handler, for recursion check
+ /** Active handler stack. */
+ sp_active_handler_t *m_in_handler;
uint m_ihsp; // Stack pointer for m_in_handler
int m_hfound; // Set by find_handler; -1 if not found
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 77c72066429..8eb429e9ffc 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -262,8 +262,8 @@ my_bool acl_init(bool dont_read_acl_tables)
DBUG_ENTER("acl_init");
acl_cache= new hash_filo(ACL_CACHE_SIZE, 0, 0,
- (hash_get_key) acl_entry_get_key,
- (hash_free_key) free,
+ (my_hash_get_key) acl_entry_get_key,
+ (my_hash_free_key) free,
&my_charset_utf8_bin);
if (dont_read_acl_tables)
{
@@ -638,7 +638,7 @@ void acl_free(bool end)
delete_dynamic(&acl_users);
delete_dynamic(&acl_dbs);
delete_dynamic(&acl_wild_hosts);
- hash_free(&acl_check_hosts);
+ my_hash_free(&acl_check_hosts);
if (!end)
acl_cache->clear(1); /* purecov: inspected */
else
@@ -702,7 +702,7 @@ my_bool acl_reload(THD *thd)
if (simple_open_n_lock_tables(thd, tables))
{
sql_print_error("Fatal error: Can't open and lock privilege tables: %s",
- thd->main_da.message());
+ thd->stmt_da->message());
goto end;
}
@@ -714,7 +714,7 @@ my_bool acl_reload(THD *thd)
old_acl_dbs=acl_dbs;
old_mem=mem;
delete_dynamic(&acl_wild_hosts);
- hash_free(&acl_check_hosts);
+ my_hash_free(&acl_check_hosts);
if ((return_val= acl_load(thd, tables)))
{ // Error. Revert to old list
@@ -1429,8 +1429,8 @@ static void init_check_host(void)
DBUG_ENTER("init_check_host");
VOID(my_init_dynamic_array(&acl_wild_hosts,sizeof(struct acl_host_and_ip),
acl_users.elements,1));
- VOID(hash_init(&acl_check_hosts,system_charset_info,acl_users.elements,0,0,
- (hash_get_key) check_get_key,0,0));
+ VOID(my_hash_init(&acl_check_hosts,system_charset_info,acl_users.elements,0,0,
+ (my_hash_get_key) check_get_key,0,0));
if (!allow_all_hosts)
{
for (uint i=0 ; i < acl_users.elements ; i++)
@@ -1452,8 +1452,9 @@ static void init_check_host(void)
if (j == acl_wild_hosts.elements) // If new
(void) push_dynamic(&acl_wild_hosts,(uchar*) &acl_user->host);
}
- else if (!hash_search(&acl_check_hosts,(uchar*) acl_user->host.hostname,
- strlen(acl_user->host.hostname)))
+ else if (!my_hash_search(&acl_check_hosts,(uchar*)
+ acl_user->host.hostname,
+ strlen(acl_user->host.hostname)))
{
if (my_hash_insert(&acl_check_hosts,(uchar*) acl_user))
{ // End of memory
@@ -1480,7 +1481,7 @@ static void init_check_host(void)
void rebuild_check_host(void)
{
delete_dynamic(&acl_wild_hosts);
- hash_free(&acl_check_hosts);
+ my_hash_free(&acl_check_hosts);
init_check_host();
}
@@ -1493,8 +1494,8 @@ bool acl_check_host(const char *host, const char *ip)
return 0;
VOID(pthread_mutex_lock(&acl_cache->lock));
- if ((host && hash_search(&acl_check_hosts,(uchar*) host,strlen(host))) ||
- (ip && hash_search(&acl_check_hosts,(uchar*) ip, strlen(ip))))
+ if ((host && my_hash_search(&acl_check_hosts,(uchar*) host,strlen(host))) ||
+ (ip && my_hash_search(&acl_check_hosts,(uchar*) ip, strlen(ip))))
{
VOID(pthread_mutex_unlock(&acl_cache->lock));
return 0; // Found host
@@ -1879,7 +1880,7 @@ static bool test_if_create_new_users(THD *thd)
sctx->priv_user, tl.db, 0);
if (!(db_access & INSERT_ACL))
{
- if (check_grant(thd, INSERT_ACL, &tl, 0, UINT_MAX, 1))
+ if (check_grant(thd, INSERT_ACL, &tl, FALSE, UINT_MAX, TRUE))
create_new_users=0;
}
}
@@ -2311,8 +2312,8 @@ GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u,
const char *t, ulong p, ulong c)
:GRANT_NAME(h,d,u,t,p, FALSE), cols(c)
{
- (void) hash_init2(&hash_columns,4,system_charset_info,
- 0,0,0, (hash_get_key) get_key_column,0,0);
+ (void) my_hash_init2(&hash_columns,4,system_charset_info,
+ 0,0,0, (my_hash_get_key) get_key_column,0,0);
}
@@ -2355,15 +2356,15 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
if (!db || !tname)
{
/* Wrong table row; Ignore it */
- hash_clear(&hash_columns); /* allow for destruction */
+ my_hash_clear(&hash_columns); /* allow for destruction */
cols= 0;
return;
}
cols= (ulong) form->field[7]->val_int();
cols = fix_rights_for_column(cols);
- (void) hash_init2(&hash_columns,4,system_charset_info,
- 0,0,0, (hash_get_key) get_key_column,0,0);
+ (void) my_hash_init2(&hash_columns,4,system_charset_info,
+ 0,0,0, (my_hash_get_key) get_key_column,0,0);
if (cols)
{
uint key_prefix_len;
@@ -2420,7 +2421,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
GRANT_TABLE::~GRANT_TABLE()
{
- hash_free(&hash_columns);
+ my_hash_free(&hash_columns);
}
@@ -2434,7 +2435,7 @@ static uchar* get_grant_table(GRANT_NAME *buff, size_t *length,
void free_grant_table(GRANT_TABLE *grant_table)
{
- hash_free(&grant_table->hash_columns);
+ my_hash_free(&grant_table->hash_columns);
}
@@ -2455,11 +2456,11 @@ static GRANT_NAME *name_hash_search(HASH *name_hash,
len = (uint) (strmov(name_ptr, tname) - helping) + 1;
if (name_tolower)
my_casedn_str(files_charset_info, name_ptr);
- for (grant_name= (GRANT_NAME*) hash_first(name_hash, (uchar*) helping,
- len, &state);
+ for (grant_name= (GRANT_NAME*) my_hash_first(name_hash, (uchar*) helping,
+ len, &state);
grant_name ;
- grant_name= (GRANT_NAME*) hash_next(name_hash,(uchar*) helping,
- len, &state))
+ grant_name= (GRANT_NAME*) my_hash_next(name_hash,(uchar*) helping,
+ len, &state))
{
if (exact)
{
@@ -2503,7 +2504,8 @@ table_hash_search(const char *host, const char *ip, const char *db,
inline GRANT_COLUMN *
column_hash_search(GRANT_TABLE *t, const char *cname, uint length)
{
- return (GRANT_COLUMN*) hash_search(&t->hash_columns, (uchar*) cname,length);
+ return (GRANT_COLUMN*) my_hash_search(&t->hash_columns,
+ (uchar*) cname, length);
}
@@ -2687,7 +2689,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
goto end; /* purecov: deadcode */
}
if (grant_column)
- hash_delete(&g_t->hash_columns,(uchar*) grant_column);
+ my_hash_delete(&g_t->hash_columns,(uchar*) grant_column);
}
}
} while (!table->file->index_next(table->record[0]) &&
@@ -2813,7 +2815,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
}
else
{
- hash_delete(&column_priv_hash,(uchar*) grant_table);
+ my_hash_delete(&column_priv_hash,(uchar*) grant_table);
}
DBUG_RETURN(0);
@@ -2934,7 +2936,8 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
}
else
{
- hash_delete(is_proc ? &proc_priv_hash : &func_priv_hash,(uchar*) grant_name);
+ my_hash_delete(is_proc ? &proc_priv_hash : &func_priv_hash,(uchar*)
+ grant_name);
}
DBUG_RETURN(0);
@@ -3175,8 +3178,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
column_priv= 0;
for (uint idx=0 ; idx < grant_table->hash_columns.records ; idx++)
{
- grant_column= (GRANT_COLUMN*) hash_element(&grant_table->hash_columns,
- idx);
+ grant_column= (GRANT_COLUMN*)
+ my_hash_element(&grant_table->hash_columns, idx);
grant_column->rights&= ~rights; // Fix other columns
column_priv|= grant_column->rights;
}
@@ -3515,9 +3518,9 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
void grant_free(void)
{
DBUG_ENTER("grant_free");
- hash_free(&column_priv_hash);
- hash_free(&proc_priv_hash);
- hash_free(&func_priv_hash);
+ my_hash_free(&column_priv_hash);
+ my_hash_free(&proc_priv_hash);
+ my_hash_free(&func_priv_hash);
free_root(&memex,MYF(0));
DBUG_VOID_RETURN;
}
@@ -3574,12 +3577,12 @@ static my_bool grant_load_procs_priv(TABLE *p_table)
MEM_ROOT **save_mem_root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,
THR_MALLOC);
DBUG_ENTER("grant_load_procs_priv");
- (void) hash_init(&proc_priv_hash, &my_charset_utf8_bin,
- 0,0,0, (hash_get_key) get_grant_table,
- 0,0);
- (void) hash_init(&func_priv_hash, &my_charset_utf8_bin,
- 0,0,0, (hash_get_key) get_grant_table,
- 0,0);
+ (void) my_hash_init(&proc_priv_hash, &my_charset_utf8_bin,
+ 0,0,0, (my_hash_get_key) get_grant_table,
+ 0,0);
+ (void) my_hash_init(&func_priv_hash, &my_charset_utf8_bin,
+ 0,0,0, (my_hash_get_key) get_grant_table,
+ 0,0);
p_table->file->ha_index_init(0, 1);
p_table->use_all_columns();
@@ -3675,9 +3678,9 @@ static my_bool grant_load(THD *thd, TABLE_LIST *tables)
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
- (void) hash_init(&column_priv_hash, &my_charset_utf8_bin,
- 0,0,0, (hash_get_key) get_grant_table,
- (hash_free_key) free_grant_table,0);
+ (void) my_hash_init(&column_priv_hash, &my_charset_utf8_bin,
+ 0,0,0, (my_hash_get_key) get_grant_table,
+ (my_hash_free_key) free_grant_table,0);
t_table = tables[0].table;
c_table = tables[1].table;
@@ -3780,8 +3783,8 @@ static my_bool grant_reload_procs_priv(THD *thd)
}
else
{
- hash_free(&old_proc_priv_hash);
- hash_free(&old_func_priv_hash);
+ my_hash_free(&old_proc_priv_hash);
+ my_hash_free(&old_func_priv_hash);
}
rw_unlock(&LOCK_grant);
@@ -3850,7 +3853,7 @@ my_bool grant_reload(THD *thd)
}
else
{
- hash_free(&old_column_priv_hash);
+ my_hash_free(&old_column_priv_hash);
free_root(&old_mem,MYF(0));
}
rw_unlock(&LOCK_grant);
@@ -3871,40 +3874,52 @@ end:
DBUG_RETURN(return_val);
}
-/****************************************************************************
- Check table level grants
- SYNOPSIS
- bool check_grant()
- thd Thread handler
- want_access Bits of privileges user needs to have
- tables List of tables to check. The user should have 'want_access'
- to all tables in list.
- show_table <> 0 if we are in show table. In this case it's enough to have
- any privilege for the table
- number Check at most this number of tables.
- no_errors If 0 then we write an error. The error is sent directly to
- the client
+/**
+ @brief Check table level grants
- RETURN
- 0 ok
- 1 Error: User did not have the requested privileges
+ @param thd Thread handler
+ @param want_access Bits of privileges user needs to have.
+ @param tables List of tables to check. The user should have
+ 'want_access' to all tables in list.
+ @param any_combination_will_do TRUE if it's enough to have any privilege for
+ any combination of the table columns.
+ @param number Check at most this number of tables.
+ @param no_errors TRUE if no error should be sent directly to the client.
- NOTE
- This functions assumes that either number of tables to be inspected
+  If table->grant.want_privilege != 0 then the requested privileges were
+  in the set of COL_ACLS but access was not granted on the table level. As
+  a consequence an extra check of column privileges is required.
+
+  Specifically, if this function returns FALSE the user has some kind of
+  privilege on a combination of columns in each table.
+
+  This function is usually preceded by check_access, which establishes the
+  User-, Db- and Host access rights.
+
+ @see check_access
+ @see check_table_access
+
+  @note This function assumes that either the number of tables to be inspected
by it is limited explicitly (i.e. it is not UINT_MAX) or the table list
used and the thd->lex->query_tables_own_last value correspond to each
other (the latter should be either 0 or point to the next_global member
of one of the elements of this table list).
-****************************************************************************/
+
+ @return Access status
+  @retval FALSE Access granted; however, column privileges might still need
+    to be checked.
+  @retval TRUE The user did not have the requested privileges for one or
+    more of the tables.
+
+*/
bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
- uint show_table, uint number, bool no_errors)
+ bool any_combination_will_do, uint number, bool no_errors)
{
TABLE_LIST *table, *first_not_own_table= thd->lex->first_not_own_table();
Security_context *sctx= thd->security_ctx;
uint i;
- ulong orig_want_access= want_access;
DBUG_ENTER("check_grant");
DBUG_ASSERT(number > 0);
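With the new signature the old show_table flag becomes any_combination_will_do: SHOW-style callers pass TRUE so that any privilege on any column combination satisfies the check. A hedged sketch of the two calling styles (the surrounding callers are illustrative; the argument order matches the prototype above):

    /* Ordinary DML-style check: SELECT needed on every table, errors sent. */
    if (check_grant(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE))
      return TRUE;

    /* SHOW COLUMNS-style check: any privilege on any column combination is
       enough, inspect one table only, suppress the error message. */
    if (check_grant(thd, SELECT_ACL, table, TRUE, 1, TRUE))
      return TRUE;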
@@ -3922,7 +3937,10 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
i < number && table != first_not_own_table;
table= table->next_global, i++)
{
- /* Remove SHOW_VIEW_ACL, because it will be checked during making view */
+ /*
+ Save a copy of the privileges without the SHOW_VIEW_ACL attribute.
+      SHOW_VIEW_ACL itself will be checked when the view is processed.
+ */
table->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL);
}
@@ -3935,7 +3953,6 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
sctx = test(table->security_ctx) ?
table->security_ctx : thd->security_ctx;
- want_access= orig_want_access;
want_access&= ~sctx->master_access;
if (!want_access)
continue; // ok
@@ -3965,8 +3982,13 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
want_access &= ~table->grant.privilege;
goto err; // No grants
}
- if (show_table)
- continue; // We have some priv on this
+
+ /*
+ For SHOW COLUMNS, SHOW INDEX it is enough to have some
+ privileges on any column combination on the table.
+ */
+ if (any_combination_will_do)
+ continue;
table->grant.grant_table=grant_table; // Remember for column test
table->grant.version=grant_version;
@@ -3984,7 +4006,7 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
}
}
rw_unlock(&LOCK_grant);
- DBUG_RETURN(0);
+ DBUG_RETURN(FALSE);
err:
rw_unlock(&LOCK_grant);
@@ -3998,7 +4020,97 @@ err:
sctx->host_or_ip,
table ? table->get_table_name() : "unknown");
}
- DBUG_RETURN(1);
+ DBUG_RETURN(TRUE);
+}
+
+
+/**
+  Check if every table in the table list has at least one of the requested
+  table-level privileges granted to the current user.
+
+ @param thd A pointer to the thread context.
+ @param required_access Set of privileges to compare against.
+  @param[in,out] tables A list of tables to be checked.
+
+  @note If the table grant hash contains an entry for a table, that entry
+  will be attached to the corresponding TABLE_LIST object in 'tables'.
+
+ @return
+    @retval TRUE Every table in the list has at least one of the requested
+      privileges granted at the table level to the current user.
+    @retval FALSE At least one table has none of the requested privileges
+      granted at the table level to the current user.
+*/
+
+bool has_any_table_level_privileges(THD *thd, ulong required_access,
+ TABLE_LIST *tables)
+{
+
+ Security_context *sctx;
+ GRANT_TABLE *grant_table;
+ TABLE_LIST *table;
+
+ /* For each table in tables */
+ for (table= tables; table; table= table->next_global)
+ {
+ /*
+ If this table is a VIEW, then it will supply its own security context.
+ This is because VIEWs can have a DEFINER or an INVOKER security role.
+ */
+ sctx= table->security_ctx ? table->security_ctx : thd->security_ctx;
+
+ /*
+ Get privileges from table_priv and column_priv tables by searching
+ the cache.
+ */
+ rw_rdlock(&LOCK_grant);
+ grant_table= table_hash_search(sctx->host, sctx->ip,
+ table->db, sctx->priv_user,
+ table->table_name,0);
+ rw_unlock(&LOCK_grant);
+
+ /* Stop if there are no grants for the current user */
+ if (!grant_table)
+ return FALSE;
+
+ /*
+ Save a pointer to the found grant_table in the table object.
+ This pointer can later be used to verify other access requirements
+ without having to look up the grant table in the hash.
+ */
+ table->grant.grant_table= grant_table;
+ table->grant.version= grant_version;
+ table->grant.privilege|= grant_table->privs;
+ /*
+      Save all privileges which might be subject to column privileges
+      and which are not yet granted by table-level ACLs.
+      This can later be used for column privilege checks.
+ */
+ table->grant.want_privilege= ((required_access & COL_ACLS)
+ & ~table->grant.privilege);
+
+ /*
+      If the requested privileges intersect with the current table
+      privileges, we have found at least one common privilege on the
+      table level.
+ */
+ if (grant_table->privs & required_access)
+ continue; /* Check next table */
+
+ /*
+      There are no table-level privileges which satisfy any of the
+      requested privileges. There might still be column privileges which
+      do, though.
+ */
+ return FALSE;
+ }
+
+ /*
+    All tables in the TABLE_LIST satisfy the requirement of having at
+    least one of the requested privileges at the table level.
+ */
+
+ return TRUE;
}
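has_any_table_level_privileges() is intended as a cheap pre-check before the heavier column-level machinery; when it returns TRUE the caller can still rely on table->grant.want_privilege for a later column check. A hedged sketch of the calling pattern -- the surrounding handler is illustrative, not code from this patch:

    /* Illustrative caller; SHOW_CREATE_TABLE_ACLS is added to sql_acl.h
       further down in this patch. */
    if (!has_any_table_level_privileges(thd, SHOW_CREATE_TABLE_ACLS, table_list))
    {
      my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), "SHOW",
               thd->security_ctx->priv_user, thd->security_ctx->host_or_ip,
               table_list->table_name);
      return TRUE;                         /* access denied */
    }
    /* Otherwise continue; column privileges may still be checked later. */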
@@ -4245,7 +4357,7 @@ static bool check_grant_db_routine(THD *thd, const char *db, HASH *hash)
for (uint idx= 0; idx < hash->records; ++idx)
{
- GRANT_NAME *item= (GRANT_NAME*) hash_element(hash, idx);
+ GRANT_NAME *item= (GRANT_NAME*) my_hash_element(hash, idx);
if (strcmp(item->user, sctx->priv_user) == 0 &&
strcmp(item->db, db) == 0 &&
@@ -4278,8 +4390,9 @@ bool check_grant_db(THD *thd,const char *db)
for (uint idx=0 ; idx < column_priv_hash.records ; idx++)
{
- GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
- idx);
+ GRANT_TABLE *grant_table= (GRANT_TABLE*)
+ my_hash_element(&column_priv_hash,
+ idx);
if (len < grant_table->key_length &&
!memcmp(grant_table->hash_key,helping,len) &&
compare_hostname(&grant_table->host, sctx->host, sctx->ip))
@@ -4502,13 +4615,13 @@ static const char *command_array[]=
"ALTER", "SHOW DATABASES", "SUPER", "CREATE TEMPORARY TABLES",
"LOCK TABLES", "EXECUTE", "REPLICATION SLAVE", "REPLICATION CLIENT",
"CREATE VIEW", "SHOW VIEW", "CREATE ROUTINE", "ALTER ROUTINE",
- "CREATE USER", "EVENT", "TRIGGER"
+ "CREATE USER", "EVENT", "TRIGGER", "CREATE TABLESPACE"
};
static uint command_lengths[]=
{
6, 6, 6, 6, 6, 4, 6, 8, 7, 4, 5, 10, 5, 5, 14, 5, 23, 11, 7, 17, 18, 11, 9,
- 14, 13, 11, 5, 7
+ 14, 13, 11, 5, 7, 17
};
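The entry appended to command_lengths must equal the length of the string appended to command_array: "CREATE TABLESPACE" is 6 + 1 + 10 = 17 characters. A compile-time restatement of that arithmetic (illustrative only):

    /* Length bookkeeping for the new privilege name. */
    static_assert(sizeof("CREATE TABLESPACE") - 1 == 17,
                  "command_lengths entry must match command_array string");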
@@ -4563,7 +4676,7 @@ bool mysql_show_grants(THD *thd,LEX_USER *lex_user)
strxmov(buff,"Grants for ",lex_user->user.str,"@",
lex_user->host.str,NullS);
field_list.push_back(field);
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
{
VOID(pthread_mutex_unlock(&acl_cache->lock));
@@ -4751,8 +4864,8 @@ bool mysql_show_grants(THD *thd,LEX_USER *lex_user)
for (index=0 ; index < column_priv_hash.records ; index++)
{
const char *user, *host;
- GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
- index);
+ GRANT_TABLE *grant_table= (GRANT_TABLE*)
+ my_hash_element(&column_priv_hash, index);
if (!(user=grant_table->user))
user= "";
@@ -4805,7 +4918,7 @@ bool mysql_show_grants(THD *thd,LEX_USER *lex_user)
col_index++)
{
GRANT_COLUMN *grant_column = (GRANT_COLUMN*)
- hash_element(&grant_table->hash_columns,col_index);
+ my_hash_element(&grant_table->hash_columns,col_index);
if (grant_column->rights & j)
{
if (!found_col)
@@ -4895,7 +5008,7 @@ static int show_routine_grants(THD* thd, LEX_USER *lex_user, HASH *hash,
for (index=0 ; index < hash->records ; index++)
{
const char *user, *host;
- GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, index);
+ GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, index);
if (!(user=grant_proc->user))
user= "";
@@ -5408,13 +5521,13 @@ static int handle_grant_struct(uint struct_no, bool drop,
break;
case 2:
- grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx);
+ grant_name= (GRANT_NAME*) my_hash_element(&column_priv_hash, idx);
user= grant_name->user;
host= grant_name->host.hostname;
break;
case 3:
- grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx);
+ grant_name= (GRANT_NAME*) my_hash_element(&proc_priv_hash, idx);
user= grant_name->user;
host= grant_name->host.hostname;
break;
@@ -5447,11 +5560,11 @@ static int handle_grant_struct(uint struct_no, bool drop,
break;
case 2:
- hash_delete(&column_priv_hash, (uchar*) grant_name);
+ my_hash_delete(&column_priv_hash, (uchar*) grant_name);
break;
case 3:
- hash_delete(&proc_priv_hash, (uchar*) grant_name);
+ my_hash_delete(&proc_priv_hash, (uchar*) grant_name);
break;
}
elements--;
@@ -5485,8 +5598,8 @@ static int handle_grant_struct(uint struct_no, bool drop,
is renamed, the hash key is changed. Update the hash to
ensure that the position matches the new hash key value
*/
- hash_update(&column_priv_hash, (uchar*) grant_name,
- (uchar*) grant_name->hash_key, grant_name->key_length);
+ my_hash_update(&column_priv_hash, (uchar*) grant_name,
+ (uchar*) grant_name->hash_key, grant_name->key_length);
break;
}
}
@@ -5977,8 +6090,8 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
for (counter= 0, revoked= 0 ; counter < column_priv_hash.records ; )
{
const char *user,*host;
- GRANT_TABLE *grant_table= (GRANT_TABLE*)hash_element(&column_priv_hash,
- counter);
+ GRANT_TABLE *grant_table=
+ (GRANT_TABLE*) my_hash_element(&column_priv_hash, counter);
if (!(user=grant_table->user))
user= "";
if (!(host=grant_table->host.hostname))
@@ -6024,7 +6137,7 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
for (counter= 0, revoked= 0 ; counter < hash->records ; )
{
const char *user,*host;
- GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter);
+ GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, counter);
if (!(user=grant_proc->user))
user= "";
if (!(host=grant_proc->host.hostname))
@@ -6083,9 +6196,12 @@ public:
virtual ~Silence_routine_definer_errors()
{}
- virtual bool handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
bool has_errors() { return is_grave; }
@@ -6094,18 +6210,23 @@ private:
};
bool
-Silence_routine_definer_errors::handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd)
+Silence_routine_definer_errors::handle_condition(
+ THD *thd,
+ uint sql_errno,
+ const char*,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
{
+ *cond_hdl= NULL;
if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
{
switch (sql_errno)
{
case ER_NONEXISTING_PROC_GRANT:
/* Convert the error into a warning. */
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, sql_errno, message);
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ sql_errno, msg);
return TRUE;
default:
is_grave= TRUE;
@@ -6164,7 +6285,7 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
{
for (counter= 0, revoked= 0 ; counter < hash->records ; )
{
- GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter);
+ GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, counter);
if (!my_strcasecmp(&my_charset_utf8_bin, grant_proc->db, sp_db) &&
!my_strcasecmp(system_charset_info, grant_proc->tname, sp_name))
{
@@ -6374,6 +6495,7 @@ static bool update_schema_privilege(THD *thd, TABLE *table, char *buff,
CHARSET_INFO *cs= system_charset_info;
restore_record(table, s->default_values);
table->field[0]->store(buff, (uint) strlen(buff), cs);
+ table->field[1]->store(STRING_WITH_LEN("def"), cs);
if (db)
table->field[i++]->store(db, (uint) strlen(db), cs);
if (t_name)
@@ -6554,7 +6676,7 @@ int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
for (index=0 ; index < column_priv_hash.records ; index++)
{
const char *user, *host, *is_grantable= "YES";
- GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
+ GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
index);
if (!(user=grant_table->user))
user= "";
@@ -6637,7 +6759,7 @@ int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
for (index=0 ; index < column_priv_hash.records ; index++)
{
const char *user, *host, *is_grantable= "YES";
- GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
+ GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
index);
if (!(user=grant_table->user))
user= "";
@@ -6672,7 +6794,7 @@ int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
col_index++)
{
GRANT_COLUMN *grant_column = (GRANT_COLUMN*)
- hash_element(&grant_table->hash_columns,col_index);
+ my_hash_element(&grant_table->hash_columns,col_index);
if ((grant_column->rights & j) && (table_access & j))
{
if (update_schema_privilege(thd, table, buff, grant_table->db,
diff --git a/sql/sql_acl.h b/sql/sql_acl.h
index 4c835e2718c..25a4766e58c 100644
--- a/sql/sql_acl.h
+++ b/sql/sql_acl.h
@@ -1,3 +1,6 @@
+#ifndef SQL_ACL_INCLUDED
+#define SQL_ACL_INCLUDED
+
/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -43,6 +46,7 @@
#define CREATE_USER_ACL (1L << 25)
#define EVENT_ACL (1L << 26)
#define TRIGGER_ACL (1L << 27)
+#define CREATE_TABLESPACE_ACL (1L << 28)
/*
don't forget to update
1. static struct show_privileges_st sys_privileges[]
@@ -51,7 +55,6 @@
4. acl_init() or whatever - to define behaviour for old privilege tables
5. sql_yacc.yy - for GRANT/REVOKE to work
*/
-#define EXTRA_ACL (1L << 29)
#define NO_ACCESS (1L << 30)
#define DB_ACLS \
(UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \
@@ -79,11 +82,17 @@
REFERENCES_ACL | INDEX_ACL | ALTER_ACL | SHOW_DB_ACL | SUPER_ACL | \
CREATE_TMP_ACL | LOCK_TABLES_ACL | REPL_SLAVE_ACL | REPL_CLIENT_ACL | \
EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | CREATE_PROC_ACL | \
- ALTER_PROC_ACL | CREATE_USER_ACL | EVENT_ACL | TRIGGER_ACL)
+ ALTER_PROC_ACL | CREATE_USER_ACL | EVENT_ACL | TRIGGER_ACL | \
+ CREATE_TABLESPACE_ACL)
#define DEFAULT_CREATE_PROC_ACLS \
(ALTER_PROC_ACL | EXECUTE_ACL)
+#define SHOW_CREATE_TABLE_ACLS \
+(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | \
+ CREATE_ACL | DROP_ACL | ALTER_ACL | INDEX_ACL | \
+ TRIGGER_ACL | REFERENCES_ACL | GRANT_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL)
+
/*
Defines to change the above bits to how things are stored in tables
This is needed as the 'host' and 'db' table is missing a few privileges
@@ -237,7 +246,7 @@ my_bool grant_init();
void grant_free(void);
my_bool grant_reload(THD *thd);
bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
- uint show_command, uint number, bool dont_print_error);
+ bool any_combination_will_do, uint number, bool no_errors);
bool check_grant_column (THD *thd, GRANT_INFO *grant,
const char *db_name, const char *table_name,
const char *name, uint length, Security_context *sctx);
@@ -268,7 +277,12 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
bool check_routine_level_acl(THD *thd, const char *db, const char *name,
bool is_proc);
bool is_acl_user(const char *host, const char *user);
+bool has_any_table_level_privileges(THD *thd, ulong required_access,
+ TABLE_LIST *tables);
+
#ifdef NO_EMBEDDED_ACCESS_CHECKS
#define check_grant(A,B,C,D,E,F) 0
#define check_grant_db(A,B) 0
+#define has_any_table_level_privileges(A,B,C) 0
#endif
+#endif /* SQL_ACL_INCLUDED */
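
The header above collapses privileges into single bits of a ulong (CREATE_TABLESPACE_ACL joins the mask, EXTRA_ACL is gone) and renames check_grant()'s first bool to any_combination_will_do. A minimal, self-contained sketch of the bit arithmetic that naming suggests; the *_BIT constants and acl_satisfied() are illustrative stand-ins, not the server's macros:

#include <cstdio>

// Hypothetical stand-ins for the server's ACL bits; only the shape of the
// check matters here.
static const unsigned long SEL_BIT  = 1UL << 0;
static const unsigned long INS_BIT  = 1UL << 1;
static const unsigned long DROP_BIT = 1UL << 2;

// Two matching modes suggested by the new parameter name: every requested
// bit must be granted, or any single requested bit is enough.
static bool acl_satisfied(unsigned long want, unsigned long have,
                          bool any_combination_will_do)
{
  if (any_combination_will_do)
    return (want & have) != 0;        // any requested bit suffices
  return (want & have) == want;       // all requested bits required
}

int main()
{
  unsigned long granted= SEL_BIT | INS_BIT;
  printf("%d\n", acl_satisfied(SEL_BIT | DROP_BIT, granted, true));   // 1
  printf("%d\n", acl_satisfied(SEL_BIT | DROP_BIT, granted, false));  // 0
  return 0;
}
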
diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h
index 8807b40857e..8f52b90c874 100644
--- a/sql/sql_analyse.h
+++ b/sql/sql_analyse.h
@@ -1,3 +1,6 @@
+#ifndef SQL_ANALYSE_INCLUDED
+#define SQL_ANALYSE_INCLUDED
+
/* Copyright (C) 2000-2003, 2005 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -355,3 +358,5 @@ public:
select_result *result,
List<Item> &field_list);
};
+
+#endif /* SQL_ANALYSE_INCLUDED */
diff --git a/sql/sql_array.h b/sql/sql_array.h
index e1b22921519..dfaa9b02947 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -1,3 +1,6 @@
+#ifndef SQL_ARRAY_INCLUDED
+#define SQL_ARRAY_INCLUDED
+
/* Copyright (C) 2003 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -66,3 +69,4 @@ public:
}
};
+#endif /* SQL_ARRAY_INCLUDED */
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index cf8a0b32764..a285f63dcb7 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -22,6 +22,7 @@
#include "sp_head.h"
#include "sp.h"
#include "sql_trigger.h"
+#include "sql_prepare.h"
#include <m_ctype.h>
#include <my_dir.h>
#include <hash.h>
@@ -46,9 +47,12 @@ public:
virtual ~Prelock_error_handler() {}
- virtual bool handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
bool safely_trapped_errors();
@@ -59,11 +63,14 @@ private:
bool
-Prelock_error_handler::handle_error(uint sql_errno,
- const char * /* message */,
- MYSQL_ERROR::enum_warning_level /* level */,
- THD * /* thd */)
+Prelock_error_handler::handle_condition(THD *,
+ uint sql_errno,
+ const char*,
+ MYSQL_ERROR::enum_warning_level,
+ const char*,
+ MYSQL_ERROR ** cond_hdl)
{
+ *cond_hdl= NULL;
if (sql_errno == ER_NO_SUCH_TABLE)
{
m_handled_errors++;
@@ -122,9 +129,9 @@ extern "C" uchar *table_cache_key(const uchar *record, size_t *length,
bool table_cache_init(void)
{
- return hash_init(&open_cache, &my_charset_bin, table_cache_size+16,
- 0, 0, table_cache_key,
- (hash_free_key) free_cache_entry, 0) != 0;
+ return my_hash_init(&open_cache, &my_charset_bin, table_cache_size+16,
+ 0, 0, table_cache_key,
+ (my_hash_free_key) free_cache_entry, 0) != 0;
}
void table_cache_free(void)
@@ -134,7 +141,7 @@ void table_cache_free(void)
{
close_cached_tables(NULL, NULL, FALSE, FALSE, FALSE);
if (!open_cache.records) // Safety first
- hash_free(&open_cache);
+ my_hash_free(&open_cache);
}
DBUG_VOID_RETURN;
}
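
The hunks above are part of a mechanical rename of the mysys hash API: hash_init/hash_search/hash_element/hash_delete/hash_free become my_hash_init/my_hash_search/my_hash_element/my_hash_delete/my_hash_free, with the callback typedef now my_hash_free_key. A standalone sketch of the renamed lifecycle with a toy record instead of TABLE; it assumes the in-tree mysys headers and linking against mysys:

#include "my_global.h"
#include "my_sys.h"
#include "m_string.h"
#include "hash.h"

struct Toy_entry
{
  char name[32];
  int  value;
};

extern "C" uchar *toy_get_key(const uchar *record, size_t *length,
                              my_bool not_used __attribute__((unused)))
{
  const Toy_entry *entry= (const Toy_entry*) record;
  *length= strlen(entry->name);
  return (uchar*) entry->name;
}

extern "C" void toy_free_entry(void *record)
{
  my_free(record, MYF(0));
}

int main()
{
  my_init();                                  // initialise mysys

  HASH cache;
  my_hash_init(&cache, &my_charset_bin, 16, 0, 0,
               toy_get_key, (my_hash_free_key) toy_free_entry, 0);

  Toy_entry *entry= (Toy_entry*) my_malloc(sizeof(Toy_entry), MYF(MY_WME));
  strmov(entry->name, "t1");
  entry->value= 42;
  my_hash_insert(&cache, (uchar*) entry);     // the hash owns the record now

  Toy_entry *found= (Toy_entry*)
    my_hash_search(&cache, (const uchar*) "t1", 2);
  if (found)
    my_hash_delete(&cache, (uchar*) found);   // also runs toy_free_entry

  my_hash_free(&cache);                       // frees any remaining records
  my_end(0);
  return 0;
}
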
@@ -169,7 +176,7 @@ static void check_unused(void)
}
for (idx=0 ; idx < open_cache.records ; idx++)
{
- TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
+ TABLE *entry=(TABLE*) my_hash_element(&open_cache,idx);
if (!entry->in_use)
count--;
if (entry->file)
@@ -282,9 +289,9 @@ bool table_def_init(void)
oldest_unused_share= &end_of_unused_share;
end_of_unused_share.prev= &oldest_unused_share;
- return hash_init(&table_def_cache, &my_charset_bin, table_def_size,
- 0, 0, table_def_key,
- (hash_free_key) table_def_free_entry, 0) != 0;
+ return my_hash_init(&table_def_cache, &my_charset_bin, table_def_size,
+ 0, 0, table_def_key,
+ (my_hash_free_key) table_def_free_entry, 0) != 0;
}
@@ -295,7 +302,7 @@ void table_def_free(void)
{
table_def_inited= 0;
pthread_mutex_destroy(&LOCK_table_share);
- hash_free(&table_def_cache);
+ my_hash_free(&table_def_cache);
}
DBUG_VOID_RETURN;
}
@@ -341,8 +348,8 @@ TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
*error= 0;
/* Read table definition from cache */
- if ((share= (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key,
- key_length)))
+ if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key,
+ key_length)))
goto found;
if (!(share= alloc_table_share(table_list, key, key_length)))
@@ -379,7 +386,7 @@ TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
if (open_table_def(thd, share, db_flags))
{
*error= share->error;
- (void) hash_delete(&table_def_cache, (uchar*) share);
+ (void) my_hash_delete(&table_def_cache, (uchar*) share);
DBUG_RETURN(0);
}
share->ref_count++; // Mark in use
@@ -431,7 +438,7 @@ found:
oldest_unused_share->next)
{
pthread_mutex_lock(&oldest_unused_share->mutex);
- VOID(hash_delete(&table_def_cache, (uchar*) oldest_unused_share));
+ VOID(my_hash_delete(&table_def_cache, (uchar*) oldest_unused_share));
}
DBUG_PRINT("exit", ("share: 0x%lx ref_count: %u",
@@ -475,7 +482,7 @@ static TABLE_SHARE
@todo Rework alternative ways to deal with ER_NO_SUCH TABLE.
*/
- if (share || (thd->is_error() && thd->main_da.sql_errno() != ER_NO_SUCH_TABLE))
+ if (share || (thd->is_error() && thd->stmt_da->sql_errno() != ER_NO_SUCH_TABLE))
DBUG_RETURN(share);
@@ -522,7 +529,7 @@ static TABLE_SHARE
DBUG_RETURN(0);
}
/* Table existed in engine. Let's open it */
- mysql_reset_errors(thd, 1); // Clear warnings
+ thd->warning_info->clear_warning_info(thd->query_id);
thd->clear_error(); // Clear error message
DBUG_RETURN(get_table_share(thd, table_list, key, key_length,
db_flags, error));
@@ -586,7 +593,7 @@ void release_table_share(TABLE_SHARE *share, enum release_type type)
if (to_be_deleted)
{
DBUG_PRINT("info", ("Deleting share"));
- hash_delete(&table_def_cache, (uchar*) share);
+ my_hash_delete(&table_def_cache, (uchar*) share);
DBUG_VOID_RETURN;
}
pthread_mutex_unlock(&share->mutex);
@@ -617,7 +624,8 @@ TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name)
table_list.db= (char*) db;
table_list.table_name= (char*) table_name;
key_length= create_table_def_key((THD*) 0, key, &table_list, 0);
- return (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key, key_length);
+ return (TABLE_SHARE*) my_hash_search(&table_def_cache,
+ (uchar*) key, key_length);
}
@@ -714,7 +722,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
for (uint idx=0 ; result == 0 && idx < open_cache.records; idx++)
{
OPEN_TABLE_LIST *table;
- TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
+ TABLE *entry=(TABLE*) my_hash_element(&open_cache,idx);
TABLE_SHARE *share= entry->s;
if (db && my_strcasecmp(system_charset_info, db, share->db.str))
@@ -727,7 +735,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
table_list.table_name= share->table_name.str;
table_list.grant.privilege=0;
- if (check_table_access(thd,SELECT_ACL | EXTRA_ACL,&table_list, 1, TRUE))
+ if (check_table_access(thd,SELECT_ACL,&table_list, TRUE, 1, TRUE))
continue;
/* need to check if we haven't already listed it */
for (table= open_list ; table ; table=table->next)
@@ -863,17 +871,17 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock,
while (unused_tables)
{
#ifdef EXTRA_DEBUG
- if (hash_delete(&open_cache,(uchar*) unused_tables))
+ if (my_hash_delete(&open_cache,(uchar*) unused_tables))
printf("Warning: Couldn't delete open table from hash\n");
#else
- VOID(hash_delete(&open_cache,(uchar*) unused_tables));
+ VOID(my_hash_delete(&open_cache,(uchar*) unused_tables));
#endif
}
/* Free table shares */
while (oldest_unused_share->next)
{
pthread_mutex_lock(&oldest_unused_share->mutex);
- VOID(hash_delete(&table_def_cache, (uchar*) oldest_unused_share));
+ VOID(my_hash_delete(&table_def_cache, (uchar*) oldest_unused_share));
}
DBUG_PRINT("tcache", ("incremented global refresh_version to: %lu",
refresh_version));
@@ -918,7 +926,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock,
*/
for (uint idx=0 ; idx < open_cache.records ; idx++)
{
- TABLE *table=(TABLE*) hash_element(&open_cache,idx);
+ TABLE *table=(TABLE*) my_hash_element(&open_cache,idx);
if (table->in_use)
table->in_use->some_tables_deleted= 1;
}
@@ -963,7 +971,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool have_lock,
found=0;
for (uint idx=0 ; idx < open_cache.records ; idx++)
{
- TABLE *table=(TABLE*) hash_element(&open_cache,idx);
+ TABLE *table=(TABLE*) my_hash_element(&open_cache,idx);
/* Avoid a self-deadlock. */
if (table->in_use == thd)
continue;
@@ -1045,7 +1053,7 @@ bool close_cached_connection_tables(THD *thd, bool if_wait_for_refresh,
for (idx= 0; idx < table_def_cache.records; idx++)
{
- TABLE_SHARE *share= (TABLE_SHARE *) hash_element(&table_def_cache, idx);
+ TABLE_SHARE *share= (TABLE_SHARE *) my_hash_element(&table_def_cache, idx);
/* Ignore if table is not open or does not have a connect_string */
if (!share->connect_string.length || !share->ref_count)
@@ -1201,7 +1209,7 @@ static void close_open_tables(THD *thd)
/* Free tables to hold down open files */
while (open_cache.records > table_cache_size && unused_tables)
- VOID(hash_delete(&open_cache,(uchar*) unused_tables)); /* purecov: tested */
+ VOID(my_hash_delete(&open_cache,(uchar*) unused_tables)); /* purecov: tested */
check_unused();
if (found_old_table)
{
@@ -1284,9 +1292,9 @@ void close_thread_tables(THD *thd)
*/
if (!(thd->state_flags & Open_tables_state::BACKUPS_AVAIL))
{
- thd->main_da.can_overwrite_status= TRUE;
+ thd->stmt_da->can_overwrite_status= TRUE;
ha_autocommit_or_rollback(thd, thd->is_error());
- thd->main_da.can_overwrite_status= FALSE;
+ thd->stmt_da->can_overwrite_status= FALSE;
/*
Reset transaction state, but only if we're not inside a
@@ -1387,7 +1395,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
if (table->needs_reopen_or_name_lock() ||
thd->version != refresh_version || !table->db_stat)
{
- VOID(hash_delete(&open_cache,(uchar*) table));
+ VOID(my_hash_delete(&open_cache,(uchar*) table));
found_old_table=1;
}
else
@@ -2091,7 +2099,7 @@ void unlink_open_table(THD *thd, TABLE *find, bool unlock)
/* Remove table from open_tables list. */
*prev= list->next;
/* Close table. */
- VOID(hash_delete(&open_cache,(uchar*) list)); // Close table
+ VOID(my_hash_delete(&open_cache,(uchar*) list)); // Close table
}
else
{
@@ -2397,7 +2405,7 @@ bool lock_table_name_if_not_cached(THD *thd, const char *db,
key_length= (uint)(strmov(strmov(key, db) + 1, table_name) - key) + 1;
VOID(pthread_mutex_lock(&LOCK_open));
- if (hash_search(&open_cache, (uchar *)key, key_length))
+ if (my_hash_search(&open_cache, (uchar *)key, key_length))
{
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_PRINT("info", ("Table is cached, name-lock is not obtained"));
@@ -2752,11 +2760,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
an implicit "pending locks queue" - see
wait_for_locked_table_names for details.
*/
- for (table= (TABLE*) hash_first(&open_cache, (uchar*) key, key_length,
- &state);
+ for (table= (TABLE*) my_hash_first(&open_cache, (uchar*) key, key_length,
+ &state);
table && table->in_use ;
- table= (TABLE*) hash_next(&open_cache, (uchar*) key, key_length,
- &state))
+ table= (TABLE*) my_hash_next(&open_cache, (uchar*) key, key_length,
+ &state))
{
DBUG_PRINT("tcache", ("in_use table: '%s'.'%s' 0x%lx", table->s->db.str,
table->s->table_name.str, (long) table));
@@ -2868,7 +2876,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
DBUG_PRINT("tcache", ("opening new table"));
/* Free cache if too big */
while (open_cache.records > table_cache_size && unused_tables)
- VOID(hash_delete(&open_cache,(uchar*) unused_tables)); /* purecov: tested */
+ VOID(my_hash_delete(&open_cache,(uchar*) unused_tables)); /* purecov: tested */
if (table_list->create)
{
@@ -2926,7 +2934,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
Set 1 as a flag here
*/
if (error < 0)
- table_list->view= (st_lex*)1;
+ table_list->view= (LEX*)1;
my_free((uchar*)table, MYF(0));
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -3331,7 +3339,7 @@ bool reopen_tables(THD *thd, bool get_locks, bool mark_share_as_old)
*/
if (table->child_l || table->parent)
detach_merge_children(table, TRUE);
- VOID(hash_delete(&open_cache,(uchar*) table));
+ VOID(my_hash_delete(&open_cache,(uchar*) table));
error=1;
}
else
@@ -3360,7 +3368,7 @@ bool reopen_tables(THD *thd, bool get_locks, bool mark_share_as_old)
{
while (err_tables)
{
- VOID(hash_delete(&open_cache, (uchar*) err_tables));
+ VOID(my_hash_delete(&open_cache, (uchar*) err_tables));
err_tables= err_tables->next;
}
}
@@ -3512,11 +3520,11 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
DBUG_PRINT("loop", ("table_name: %s", table->alias));
HASH_SEARCH_STATE state;
- for (TABLE *search= (TABLE*) hash_first(&open_cache, (uchar*) key,
- key_length, &state);
+ for (TABLE *search= (TABLE*) my_hash_first(&open_cache, (uchar*) key,
+ key_length, &state);
search ;
- search= (TABLE*) hash_next(&open_cache, (uchar*) key,
- key_length, &state))
+ search= (TABLE*) my_hash_next(&open_cache, (uchar*) key,
+ key_length, &state))
{
DBUG_PRINT("info", ("share: 0x%lx "
"open_placeholder: %d locked_by_name: %d "
@@ -3642,7 +3650,7 @@ TABLE *drop_locked_tables(THD *thd,const char *db, const char *table_name)
else
{
/* We already have a name lock, remove copy */
- VOID(hash_delete(&open_cache,(uchar*) table));
+ VOID(my_hash_delete(&open_cache,(uchar*) table));
}
}
else
@@ -3957,7 +3965,7 @@ retry:
release_table_share(share, RELEASE_WAIT_FOR_DROP);
if (!thd->killed)
{
- mysql_reset_errors(thd, 1); // Clear warnings
+ thd->warning_info->clear_warning_info(thd->query_id);
thd->clear_error(); // Clear error message
goto retry;
}
@@ -4521,9 +4529,6 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
*/
for (tables= *start; tables ;tables= tables->next_global)
{
- DBUG_PRINT("tcache", ("opening table: '%s'.'%s' item: 0x%lx",
- tables->db, tables->table_name, (long) tables));
-
safe_to_ignore_table= FALSE;
/*
@@ -4560,6 +4565,8 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
}
DBUG_RETURN(-1);
}
+ DBUG_PRINT("tcache", ("opening table: '%s'.'%s' item: 0x%lx",
+ tables->db, tables->table_name, (long) tables));
(*counter)++;
/*
@@ -4616,7 +4623,7 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
Let us free memory used by 'sroutines' hash here since we never
call destructor for this LEX.
*/
- hash_free(&tables->view->sroutines);
+ my_hash_free(&tables->view->sroutines);
goto process_view_routines;
}
@@ -5893,8 +5900,8 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
field_ptr= table->field + cached_field_index;
else if (table->s->name_hash.records)
{
- field_ptr= (Field**) hash_search(&table->s->name_hash, (uchar*) name,
- length);
+ field_ptr= (Field**) my_hash_search(&table->s->name_hash, (uchar*) name,
+ length);
if (field_ptr)
{
/*
@@ -6021,7 +6028,9 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
table_name && table_name[0] &&
(my_strcasecmp(table_alias_charset, table_list->alias, table_name) ||
(db_name && db_name[0] && table_list->db && table_list->db[0] &&
- strcmp(db_name, table_list->db))))
+ (table_list->schema_table ?
+ my_strcasecmp(system_charset_info, db_name, table_list->db) :
+ strcmp(db_name, table_list->db)))))
DBUG_RETURN(0);
*actual_table= NULL;
@@ -6140,8 +6149,8 @@ Field *find_field_in_table_sef(TABLE *table, const char *name)
Field **field_ptr;
if (table->s->name_hash.records)
{
- field_ptr= (Field**)hash_search(&table->s->name_hash,(uchar*) name,
- strlen(name));
+ field_ptr= (Field**)my_hash_search(&table->s->name_hash,(uchar*) name,
+ strlen(name));
if (field_ptr)
{
/*
@@ -8383,7 +8392,7 @@ void remove_db_from_cache(const char *db)
{
for (uint idx=0 ; idx < open_cache.records ; idx++)
{
- TABLE *table=(TABLE*) hash_element(&open_cache,idx);
+ TABLE *table=(TABLE*) my_hash_element(&open_cache,idx);
if (!strcmp(table->s->db.str, db))
{
table->s->version= 0L; /* Free when thread is ready */
@@ -8392,7 +8401,7 @@ void remove_db_from_cache(const char *db)
}
}
while (unused_tables && !unused_tables->s->version)
- VOID(hash_delete(&open_cache,(uchar*) unused_tables));
+ VOID(my_hash_delete(&open_cache,(uchar*) unused_tables));
}
@@ -8408,7 +8417,7 @@ void flush_tables()
{
(void) pthread_mutex_lock(&LOCK_open);
while (unused_tables)
- hash_delete(&open_cache,(uchar*) unused_tables);
+ my_hash_delete(&open_cache,(uchar*) unused_tables);
(void) pthread_mutex_unlock(&LOCK_open);
}
@@ -8445,11 +8454,11 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
HASH_SEARCH_STATE state;
result= signalled= 0;
- for (table= (TABLE*) hash_first(&open_cache, (uchar*) key, key_length,
- &state);
+ for (table= (TABLE*) my_hash_first(&open_cache, (uchar*) key, key_length,
+ &state);
table;
- table= (TABLE*) hash_next(&open_cache, (uchar*) key, key_length,
- &state))
+ table= (TABLE*) my_hash_next(&open_cache, (uchar*) key, key_length,
+ &state))
{
THD *in_use;
DBUG_PRINT("tcache", ("found table: '%s'.'%s' 0x%lx", table->s->db.str,
@@ -8516,12 +8525,12 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
}
}
while (unused_tables && !unused_tables->s->version)
- VOID(hash_delete(&open_cache,(uchar*) unused_tables));
+ VOID(my_hash_delete(&open_cache,(uchar*) unused_tables));
DBUG_PRINT("info", ("Removing table from table_def_cache"));
/* Remove table from table definition cache if it's not in use */
- if ((share= (TABLE_SHARE*) hash_search(&table_def_cache,(uchar*) key,
- key_length)))
+ if ((share= (TABLE_SHARE*) my_hash_search(&table_def_cache,(uchar*) key,
+ key_length)))
{
DBUG_PRINT("info", ("share version: %lu ref_count: %u",
share->version, share->ref_count));
@@ -8529,7 +8538,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
if (share->ref_count == 0)
{
pthread_mutex_lock(&share->mutex);
- VOID(hash_delete(&table_def_cache, (uchar*) share));
+ VOID(my_hash_delete(&table_def_cache, (uchar*) share));
}
}
@@ -8777,11 +8786,11 @@ void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table
key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
VOID(pthread_mutex_lock(&LOCK_open));
HASH_SEARCH_STATE state;
- for (table= (TABLE*) hash_first(&open_cache,(uchar*) key,key_length,
- &state) ;
+ for (table= (TABLE*) my_hash_first(&open_cache,(uchar*) key,key_length,
+ &state) ;
table;
- table= (TABLE*) hash_next(&open_cache,(uchar*) key,key_length,
- &state))
+ table= (TABLE*) my_hash_next(&open_cache,(uchar*) key,key_length,
+ &state))
{
THD *in_use= table->in_use;
table->s->version= 0L;
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index ee51480411b..58c309ef57b 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -61,7 +61,7 @@ void mysql_client_binlog_statement(THD* thd)
rli= thd->rli_fake;
if (!rli)
{
- rli= thd->rli_fake= new Relay_log_info;
+ rli= thd->rli_fake= new Relay_log_info(FALSE);
#ifdef HAVE_purify
rli->is_fake= TRUE;
#endif
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 8114e0221d6..5942696b86c 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -286,6 +286,7 @@ functions:
if (and only if) this query has a registered result set writer
(thd->net.query_cache_query).
4. Query_cache::invalidate
+ Query_cache::invalidate_locked_for_write
- Called from various places to invalidate query cache based on data-
base, table and myisam file name. During an on going invalidation
the query cache is temporarily disabled.
@@ -812,19 +813,20 @@ uchar *query_cache_query_get_key(const uchar *record, size_t *length,
Note on double-check locking (DCL) usage.
Below, in query_cache_insert(), query_cache_abort() and
- query_cache_end_of_result() we use what is called double-check
- locking (DCL) for NET::query_cache_query. I.e. we test it first
- without a lock, and, if positive, test again under the lock.
+ Query_cache::end_of_result() we use what is called double-check
+ locking (DCL) for Query_cache_tls::first_query_block.
+ I.e. we test it first without a lock, and, if positive, test again
+ under the lock.
- This means that if we see 'NET::query_cache_query == 0' without a
+ This means that if we see 'first_query_block == 0' without a
lock we will skip the operation. But this is safe here: when we
started to cache a query, we called Query_cache::store_query(), and
- NET::query_cache_query was set to non-zero in this thread (and the
+ 'first_query_block' was set to non-zero in this thread (and the
thread always sees results of its memory operations, mutex or not).
- If later we see 'NET::query_cache_query == 0' without locking a
+ If later we see 'first_query_block == 0' without locking a
mutex, that may only mean that some other thread have reset it by
invalidating the query. Skipping the operation in this case is the
- right thing to do, as NET::query_cache_query won't get non-zero for
+ right thing to do, as first_query_block won't get non-zero for
this query again.
See also comments in Query_cache::store_query() and
@@ -833,56 +835,71 @@ uchar *query_cache_query_get_key(const uchar *record, size_t *length,
NOTE, however, that double-check locking is not applicable in
'invalidate' functions, as we may erroneously skip invalidation,
because the thread doing invalidation may never see non-zero
- NET::query_cache_query.
+ 'first_query_block'.
*/
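
The comment above is the contract for the double-check locking used on Query_cache_tls::first_query_block: test the pointer without the cache lock, and only if it still looks live take the lock and test again. Stripped of all cache bookkeeping, the shape is roughly the sketch below (plain pthreads, hypothetical names):

#include <pthread.h>
#include <cstddef>

struct Writer_tls
{
  void *first_query_block;   // non-NULL only while this thread owns a writer
};

static pthread_mutex_t cache_lock= PTHREAD_MUTEX_INITIALIZER;

// Shape of query_cache_insert()/abort(): cheap unlocked test first, then the
// authoritative test under the lock. Skipping on a stale NULL is safe because
// the pointer only becomes non-NULL in this same thread, and other threads
// can only reset it to NULL, never back to non-NULL.
static void append_packet(Writer_tls *tls, const char *packet, size_t length)
{
  if (tls->first_query_block == NULL)         // unlocked fast path
    return;

  pthread_mutex_lock(&cache_lock);
  if (tls->first_query_block != NULL)         // re-check under the lock
  {
    /* ... append 'packet'/'length' to the cached result here ... */
    (void) packet;
    (void) length;
  }
  pthread_mutex_unlock(&cache_lock);
}
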
-void query_cache_init_query(NET *net)
+/**
+ libmysql convenience wrapper to insert data into query cache.
+*/
+void query_cache_insert(const char *packet, ulong length,
+ unsigned pkt_nr)
{
+ THD *thd= current_thd;
+
/*
- It is safe to initialize 'NET::query_cache_query' without a lock
- here, because before it will be accessed from different threads it
- will be set in this thread under a lock, and access from the same
- thread is always safe.
+ Current_thd can be NULL when a new connection is immediately ended
+ due to "Too many connections". thd->store_globals() has not been
+ called at this time and hence my_pthread_setspecific_ptr(THR_THD,
+ this) has not been called for this thread.
*/
- net->query_cache_query= 0;
+
+ if (!thd)
+ return;
+
+ query_cache.insert(&thd->query_cache_tls,
+ packet, length,
+ pkt_nr);
}
-/*
+/**
Insert the packet into the query cache.
*/
-void query_cache_insert(NET *net, const char *packet, ulong length)
+void
+Query_cache::insert(Query_cache_tls *query_cache_tls,
+ const char *packet, ulong length,
+ unsigned pkt_nr)
{
- DBUG_ENTER("query_cache_insert");
+ DBUG_ENTER("Query_cache::insert");
/* See the comment on double-check locking usage above. */
- if (net->query_cache_query == 0)
+ if (is_disabled() || query_cache_tls->first_query_block == NULL)
DBUG_VOID_RETURN;
DBUG_EXECUTE_IF("wait_in_query_cache_insert",
debug_wait_for_kill("wait_in_query_cache_insert"); );
- if (query_cache.try_lock())
+
+ if (try_lock())
DBUG_VOID_RETURN;
- Query_cache_block *query_block= (Query_cache_block*)net->query_cache_query;
- if (!query_block)
+ Query_cache_block *query_block = query_cache_tls->first_query_block;
+ if (query_block == NULL)
{
/*
We lost the writer and the currently processed query has been
invalidated; there is nothing left to do.
*/
- query_cache.unlock();
+ unlock();
DBUG_VOID_RETURN;
}
-
BLOCK_LOCK_WR(query_block);
Query_cache_query *header= query_block->query();
Query_cache_block *result= header->result();
- DUMP(&query_cache);
+ DUMP(this);
DBUG_PRINT("qcache", ("insert packet %lu bytes long",length));
/*
@@ -890,8 +907,8 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
still need structure_guard_mutex to free the query, and therefore unlock
it later in this function.
*/
- if (!query_cache.append_result_data(&result, length, (uchar*) packet,
- query_block))
+ if (!append_result_data(&result, length, (uchar*) packet,
+ query_block))
{
DBUG_PRINT("warning", ("Can't append data"));
header->result(result);
@@ -900,80 +917,83 @@ void query_cache_insert(NET *net, const char *packet, ulong length)
query_cache.free_query(query_block);
query_cache.refused++;
// append_result_data no success => we need unlock
- query_cache.unlock();
+ unlock();
DBUG_VOID_RETURN;
}
header->result(result);
- header->last_pkt_nr= net->pkt_nr;
+ header->last_pkt_nr= pkt_nr;
BLOCK_UNLOCK_WR(query_block);
- DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0););
+ DBUG_EXECUTE("check_querycache",check_integrity(0););
DBUG_VOID_RETURN;
}
-void query_cache_abort(NET *net)
+void
+Query_cache::abort(Query_cache_tls *query_cache_tls)
{
DBUG_ENTER("query_cache_abort");
THD *thd= current_thd;
/* See the comment on double-check locking usage above. */
- if (net->query_cache_query == 0)
+ if (is_disabled() || query_cache_tls->first_query_block == NULL)
DBUG_VOID_RETURN;
- if (query_cache.try_lock())
+ if (try_lock())
DBUG_VOID_RETURN;
/*
While we were waiting another thread might have changed the status
of the writer. Make sure the writer still exists before continue.
*/
- Query_cache_block *query_block= ((Query_cache_block*)
- net->query_cache_query);
+ Query_cache_block *query_block= query_cache_tls->first_query_block;
if (query_block)
{
thd_proc_info(thd, "storing result in query cache");
- DUMP(&query_cache);
+ DUMP(this);
BLOCK_LOCK_WR(query_block);
// The following call will remove the lock on query_block
- query_cache.free_query(query_block);
- net->query_cache_query= 0;
- DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
+ free_query(query_block);
+ query_cache_tls->first_query_block= NULL;
+ DBUG_EXECUTE("check_querycache", check_integrity(1););
}
- query_cache.unlock();
+ unlock();
+
DBUG_VOID_RETURN;
}
-void query_cache_end_of_result(THD *thd)
+void Query_cache::end_of_result(THD *thd)
{
Query_cache_block *query_block;
- DBUG_ENTER("query_cache_end_of_result");
+ Query_cache_tls *query_cache_tls= &thd->query_cache_tls;
+ ulonglong limit_found_rows= thd->limit_found_rows;
+ DBUG_ENTER("Query_cache::end_of_result");
/* See the comment on double-check locking usage above. */
- if (thd->net.query_cache_query == 0)
+ if (query_cache_tls->first_query_block == NULL)
DBUG_VOID_RETURN;
/* Ensure that only complete results are cached. */
- DBUG_ASSERT(thd->main_da.is_eof());
+ DBUG_ASSERT(thd->stmt_da->is_eof());
if (thd->killed)
{
- query_cache_abort(&thd->net);
+ query_cache_abort(&thd->query_cache_tls);
DBUG_VOID_RETURN;
}
#ifdef EMBEDDED_LIBRARY
- query_cache_insert(&thd->net, (char*)thd,
- emb_count_querycache_size(thd));
+ insert(query_cache_tls, (char*)thd,
+ emb_count_querycache_size(thd), 0);
#endif
- if (query_cache.try_lock())
+ if (try_lock())
DBUG_VOID_RETURN;
- query_block= ((Query_cache_block*) thd->net.query_cache_query);
+ query_block= query_cache_tls->first_query_block;
if (query_block)
{
/*
@@ -982,7 +1002,7 @@ void query_cache_end_of_result(THD *thd)
block, the writer should be dropped.
*/
thd_proc_info(thd, "storing result in query cache");
- DUMP(&query_cache);
+ DUMP(this);
BLOCK_LOCK_WR(query_block);
Query_cache_query *header= query_block->query();
Query_cache_block *last_result_block;
@@ -999,8 +1019,8 @@ void query_cache_end_of_result(THD *thd)
and removed from QC.
*/
DBUG_ASSERT(0);
- query_cache.free_query(query_block);
- query_cache.unlock();
+ free_query(query_block);
+ unlock();
DBUG_VOID_RETURN;
}
last_result_block= header->result()->prev;
@@ -1009,17 +1029,17 @@ void query_cache_end_of_result(THD *thd)
if (last_result_block->length >= query_cache.min_allocation_unit + len)
query_cache.split_block(last_result_block,len);
- header->found_rows(current_thd->limit_found_rows);
+ header->found_rows(limit_found_rows);
header->result()->type= Query_cache_block::RESULT;
/* Drop the writer. */
header->writer(0);
- thd->net.query_cache_query= 0;
+ query_cache_tls->first_query_block= NULL;
BLOCK_UNLOCK_WR(query_block);
- DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1););
-
+ DBUG_EXECUTE("check_querycache", check_integrity(1););
}
- query_cache.unlock();
+
+ unlock();
DBUG_VOID_RETURN;
}
@@ -1059,7 +1079,7 @@ Query_cache::Query_cache(ulong query_cache_limit_arg,
min_result_data_size(ALIGN_SIZE(min_result_data_size_arg)),
def_query_hash_size(ALIGN_SIZE(def_query_hash_size_arg)),
def_table_hash_size(ALIGN_SIZE(def_table_hash_size_arg)),
- initialized(0)
+ initialized(0), m_query_cache_is_disabled(FALSE)
{
ulong min_needed= (ALIGN_SIZE(sizeof(Query_cache_block)) +
ALIGN_SIZE(sizeof(Query_cache_block_table)) +
@@ -1097,7 +1117,7 @@ ulong Query_cache::resize(ulong query_cache_size_arg)
Drop the writer; this will cancel any attempts to store
the processed statement associated with this writer.
*/
- query->writer()->query_cache_query= 0;
+ query->writer()->first_query_block= NULL;
query->writer(0);
refused++;
}
@@ -1159,7 +1179,9 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
protocol (COM_EXECUTE) cannot be served to statements asking for results
in the text protocol (COM_QUERY) and vice-versa.
*/
- flags.result_in_binary_protocol= (unsigned int) thd->protocol->type();
+ flags.protocol_type= (unsigned int) thd->protocol->type();
+ /* PROTOCOL_LOCAL results are not cached. */
+ DBUG_ASSERT(flags.protocol_type != (unsigned int) Protocol::PROTOCOL_LOCAL);
flags.more_results_exists= test(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
flags.in_trans= test(thd->server_status & SERVER_STATUS_IN_TRANS);
@@ -1188,7 +1210,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
- (int)flags.result_in_binary_protocol,
+ (int)flags.protocol_type,
(int)flags.more_results_exists,
flags.pkt_nr,
flags.character_set_client_num,
@@ -1260,7 +1282,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
/* Check if another thread is processing the same query? */
Query_cache_block *competitor = (Query_cache_block *)
- hash_search(&queries, (uchar*) thd->query(), tot_length);
+ my_hash_search(&queries, (uchar*) thd->query(), tot_length);
DBUG_PRINT("qcache", ("competitor 0x%lx", (ulong) competitor));
if (competitor == 0)
{
@@ -1289,7 +1311,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
{
refused++;
DBUG_PRINT("warning", ("tables list including failed"));
- hash_delete(&queries, (uchar *) query_block);
+ my_hash_delete(&queries, (uchar *) query_block);
header->unlock_n_destroy();
free_memory_block(query_block);
unlock();
@@ -1298,8 +1320,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
double_linked_list_simple_include(query_block, &queries_blocks);
inserts++;
queries_in_cache++;
- net->query_cache_query= (uchar*) query_block;
- header->writer(net);
+ thd->query_cache_tls.first_query_block= query_block;
+ header->writer(&thd->query_cache_tls);
header->tables_type(tables_type);
unlock();
@@ -1355,7 +1377,10 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
{
ulonglong engine_data;
Query_cache_query *query;
- Query_cache_block *first_result_block, *result_block;
+#ifndef EMBEDDED_LIBRARY
+ Query_cache_block *first_result_block;
+#endif
+ Query_cache_block *result_block;
Query_cache_block_table *block_table, *block_table_end;
ulong tot_length;
Query_cache_query_flags flags;
@@ -1368,8 +1393,8 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
See also a note on double-check locking usage above.
*/
- if (thd->locked_tables || thd->variables.query_cache_type == 0 ||
- query_cache_size == 0)
+ if (is_disabled() || thd->locked_tables ||
+ thd->variables.query_cache_type == 0 || query_cache_size == 0)
goto err;
if (!thd->lex->safe_to_cache_query)
@@ -1428,12 +1453,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
if (query_cache_size == 0)
goto err_unlock;
- /*
- Check that we haven't forgot to reset the query cache variables;
- make sure there are no attached query cache writer to this thread.
- */
- DBUG_ASSERT(thd->net.query_cache_query == 0);
-
Query_cache_block *query_block;
tot_length= query_length + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE;
@@ -1455,7 +1474,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG);
flags.client_protocol_41= test(thd->client_capabilities &
CLIENT_PROTOCOL_41);
- flags.result_in_binary_protocol= (unsigned int)thd->protocol->type();
+ flags.protocol_type= (unsigned int) thd->protocol->type();
flags.more_results_exists= test(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
flags.in_trans= test(thd->server_status & SERVER_STATUS_IN_TRANS);
@@ -1482,7 +1501,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
- (int)flags.result_in_binary_protocol,
+ (int)flags.protocol_type,
(int)flags.more_results_exists,
flags.pkt_nr,
flags.character_set_client_num,
@@ -1499,8 +1518,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.autocommit));
memcpy((uchar *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)),
(uchar*) &flags, QUERY_CACHE_FLAGS_SIZE);
- query_block = (Query_cache_block *) hash_search(&queries, (uchar*) sql,
- tot_length);
+ query_block = (Query_cache_block *) my_hash_search(&queries, (uchar*) sql,
+ tot_length);
/* Quick abort on unlocked data */
if (query_block == 0 ||
query_block->query()->result() == 0 ||
@@ -1515,7 +1534,10 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
BLOCK_LOCK_RD(query_block);
query = query_block->query();
- result_block= first_result_block= query->result();
+ result_block= query->result();
+#ifndef EMBEDDED_LIBRARY
+ first_result_block= result_block;
+#endif
if (result_block == 0 || result_block->type != Query_cache_block::RESULT)
{
@@ -1577,7 +1599,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
table_list.db = table->db();
table_list.alias= table_list.table_name= table->table();
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- if (check_table_access(thd,SELECT_ACL,&table_list, 1, TRUE))
+ if (check_table_access(thd,SELECT_ACL,&table_list, FALSE, 1,TRUE))
{
DBUG_PRINT("qcache",
("probably no SELECT access to %s.%s => return to normal processing",
@@ -1658,7 +1680,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
thd->limit_found_rows = query->found_rows();
thd->status_var.last_query_cost= 0.0;
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
BLOCK_UNLOCK_RD(query_block);
MYSQL_QUERY_CACHE_HIT(thd->query(), (ulong) thd->limit_found_rows);
@@ -1680,6 +1702,8 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
my_bool using_transactions)
{
DBUG_ENTER("Query_cache::invalidate (table list)");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
using_transactions= using_transactions &&
(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
@@ -1710,6 +1734,9 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
{
DBUG_ENTER("Query_cache::invalidate (changed table list)");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
+
THD *thd= current_thd;
for (; tables_used; tables_used= tables_used->next)
{
@@ -1735,8 +1762,11 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
*/
void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used)
{
- THD *thd= current_thd;
DBUG_ENTER("Query_cache::invalidate_locked_for_write");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
+
+ THD *thd= current_thd;
for (; tables_used; tables_used= tables_used->next_local)
{
thd_proc_info(thd, "invalidating query cache entries (table)");
@@ -1757,7 +1787,9 @@ void Query_cache::invalidate(THD *thd, TABLE *table,
my_bool using_transactions)
{
DBUG_ENTER("Query_cache::invalidate (table)");
-
+ if (is_disabled())
+ DBUG_VOID_RETURN;
+
using_transactions= using_transactions &&
(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
if (using_transactions &&
@@ -1774,6 +1806,8 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length,
my_bool using_transactions)
{
DBUG_ENTER("Query_cache::invalidate (key)");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
using_transactions= using_transactions &&
(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
@@ -1792,9 +1826,12 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length,
void Query_cache::invalidate(char *db)
{
- bool restart= FALSE;
+
DBUG_ENTER("Query_cache::invalidate (db)");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
+ bool restart= FALSE;
/*
Lock the query cache and queue all invalidation attempts to avoid
the risk of a race between invalidation, cache inserts and flushes.
@@ -1879,6 +1916,9 @@ void Query_cache::invalidate_by_MyISAM_filename(const char *filename)
void Query_cache::flush()
{
DBUG_ENTER("Query_cache::flush");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
+
DBUG_EXECUTE_IF("wait_in_query_cache_flush1",
debug_wait_for_kill("wait_in_query_cache_flush1"););
@@ -1910,6 +1950,9 @@ void Query_cache::pack(ulong join_limit, uint iteration_limit)
{
DBUG_ENTER("Query_cache::pack");
+ if (is_disabled())
+ DBUG_VOID_RETURN;
+
/*
If the entire qc is being invalidated we can bail out early
instead of waiting for the lock.
@@ -1967,6 +2010,15 @@ void Query_cache::init()
pthread_cond_init(&COND_cache_status_changed, NULL);
m_cache_lock_status= Query_cache::UNLOCKED;
initialized = 1;
+ /*
+    If the query cache is explicitly turned off from the command line, it
+    stays disabled for the remainder of the server's lifetime. This is because
+    we want to avoid locking the QC-specific mutex if the query cache is not
+    going to be used.

+ */
+ if (global_system_variables.query_cache_type == 0)
+ query_cache.disable_query_cache();
+
DBUG_VOID_RETURN;
}
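
Query_cache::init() above pins down the design choice: when query_cache_type is 0 at startup the cache is flagged as permanently disabled, so every later entry point can return before ever touching the cache mutex. Reduced to its guard, the pattern looks like this sketch (illustrative class, not the server's):

#include <pthread.h>

class Toy_cache
{
public:
  Toy_cache() : m_disabled(false)
  {
    pthread_mutex_init(&m_lock, NULL);
  }

  // Called once at startup when the cache is switched off; never undone,
  // so later calls may test the flag without any locking.
  void disable() { m_disabled= true; }

  void invalidate(const char *key)
  {
    if (m_disabled)                  // cheap bail-out, mutex never taken
      return;
    pthread_mutex_lock(&m_lock);
    /* ... real invalidation work on 'key' ... */
    (void) key;
    pthread_mutex_unlock(&m_lock);
  }

private:
  bool            m_disabled;
  pthread_mutex_t m_lock;
};
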
@@ -2105,8 +2157,8 @@ ulong Query_cache::init_cache()
DUMP(this);
- VOID(hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0,
- query_cache_query_get_key, 0, 0));
+ VOID(my_hash_init(&queries, &my_charset_bin, def_query_hash_size, 0, 0,
+ query_cache_query_get_key, 0, 0));
#ifndef FN_NO_CASE_SENCE
/*
If lower_case_table_names!=0 then db and table names are already
@@ -2116,8 +2168,8 @@ ulong Query_cache::init_cache()
lower_case_table_names == 0 then we should distinguish my_table
and MY_TABLE cases and so again can use binary collation.
*/
- VOID(hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0,
- query_cache_table_get_key, 0, 0));
+ VOID(my_hash_init(&tables, &my_charset_bin, def_table_hash_size, 0, 0,
+ query_cache_table_get_key, 0, 0));
#else
/*
On windows, OS/2, MacOS X with HFS+ or any other case insensitive
@@ -2127,10 +2179,10 @@ ulong Query_cache::init_cache()
file system) and so should use case insensitive collation for
comparison.
*/
- VOID(hash_init(&tables,
- lower_case_table_names ? &my_charset_bin :
- files_charset_info,
- def_table_hash_size, 0, 0,query_cache_table_get_key, 0, 0));
+ VOID(my_hash_init(&tables,
+ lower_case_table_names ? &my_charset_bin :
+ files_charset_info,
+ def_table_hash_size, 0, 0,query_cache_table_get_key, 0, 0));
#endif
queries_in_cache = 0;
@@ -2180,8 +2232,8 @@ void Query_cache::free_cache()
my_free((uchar*) cache, MYF(MY_ALLOW_ZERO_PTR));
make_disabled();
- hash_free(&queries);
- hash_free(&tables);
+ my_hash_free(&queries);
+ my_hash_free(&tables);
DBUG_VOID_RETURN;
}
@@ -2293,7 +2345,7 @@ void Query_cache::free_query_internal(Query_cache_block *query_block)
if (query->writer() != 0)
{
/* Tell MySQL that this query should not be cached anymore */
- query->writer()->query_cache_query= 0;
+ query->writer()->first_query_block= NULL;
query->writer(0);
}
double_linked_list_exclude(query_block, &queries_blocks);
@@ -2356,7 +2408,7 @@ void Query_cache::free_query(Query_cache_block *query_block)
(ulong) query_block,
query_block->query()->length() ));
- hash_delete(&queries,(uchar *) query_block);
+ my_hash_delete(&queries,(uchar *) query_block);
free_query_internal(query_block);
DBUG_VOID_RETURN;
@@ -2685,7 +2737,7 @@ void
Query_cache::invalidate_table_internal(THD *thd, uchar *key, uint32 key_length)
{
Query_cache_block *table_block=
- (Query_cache_block*)hash_search(&tables, key, key_length);
+ (Query_cache_block*)my_hash_search(&tables, key, key_length);
if (table_block)
{
Query_cache_block_table *list_root= table_block->table(0);
@@ -2883,7 +2935,7 @@ Query_cache::insert_table(uint key_len, char *key,
THD *thd= current_thd;
Query_cache_block *table_block=
- (Query_cache_block *)hash_search(&tables, (uchar*) key, key_len);
+ (Query_cache_block *) my_hash_search(&tables, (uchar*) key, key_len);
if (table_block &&
table_block->table()->engine_data() != engine_data)
@@ -2999,7 +3051,7 @@ void Query_cache::unlink_table(Query_cache_block_table *node)
Query_cache_block *table_block= neighbour->block();
double_linked_list_exclude(table_block,
&tables_blocks);
- hash_delete(&tables,(uchar *) table_block);
+ my_hash_delete(&tables,(uchar *) table_block);
free_memory_block(table_block);
}
DBUG_VOID_RETURN;
@@ -3506,7 +3558,8 @@ Query_cache::process_and_count_tables(THD *thd, TABLE_LIST *tables_used,
*/
TABLE_COUNTER_TYPE
-Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
+Query_cache::is_cacheable(THD *thd, size_t query_len, const char *query,
+ LEX *lex,
TABLE_LIST *tables_used, uint8 *tables_type)
{
TABLE_COUNTER_TYPE table_count;
@@ -3683,7 +3736,7 @@ my_bool Query_cache::move_by_type(uchar **border,
uchar *key;
size_t key_length;
key=query_cache_table_get_key((uchar*) block, &key_length, 0);
- hash_first(&tables, (uchar*) key, key_length, &record_idx);
+ my_hash_first(&tables, (uchar*) key, key_length, &record_idx);
block->destroy();
new_block->init(len);
@@ -3717,7 +3770,7 @@ my_bool Query_cache::move_by_type(uchar **border,
/* Fix pointer to table name */
new_block->table()->table(new_block->table()->db() + tablename_offset);
/* Fix hash to point at moved block */
- hash_replace(&tables, &record_idx, (uchar*) new_block);
+ my_hash_replace(&tables, &record_idx, (uchar*) new_block);
DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
len, (ulong) new_block, (ulong) *border));
@@ -3743,7 +3796,7 @@ my_bool Query_cache::move_by_type(uchar **border,
uchar *key;
size_t key_length;
key=query_cache_query_get_key((uchar*) block, &key_length, 0);
- hash_first(&queries, (uchar*) key, key_length, &record_idx);
+ my_hash_first(&queries, (uchar*) key, key_length, &record_idx);
// Move table of used tables
memmove((char*) new_block->table(0), (char*) block->table(0),
ALIGN_SIZE(n_tables*sizeof(Query_cache_block_table)));
@@ -3805,13 +3858,13 @@ my_bool Query_cache::move_by_type(uchar **border,
If someone is writing to this block, inform the writer that the block
has been moved.
*/
- NET *net = new_block->query()->writer();
- if (net != 0)
+ Query_cache_tls *query_cache_tls= new_block->query()->writer();
+ if (query_cache_tls != NULL)
{
- net->query_cache_query= (uchar*) new_block;
+ query_cache_tls->first_query_block= new_block;
}
/* Fix hash to point at moved block */
- hash_replace(&queries, &record_idx, (uchar*) new_block);
+ my_hash_replace(&queries, &record_idx, (uchar*) new_block);
DBUG_PRINT("qcache", ("moved %lu bytes to 0x%lx, new gap at 0x%lx",
len, (ulong) new_block, (ulong) *border));
break;
@@ -4222,13 +4275,13 @@ my_bool Query_cache::check_integrity(bool locked)
if (!locked)
lock_and_suspend();
- if (hash_check(&queries))
+ if (my_hash_check(&queries))
{
DBUG_PRINT("error", ("queries hash is damaged"));
result = 1;
}
- if (hash_check(&tables))
+ if (my_hash_check(&tables))
{
DBUG_PRINT("error", ("tables hash is damaged"));
result = 1;
@@ -4395,7 +4448,7 @@ my_bool Query_cache::check_integrity(bool locked)
(ulong) block, (uint) block->type));
size_t length;
uchar *key = query_cache_query_get_key((uchar*) block, &length, 0);
- uchar* val = hash_search(&queries, key, length);
+ uchar* val = my_hash_search(&queries, key, length);
if (((uchar*)block) != val)
{
DBUG_PRINT("error", ("block 0x%lx found in queries hash like 0x%lx",
@@ -4430,7 +4483,7 @@ my_bool Query_cache::check_integrity(bool locked)
(ulong) block, (uint) block->type));
size_t length;
uchar *key = query_cache_table_get_key((uchar*) block, &length, 0);
- uchar* val = hash_search(&tables, key, length);
+ uchar* val = my_hash_search(&tables, key, length);
if (((uchar*)block) != val)
{
DBUG_PRINT("error", ("block 0x%lx found in tables hash like 0x%lx",
@@ -4670,3 +4723,4 @@ err2:
#endif /* DBUG_OFF */
#endif /*HAVE_QUERY_CACHE*/
+
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 44fc3123b98..695d6fb4db3 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -64,6 +64,8 @@ struct Query_cache_table;
struct Query_cache_query;
struct Query_cache_result;
class Query_cache;
+struct Query_cache_tls;
+struct LEX;
/**
This class represents a node in the linked chain of queries
@@ -137,7 +139,7 @@ struct Query_cache_query
ulonglong limit_found_rows;
rw_lock_t lock;
Query_cache_block *res;
- NET *wri;
+ Query_cache_tls *wri;
ulong len;
uint8 tbls_type;
unsigned int last_pkt_nr;
@@ -149,8 +151,8 @@ struct Query_cache_query
inline void found_rows(ulonglong rows) { limit_found_rows= rows; }
inline Query_cache_block *result() { return res; }
inline void result(Query_cache_block *p) { res= p; }
- inline NET *writer() { return wri; }
- inline void writer(NET *p) { wri= p; }
+ inline Query_cache_tls *writer() { return wri; }
+ inline void writer(Query_cache_tls *p) { wri= p; }
inline uint8 tables_type() { return tbls_type; }
inline void tables_type(uint8 type) { tbls_type= type; }
inline ulong length() { return len; }
@@ -279,8 +281,11 @@ private:
enum Cache_lock_status { UNLOCKED, LOCKED_NO_WAIT, LOCKED };
Cache_lock_status m_cache_lock_status;
+ bool m_query_cache_is_disabled;
+
void free_query_internal(Query_cache_block *point);
void invalidate_table_internal(THD *thd, uchar *key, uint32 key_length);
+ void disable_query_cache(void) { m_query_cache_is_disabled= TRUE; }
protected:
/*
@@ -407,7 +412,8 @@ protected:
If query is cacheable return number tables in query
(query without tables not cached)
*/
- TABLE_COUNTER_TYPE is_cacheable(THD *thd, uint32 query_len, char *query,
+ TABLE_COUNTER_TYPE is_cacheable(THD *thd, size_t query_len,
+ const char *query,
LEX *lex, TABLE_LIST *tables_used,
uint8 *tables_type);
TABLE_COUNTER_TYPE process_and_count_tables(THD *thd,
@@ -423,6 +429,8 @@ protected:
uint def_query_hash_size = QUERY_CACHE_DEF_QUERY_HASH_SIZE,
uint def_table_hash_size = QUERY_CACHE_DEF_TABLE_HASH_SIZE);
+ bool is_disabled(void) { return m_query_cache_is_disabled; }
+
/* initialize cache (mutex) */
void init();
/* resize query cache (return real query size, 0 if disabled) */
@@ -462,10 +470,13 @@ protected:
void destroy();
- friend void query_cache_init_query(NET *net);
- friend void query_cache_insert(NET *net, const char *packet, ulong length);
- friend void query_cache_end_of_result(THD *thd);
- friend void query_cache_abort(NET *net);
+ void insert(Query_cache_tls *query_cache_tls,
+ const char *packet,
+ ulong length,
+ unsigned pkt_nr);
+
+ void end_of_result(THD *thd);
+ void abort(Query_cache_tls *query_cache_tls);
/*
The following functions are only used when debugging
@@ -493,9 +504,4 @@ protected:
extern Query_cache query_cache;
extern TYPELIB query_cache_type_typelib;
-void query_cache_init_query(NET *net);
-void query_cache_insert(NET *net, const char *packet, ulong length);
-void query_cache_end_of_result(THD *thd);
-void query_cache_abort(NET *net);
-
#endif
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 7b37a3f6e93..d37058d2167 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright (C) 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -91,7 +91,9 @@ extern "C" void free_user_var(user_var_entry *entry)
bool Key_part_spec::operator==(const Key_part_spec& other) const
{
- return length == other.length && !strcmp(field_name, other.field_name);
+ return length == other.length &&
+ !my_strcasecmp(system_charset_info, field_name.str,
+ other.field_name.str);
}
/**
@@ -200,19 +202,6 @@ bool foreign_key_prefix(Key *a, Key *b)
** Thread specific functions
****************************************************************************/
-/** Push an error to the error stack and return TRUE for now. */
-
-bool
-Reprepare_observer::report_error(THD *thd)
-{
- my_error(ER_NEED_REPREPARE, MYF(ME_NO_WARNING_FOR_ERROR|ME_NO_SP_HANDLER));
-
- m_invalidated= TRUE;
-
- return TRUE;
-}
-
-
Open_tables_state::Open_tables_state(ulong version_arg)
:version(version_arg), state_flags(0U)
{
@@ -271,7 +260,7 @@ const char *set_thd_proc_info(THD *thd, const char *info,
const char *old_info= thd->proc_info;
DBUG_PRINT("proc_info", ("%s:%d %s", calling_file, calling_line,
(info != NULL) ? info : "(null)"));
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.status_change(info, calling_function, calling_file, calling_line);
#endif
thd->proc_info= info;
@@ -279,6 +268,26 @@ const char *set_thd_proc_info(THD *thd, const char *info,
}
extern "C"
+const char* thd_enter_cond(MYSQL_THD thd, pthread_cond_t *cond,
+ pthread_mutex_t *mutex, const char *msg)
+{
+ if (!thd)
+ thd= current_thd;
+
+ return thd->enter_cond(cond, mutex, msg);
+}
+
+extern "C"
+void thd_exit_cond(MYSQL_THD thd, const char *old_msg)
+{
+ if (!thd)
+ thd= current_thd;
+
+ thd->exit_cond(old_msg);
+ return;
+}
+
+extern "C"
void **thd_ha_data(const THD *thd, const struct handlerton *hton)
{
return (void **) &thd->ha_data[hton->slot].ha_ptr;
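
thd_enter_cond()/thd_exit_cond() above expose THD::enter_cond()/exit_cond() to C callers, so a plugin's wait shows up in the processlist message and stays killable. A hedged usage sketch: the mutex, condition and wakeup_flag are invented, and it relies on the convention (as in THD::exit_cond) that the exit call releases the mutex for the caller:

#include <pthread.h>

class THD;                          // opaque handle; mysql/plugin.h typedefs
typedef THD *MYSQL_THD;             // this as MYSQL_THD for plugin code

// Prototypes mirroring the wrappers added above.
extern "C" const char *thd_enter_cond(MYSQL_THD thd, pthread_cond_t *cond,
                                      pthread_mutex_t *mutex, const char *msg);
extern "C" void thd_exit_cond(MYSQL_THD thd, const char *old_msg);

// Illustrative wait state; not part of the server.
static pthread_mutex_t wait_mutex=  PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cond=   PTHREAD_COND_INITIALIZER;
static bool            wakeup_flag= false;

static void wait_for_toy_wakeup(MYSQL_THD thd)
{
  pthread_mutex_lock(&wait_mutex);            // enter_cond expects the mutex held
  const char *old_msg=
    thd_enter_cond(thd, &wait_cond, &wait_mutex, "waiting for toy wakeup");

  while (!wakeup_flag)                        // a real loop would also honour kill
    pthread_cond_wait(&wait_cond, &wait_mutex);

  thd_exit_cond(thd, old_msg);                // restores proc_info and, per
}                                             // THD::exit_cond, unlocks the mutex
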
@@ -305,7 +314,7 @@ int thd_tx_isolation(const THD *thd)
extern "C"
void thd_inc_row_count(THD *thd)
{
- thd->row_count++;
+ thd->warning_info->inc_current_row_for_warning();
}
@@ -407,7 +416,7 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
/**
- Implementation of Drop_table_error_handler::handle_error().
+ Implementation of Drop_table_error_handler::handle_condition().
The reason in having this implementation is to silence technical low-level
warnings during DROP TABLE operation. Currently we don't want to expose
the following warnings during DROP TABLE:
@@ -420,150 +429,19 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
@return TRUE if the condition is handled.
*/
-bool Drop_table_error_handler::handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd)
-{
+bool Drop_table_error_handler::handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
+{
+ *cond_hdl= NULL;
return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
sql_errno == ER_TRG_NO_DEFINER);
}
-/**
- Clear this diagnostics area.
-
- Normally called at the end of a statement.
-*/
-
-void
-Diagnostics_area::reset_diagnostics_area()
-{
-#ifdef DBUG_OFF
- can_overwrite_status= FALSE;
- /** Don't take chances in production */
- m_message[0]= '\0';
- m_sql_errno= 0;
- m_server_status= 0;
- m_affected_rows= 0;
- m_last_insert_id= 0;
- m_total_warn_count= 0;
-#endif
- is_sent= FALSE;
- /** Tiny reset in debug mode to see garbage right away */
- m_status= DA_EMPTY;
-}
-
-
-/**
- Set OK status -- ends commands that do not return a
- result set, e.g. INSERT/UPDATE/DELETE.
-*/
-
-void
-Diagnostics_area::set_ok_status(THD *thd, ha_rows affected_rows_arg,
- ulonglong last_insert_id_arg,
- const char *message_arg)
-{
- DBUG_ASSERT(! is_set());
-#ifdef DBUG_OFF
- /*
- In production, refuse to overwrite an error or a custom response
- with an OK packet.
- */
- if (is_error() || is_disabled())
- return;
-#endif
- /** Only allowed to report success if has not yet reported an error */
-
- m_server_status= thd->server_status;
- m_total_warn_count= thd->total_warn_count;
- m_affected_rows= affected_rows_arg;
- m_last_insert_id= last_insert_id_arg;
- if (message_arg)
- strmake(m_message, message_arg, sizeof(m_message) - 1);
- else
- m_message[0]= '\0';
- m_status= DA_OK;
-}
-
-
-/**
- Set EOF status.
-*/
-
-void
-Diagnostics_area::set_eof_status(THD *thd)
-{
- /** Only allowed to report eof if has not yet reported an error */
-
- DBUG_ASSERT(! is_set());
-#ifdef DBUG_OFF
- /*
- In production, refuse to overwrite an error or a custom response
- with an EOF packet.
- */
- if (is_error() || is_disabled())
- return;
-#endif
-
- m_server_status= thd->server_status;
- /*
- If inside a stored procedure, do not return the total
- number of warnings, since they are not available to the client
- anyway.
- */
- m_total_warn_count= thd->spcont ? 0 : thd->total_warn_count;
-
- m_status= DA_EOF;
-}
-
-/**
- Set ERROR status.
-*/
-
-void
-Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg,
- const char *message_arg)
-{
- /*
- Only allowed to report error if has not yet reported a success
- The only exception is when we flush the message to the client,
- an error can happen during the flush.
- */
- DBUG_ASSERT(! is_set() || can_overwrite_status);
-#ifdef DBUG_OFF
- /*
- In production, refuse to overwrite a custom response with an
- ERROR packet.
- */
- if (is_disabled())
- return;
-#endif
-
- m_sql_errno= sql_errno_arg;
- strmake(m_message, message_arg, sizeof(m_message) - 1);
-
- m_status= DA_ERROR;
-}
-
-
-/**
- Mark the diagnostics area as 'DISABLED'.
-
- This is used in rare cases when the COM_ command at hand sends a response
- in a custom format. One example is the query cache, another is
- COM_STMT_PREPARE.
-*/
-
-void
-Diagnostics_area::disable_status()
-{
- DBUG_ASSERT(! is_set());
- m_status= DA_DISABLED;
-}
-
-
THD::THD()
:Statement(&main_lex, &main_mem_root, CONVENTIONAL_EXECUTION,
/* statement id */ 0),
@@ -579,6 +457,8 @@ THD::THD()
first_successful_insert_id_in_cur_stmt(0),
stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
examined_row_count(0),
+ warning_info(&main_warning_info),
+ stmt_da(&main_da),
global_read_lock(0),
is_fatal_error(0),
transaction_rollback_request(0),
@@ -589,10 +469,11 @@ THD::THD()
bootstrap(0),
derived_tables_processing(FALSE),
spcont(NULL),
- m_parser_state(NULL)
+ m_parser_state(NULL),
#if defined(ENABLED_DEBUG_SYNC)
- , debug_sync_control(0)
+ debug_sync_control(0),
#endif /* defined(ENABLED_DEBUG_SYNC) */
+ main_warning_info(0)
{
ulong tmp;
@@ -613,16 +494,14 @@ THD::THD()
killed= NOT_KILLED;
col_access=0;
is_slave_error= thread_specific_used= FALSE;
- hash_clear(&handler_tables_hash);
+ my_hash_clear(&handler_tables_hash);
tmp_table=0;
used_tables=0;
- cuted_fields= sent_row_count= row_count= 0L;
+ cuted_fields= 0L;
+ sent_row_count= 0L;
limit_found_rows= 0;
row_count_func= -1;
statement_id_counter= 0UL;
-#ifdef ERROR_INJECT_SUPPORT
- error_inject_value= 0UL;
-#endif
// Must be reset to handle error with THD's created for init of mysqld
lex->current_select= 0;
start_time=(time_t) 0;
@@ -636,7 +515,6 @@ THD::THD()
file_id = 0;
query_id= 0;
query_name_consts= 0;
- warn_id= 0;
db_charset= global_system_variables.collation_database;
bzero(ha_data, sizeof(ha_data));
mysys_var=0;
@@ -649,9 +527,6 @@ THD::THD()
net.vio=0;
#endif
client_capabilities= 0; // minimalistic client
-#ifdef HAVE_QUERY_CACHE
- query_cache_init_query(&net); // If error on boot
-#endif
ull=0;
system_thread= NON_SYSTEM_THREAD;
cleanup_done= abort_on_warning= no_warnings_for_error= 0;
@@ -672,15 +547,13 @@ THD::THD()
*scramble= '\0';
init();
- /* Initialize sub structures */
- init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
profiling.set_thd(this);
#endif
user_connect=(USER_CONN *)0;
- hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
- (hash_get_key) get_var_key,
- (hash_free_key) free_user_var, 0);
+ my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
+ (my_hash_get_key) get_var_key,
+ (my_hash_free_key) free_user_var, 0);
sp_proc_cache= NULL;
sp_func_cache= NULL;
@@ -721,19 +594,27 @@ void THD::push_internal_handler(Internal_error_handler *handler)
}
}
-
-bool THD::handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level)
+bool THD::handle_condition(uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
{
if (!m_internal_handler)
+ {
+ *cond_hdl= NULL;
return FALSE;
+ }
for (Internal_error_handler *error_handler= m_internal_handler;
error_handler;
error_handler= m_internal_handler->m_prev_internal_handler)
{
- if (error_handler->handle_error(sql_errno, message, level, this))
- return TRUE;
+ if (error_handler->handle_condition(this, sql_errno, sqlstate, level, msg,
+ cond_hdl))
+ {
+ return TRUE;
+ }
}
return FALSE;
@@ -746,6 +627,207 @@ void THD::pop_internal_handler()
m_internal_handler= m_internal_handler->m_prev_internal_handler;
}
+
+void THD::raise_error(uint sql_errno)
+{
+ const char* msg= ER(sql_errno);
+ (void) raise_condition(sql_errno,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_ERROR,
+ msg);
+}
+
+void THD::raise_error_printf(uint sql_errno, ...)
+{
+ va_list args;
+ char ebuff[MYSQL_ERRMSG_SIZE];
+ DBUG_ENTER("THD::raise_error_printf");
+ DBUG_PRINT("my", ("nr: %d errno: %d", sql_errno, errno));
+ const char* format= ER(sql_errno);
+ va_start(args, sql_errno);
+ my_vsnprintf(ebuff, sizeof(ebuff), format, args);
+ va_end(args);
+ (void) raise_condition(sql_errno,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_ERROR,
+ ebuff);
+ DBUG_VOID_RETURN;
+}
+
+void THD::raise_warning(uint sql_errno)
+{
+ const char* msg= ER(sql_errno);
+ (void) raise_condition(sql_errno,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_WARN,
+ msg);
+}
+
+void THD::raise_warning_printf(uint sql_errno, ...)
+{
+ va_list args;
+ char ebuff[MYSQL_ERRMSG_SIZE];
+ DBUG_ENTER("THD::raise_warning_printf");
+ DBUG_PRINT("enter", ("warning: %u", sql_errno));
+ const char* format= ER(sql_errno);
+ va_start(args, sql_errno);
+ my_vsnprintf(ebuff, sizeof(ebuff), format, args);
+ va_end(args);
+ (void) raise_condition(sql_errno,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_WARN,
+ ebuff);
+ DBUG_VOID_RETURN;
+}
+
+void THD::raise_note(uint sql_errno)
+{
+ DBUG_ENTER("THD::raise_note");
+ DBUG_PRINT("enter", ("code: %d", sql_errno));
+ if (!(this->options & OPTION_SQL_NOTES))
+ DBUG_VOID_RETURN;
+ const char* msg= ER(sql_errno);
+ (void) raise_condition(sql_errno,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_NOTE,
+ msg);
+ DBUG_VOID_RETURN;
+}
+
+void THD::raise_note_printf(uint sql_errno, ...)
+{
+ va_list args;
+ char ebuff[MYSQL_ERRMSG_SIZE];
+ DBUG_ENTER("THD::raise_note_printf");
+ DBUG_PRINT("enter",("code: %u", sql_errno));
+ if (!(this->options & OPTION_SQL_NOTES))
+ DBUG_VOID_RETURN;
+ const char* format= ER(sql_errno);
+ va_start(args, sql_errno);
+ my_vsnprintf(ebuff, sizeof(ebuff), format, args);
+ va_end(args);
+ (void) raise_condition(sql_errno,
+ NULL,
+ MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ebuff);
+ DBUG_VOID_RETURN;
+}
+
+MYSQL_ERROR* THD::raise_condition(uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg)
+{
+ MYSQL_ERROR *cond= NULL;
+ DBUG_ENTER("THD::raise_condition");
+
+ if (!(this->options & OPTION_SQL_NOTES) &&
+ (level == MYSQL_ERROR::WARN_LEVEL_NOTE))
+ DBUG_RETURN(NULL);
+
+ warning_info->opt_clear_warning_info(query_id);
+
+ /*
+ TODO: replace by DBUG_ASSERT(sql_errno != 0) once all bugs similar to
+ Bug#36768 are fixed: a SQL condition must have a real (!=0) error number
+ so that it can be caught by handlers.
+ */
+ if (sql_errno == 0)
+ sql_errno= ER_UNKNOWN_ERROR;
+ if (msg == NULL)
+ msg= ER(sql_errno);
+ if (sqlstate == NULL)
+ sqlstate= mysql_errno_to_sqlstate(sql_errno);
+
+ if ((level == MYSQL_ERROR::WARN_LEVEL_WARN) &&
+ really_abort_on_warning())
+ {
+ /*
+ FIXME:
+ push_warning and strict SQL_MODE case.
+ */
+ level= MYSQL_ERROR::WARN_LEVEL_ERROR;
+ killed= THD::KILL_BAD_DATA;
+ }
+
+ switch (level)
+ {
+ case MYSQL_ERROR::WARN_LEVEL_NOTE:
+ case MYSQL_ERROR::WARN_LEVEL_WARN:
+ got_warning= 1;
+ break;
+ case MYSQL_ERROR::WARN_LEVEL_ERROR:
+ break;
+ default:
+ DBUG_ASSERT(FALSE);
+ }
+
+ if (handle_condition(sql_errno, sqlstate, level, msg, &cond))
+ DBUG_RETURN(cond);
+
+ if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ {
+ is_slave_error= 1; // needed to catch query errors during replication
+
+ /*
+ thd->lex->current_select == 0 if lex structure is not inited
+ (not query command (COM_QUERY))
+ */
+ if (lex->current_select &&
+ lex->current_select->no_error && !is_fatal_error)
+ {
+ DBUG_PRINT("error",
+ ("Error converted to warning: current_select: no_error %d "
+ "fatal_error: %d",
+ (lex->current_select ?
+ lex->current_select->no_error : 0),
+ (int) is_fatal_error));
+ }
+ else
+ {
+ if (! stmt_da->is_error())
+ stmt_da->set_error_status(this, sql_errno, msg, sqlstate);
+ }
+ }
+
+ /*
+ If a continue handler is found, the error message will be cleared
+ by the stored procedures code.
+ */
+ if (!is_fatal_error && spcont &&
+ spcont->handle_condition(this, sql_errno, sqlstate, level, msg, &cond))
+ {
+ /*
+ Do not push any warnings, a handled error must be completely
+ silenced.
+ */
+ DBUG_RETURN(cond);
+ }
+
+ /* Un-handled conditions */
+
+ cond= raise_condition_no_handler(sql_errno, sqlstate, level, msg);
+ DBUG_RETURN(cond);
+}
+
+MYSQL_ERROR*
+THD::raise_condition_no_handler(uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg)
+{
+ MYSQL_ERROR *cond= NULL;
+ DBUG_ENTER("THD::raise_condition_no_handler");
+
+ query_cache_abort(&query_cache_tls);
+
+ /* FIXME: broken special case */
+ if (no_warnings_for_error && (level == MYSQL_ERROR::WARN_LEVEL_ERROR))
+ DBUG_RETURN(NULL);
+
+ cond= warning_info->push_warning(this, sql_errno, sqlstate, level, msg);
+ DBUG_RETURN(cond);
+}
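For illustration only (editor's sketch, not part of this patch): server code is expected to go through the new public THD::raise_* wrappers rather than filling the diagnostics area by hand. The helper function and the error code parameter below are hypothetical placeholders.

  /* Editor's sketch: exercising the new THD::raise_* wrappers. */
  static void report_condition_example(THD *thd, uint code)
  {
    thd->raise_note(code);       /* suppressed unless OPTION_SQL_NOTES is set */
    thd->raise_warning(code);    /* message text taken verbatim from ER(code) */
    thd->raise_error(code);      /* also records the error in thd->stmt_da    */
    /*
      The *_printf variants run ER(code) through my_vsnprintf() with the
      extra arguments, e.g. thd->raise_warning_printf(code, "some detail")
      when ER(code) contains a matching %s placeholder.
    */
  }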
extern "C"
void *thd_alloc(MYSQL_THD thd, unsigned int size)
{
@@ -834,9 +916,6 @@ void THD::init(void)
TL_WRITE_LOW_PRIORITY :
TL_WRITE);
session_tx_isolation= (enum_tx_isolation) variables.tx_isolation;
- warn_list.empty();
- bzero((char*) warn_count, sizeof(warn_count));
- total_warn_count= 0;
update_charset();
reset_current_stmt_binlog_row_based();
bzero((char *) &status_var, sizeof(status_var));
@@ -894,9 +973,9 @@ void THD::change_user(void)
cleanup_done= 0;
init();
stmt_map.reset();
- hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
- (hash_get_key) get_var_key,
- (hash_free_key) free_user_var, 0);
+ my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
+ (my_hash_get_key) get_var_key,
+ (my_hash_free_key) free_user_var, 0);
sp_cache_clear(&sp_proc_cache);
sp_cache_clear(&sp_func_cache);
}
@@ -933,7 +1012,7 @@ void THD::cleanup(void)
mysql_ha_cleanup(this);
delete_dynamic(&user_var_events);
- hash_free(&user_vars);
+ my_hash_free(&user_vars);
close_temporary_tables(this);
my_free((char*) variables.time_format, MYF(MY_ALLOW_ZERO_PTR));
my_free((char*) variables.date_format, MYF(MY_ALLOW_ZERO_PTR));
@@ -985,7 +1064,6 @@ THD::~THD()
DBUG_PRINT("info", ("freeing security context"));
main_security_ctx.destroy();
safeFree(db);
- free_root(&warn_root,MYF(0));
#ifdef USING_TRANSACTIONS
free_root(&transaction.mem_root,MYF(0));
#endif
@@ -1452,8 +1530,8 @@ int THD::send_explain_fields(select_result *result)
}
item->maybe_null= 1;
field_list.push_back(new Item_empty_string("Extra", 255, cs));
- return (result->send_fields(field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF));
+ return (result->send_result_set_metadata(field_list,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF));
}
#ifdef SIGNAL_WITH_VIO_CLOSE
@@ -1560,13 +1638,17 @@ bool select_result::check_simple_select() const
static String default_line_term("\n",default_charset_info);
static String default_escaped("\\",default_charset_info);
static String default_field_term("\t",default_charset_info);
+static String default_xml_row_term("<row>", default_charset_info);
-sql_exchange::sql_exchange(char *name,bool flag)
+sql_exchange::sql_exchange(char *name, bool flag,
+ enum enum_filetype filetype_arg)
:file_name(name), opt_enclosed(0), dumpfile(flag), skip_lines(0)
{
+ filetype= filetype_arg;
field_term= &default_field_term;
enclosed= line_start= &my_empty_string;
- line_term= &default_line_term;
+ line_term= filetype == FILETYPE_CSV ?
+ &default_line_term : &default_xml_row_term;
escaped= &default_escaped;
cs= NULL;
}
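For illustration (editor's note, not part of the patch): the new filetype argument only changes the default row terminator, as the constructor above shows. The file names below are hypothetical.

  /* Editor's sketch: default line_term depends on the file type. */
  sql_exchange csv_exchange((char*) "data.txt", FALSE);                /* FILETYPE_CSV */
  sql_exchange xml_exchange((char*) "data.xml", FALSE, FILETYPE_XML);  /* LOAD XML     */
  /* csv_exchange.line_term is "\n"; xml_exchange.line_term is "<row>". */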
@@ -1577,10 +1659,10 @@ bool sql_exchange::escaped_given(void)
}
-bool select_send::send_fields(List<Item> &list, uint flags)
+bool select_send::send_result_set_metadata(List<Item> &list, uint flags)
{
bool res;
- if (!(res= thd->protocol->send_fields(&list, flags)))
+ if (!(res= thd->protocol->send_result_set_metadata(&list, flags)))
is_result_set_started= 1;
return res;
}
@@ -1588,21 +1670,19 @@ bool select_send::send_fields(List<Item> &list, uint flags)
void select_send::abort()
{
DBUG_ENTER("select_send::abort");
- if (is_result_set_started && thd->spcont &&
- thd->spcont->find_handler(thd, thd->main_da.sql_errno(),
- MYSQL_ERROR::WARN_LEVEL_ERROR))
+
+ if (is_result_set_started && thd->spcont)
{
/*
We're executing a stored procedure, have an open result
- set, an SQL exception condition and a handler for it.
- In this situation we must abort the current statement,
- silence the error and start executing the continue/exit
- handler.
+ set and an SQL exception condition. In this situation we
+ must abort the current statement, silence the error and
+ start executing the continue/exit handler if one is found.
Before aborting the statement, let's end the open result set, as
otherwise the client will hang due to the violation of the
client/server protocol.
*/
- thd->protocol->end_partial_result_set(thd);
+ thd->spcont->end_partial_result_set= TRUE;
}
DBUG_VOID_RETURN;
}
@@ -1623,10 +1703,13 @@ void select_send::cleanup()
bool select_send::send_data(List<Item> &items)
{
+ Protocol *protocol= thd->protocol;
+ DBUG_ENTER("select_send::send_data");
+
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
- return 0;
+ DBUG_RETURN(FALSE);
}
/*
@@ -1636,36 +1719,18 @@ bool select_send::send_data(List<Item> &items)
*/
ha_release_temporary_latches(thd);
- List_iterator_fast<Item> li(items);
- Protocol *protocol= thd->protocol;
- char buff[MAX_FIELD_WIDTH];
- String buffer(buff, sizeof(buff), &my_charset_bin);
- DBUG_ENTER("select_send::send_data");
-
protocol->prepare_for_resend();
- Item *item;
- while ((item=li++))
- {
- if (item->send(protocol, &buffer))
- {
- protocol->free(); // Free used buffer
- my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
- break;
- }
- /*
- Reset buffer to its original state, as it may have been altered in
- Item::send().
- */
- buffer.set(buff, sizeof(buff), &my_charset_bin);
- }
- thd->sent_row_count++;
- if (thd->is_error())
+ if (protocol->send_result_set_row(&items))
{
protocol->remove_last_row();
- DBUG_RETURN(1);
+ DBUG_RETURN(TRUE);
}
+
+ thd->sent_row_count++;
+
if (thd->vio_ok())
DBUG_RETURN(protocol->write());
+
DBUG_RETURN(0);
}
@@ -2571,12 +2636,12 @@ Statement_map::Statement_map() :
START_STMT_HASH_SIZE = 16,
START_NAME_HASH_SIZE = 16
};
- hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
- get_statement_id_as_hash_key,
- delete_statement_as_hash_key, MYF(0));
- hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
- (hash_get_key) get_stmt_name_hash_key,
- NULL,MYF(0));
+ my_hash_init(&st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0,
+ get_statement_id_as_hash_key,
+ delete_statement_as_hash_key, MYF(0));
+ my_hash_init(&names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0,
+ (my_hash_get_key) get_stmt_name_hash_key,
+ NULL,MYF(0));
}
@@ -2641,9 +2706,9 @@ int Statement_map::insert(THD *thd, Statement *statement)
err_max:
if (statement->name.str)
- hash_delete(&names_hash, (uchar*) statement);
+ my_hash_delete(&names_hash, (uchar*) statement);
err_names_hash:
- hash_delete(&st_hash, (uchar*) statement);
+ my_hash_delete(&st_hash, (uchar*) statement);
err_st_hash:
return 1;
}
@@ -2664,9 +2729,9 @@ void Statement_map::erase(Statement *statement)
if (statement == last_found_statement)
last_found_statement= 0;
if (statement->name.str)
- hash_delete(&names_hash, (uchar *) statement);
+ my_hash_delete(&names_hash, (uchar *) statement);
- hash_delete(&st_hash, (uchar *) statement);
+ my_hash_delete(&st_hash, (uchar *) statement);
pthread_mutex_lock(&LOCK_prepared_stmt_count);
DBUG_ASSERT(prepared_stmt_count > 0);
prepared_stmt_count--;
@@ -2696,8 +2761,8 @@ Statement_map::~Statement_map()
prepared_stmt_count-= st_hash.records;
pthread_mutex_unlock(&LOCK_prepared_stmt_count);
- hash_free(&names_hash);
- hash_free(&st_hash);
+ my_hash_free(&names_hash);
+ my_hash_free(&st_hash);
}
bool select_dumpvar::send_data(List<Item> &items)
@@ -3265,15 +3330,15 @@ void xid_free_hash(void *ptr)
bool xid_cache_init()
{
pthread_mutex_init(&LOCK_xid_cache, MY_MUTEX_INIT_FAST);
- return hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
- xid_get_hash_key, xid_free_hash, 0) != 0;
+ return my_hash_init(&xid_cache, &my_charset_bin, 100, 0, 0,
+ xid_get_hash_key, xid_free_hash, 0) != 0;
}
void xid_cache_free()
{
- if (hash_inited(&xid_cache))
+ if (my_hash_inited(&xid_cache))
{
- hash_free(&xid_cache);
+ my_hash_free(&xid_cache);
pthread_mutex_destroy(&LOCK_xid_cache);
}
}
@@ -3281,7 +3346,8 @@ void xid_cache_free()
XID_STATE *xid_cache_search(XID *xid)
{
pthread_mutex_lock(&LOCK_xid_cache);
- XID_STATE *res=(XID_STATE *)hash_search(&xid_cache, xid->key(), xid->key_length());
+ XID_STATE *res=(XID_STATE *)my_hash_search(&xid_cache, xid->key(),
+ xid->key_length());
pthread_mutex_unlock(&LOCK_xid_cache);
return res;
}
@@ -3292,7 +3358,7 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state)
XID_STATE *xs;
my_bool res;
pthread_mutex_lock(&LOCK_xid_cache);
- if (hash_search(&xid_cache, xid->key(), xid->key_length()))
+ if (my_hash_search(&xid_cache, xid->key(), xid->key_length()))
res=0;
else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME))))
res=1;
@@ -3311,8 +3377,8 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state)
bool xid_cache_insert(XID_STATE *xid_state)
{
pthread_mutex_lock(&LOCK_xid_cache);
- DBUG_ASSERT(hash_search(&xid_cache, xid_state->xid.key(),
- xid_state->xid.key_length())==0);
+ DBUG_ASSERT(my_hash_search(&xid_cache, xid_state->xid.key(),
+ xid_state->xid.key_length())==0);
my_bool res=my_hash_insert(&xid_cache, (uchar*)xid_state);
pthread_mutex_unlock(&LOCK_xid_cache);
return res;
@@ -3322,7 +3388,7 @@ bool xid_cache_insert(XID_STATE *xid_state)
void xid_cache_delete(XID_STATE *xid_state)
{
pthread_mutex_lock(&LOCK_xid_cache);
- hash_delete(&xid_cache, (uchar *)xid_state);
+ my_hash_delete(&xid_cache, (uchar *)xid_state);
pthread_mutex_unlock(&LOCK_xid_cache);
}
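Editor's note: every hash_*() call in this file is renamed to its my_hash_*() counterpart with unchanged arguments. A condensed sketch of the renamed call sequence, reusing the XID helpers above (the local cache and the function itself are illustrative only):

  /* Editor's sketch of the renamed HASH API, mirroring the calls above. */
  static void hash_api_example(XID *xid)
  {
    HASH cache;
    my_hash_init(&cache, &my_charset_bin, 100, 0, 0,
                 xid_get_hash_key, xid_free_hash, 0);
    uchar *entry= my_hash_search(&cache, xid->key(), xid->key_length());
    if (entry)
      my_hash_delete(&cache, entry);
    my_hash_free(&cache);
  }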
diff --git a/sql/sql_class.h b/sql/sql_class.h
index f5d56192140..01ac3c12641 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright (C) 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,6 +14,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifndef SQL_CLASS_INCLUDED
+#define SQL_CLASS_INCLUDED
+
/* Classes in mysql */
#ifdef USE_PRAGMA_INTERFACE
@@ -23,51 +26,8 @@
#include "log.h"
#include "rpl_tblmap.h"
-/**
- An interface that is used to take an action when
- the locking module notices that a table version has changed
- since the last execution. "Table" here may refer to any kind of
- table -- a base table, a temporary table, a view or an
- information schema table.
-
- When we open and lock tables for execution of a prepared
- statement, we must verify that they did not change
- since statement prepare. If some table did change, the statement
- parse tree *may* be no longer valid, e.g. in case it contains
- optimizations that depend on table metadata.
-
- This class provides an interface (a method) that is
- invoked when such a situation takes place.
- The implementation of the method simply reports an error, but
- the exact details depend on the nature of the SQL statement.
-
- At most 1 instance of this class is active at a time, in which
- case THD::m_reprepare_observer is not NULL.
-
- @sa check_and_update_table_version() for details of the
- version tracking algorithm
-
- @sa Open_tables_state::m_reprepare_observer for the life cycle
- of metadata observers.
-*/
-
-class Reprepare_observer
-{
-public:
- /**
- Check if a change of metadata is OK. In future
- the signature of this method may be extended to accept the old
- and the new versions, but since currently the check is very
- simple, we only need the THD to report an error.
- */
- bool report_error(THD *thd);
- bool is_invalidated() const { return m_invalidated; }
- void reset_reprepare_observer() { m_invalidated= FALSE; }
-private:
- bool m_invalidated;
-};
-
+class Reprepare_observer;
class Relay_log_info;
class Query_log_event;
@@ -88,6 +48,7 @@ enum enum_slave_exec_mode { SLAVE_EXEC_MODE_STRICT,
SLAVE_EXEC_MODE_LAST_BIT};
enum enum_mark_columns
{ MARK_COLUMNS_NONE, MARK_COLUMNS_READ, MARK_COLUMNS_WRITE};
+enum enum_filetype { FILETYPE_CSV, FILETYPE_XML };
extern char internal_table_name[2];
extern char empty_c_string[1];
@@ -146,9 +107,14 @@ typedef struct st_copy_info {
class Key_part_spec :public Sql_alloc {
public:
- const char *field_name;
+ LEX_STRING field_name;
uint length;
- Key_part_spec(const char *name,uint len=0) :field_name(name), length(len) {}
+ Key_part_spec(const LEX_STRING &name, uint len)
+ : field_name(name), length(len)
+ {}
+ Key_part_spec(const char *name, const size_t name_len, uint len)
+ : length(len)
+ { field_name.str= (char *)name; field_name.length= name_len; }
bool operator==(const Key_part_spec& other) const;
/**
Construct a copy of this Key_part_spec. field_name is copied
@@ -201,15 +167,24 @@ public:
enum Keytype type;
KEY_CREATE_INFO key_create_info;
List<Key_part_spec> columns;
- const char *name;
+ LEX_STRING name;
bool generated;
- Key(enum Keytype type_par, const char *name_arg,
+ Key(enum Keytype type_par, const LEX_STRING &name_arg,
KEY_CREATE_INFO *key_info_arg,
bool generated_arg, List<Key_part_spec> &cols)
:type(type_par), key_create_info(*key_info_arg), columns(cols),
name(name_arg), generated(generated_arg)
{}
+ Key(enum Keytype type_par, const char *name_arg, size_t name_len_arg,
+ KEY_CREATE_INFO *key_info_arg, bool generated_arg,
+ List<Key_part_spec> &cols)
+ :type(type_par), key_create_info(*key_info_arg), columns(cols),
+ generated(generated_arg)
+ {
+ name.str= (char *)name_arg;
+ name.length= name_len_arg;
+ }
Key(const Key &rhs, MEM_ROOT *mem_root);
virtual ~Key() {}
/* Equality comparison of keys (ignoring name) */
@@ -234,7 +209,7 @@ public:
Table_ident *ref_table;
List<Key_part_spec> ref_columns;
uint delete_opt, update_opt, match_opt;
- Foreign_key(const char *name_arg, List<Key_part_spec> &cols,
+ Foreign_key(const LEX_STRING &name_arg, List<Key_part_spec> &cols,
Table_ident *table, List<Key_part_spec> &ref_cols,
uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg)
:Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols),
@@ -267,6 +242,62 @@ public:
LEX_COLUMN (const String& x,const uint& y ): column (x),rights (y) {}
};
+/**
+ Query_cache_tls -- query cache thread local data.
+*/
+
+struct Query_cache_block;
+
+struct Query_cache_tls
+{
+ /*
+ 'first_query_block' should be accessed only via query cache
+ functions and methods to maintain proper locking.
+ */
+ Query_cache_block *first_query_block;
+ void set_first_query_block(Query_cache_block *first_query_block_arg)
+ {
+ first_query_block= first_query_block_arg;
+ }
+
+ Query_cache_tls() :first_query_block(NULL) {}
+};
+
+/* SIGNAL / RESIGNAL / GET DIAGNOSTICS */
+
+/**
+ This enumeration list all the condition item names of a condition in the
+ SQL condition area.
+*/
+typedef enum enum_diag_condition_item_name
+{
+ /*
+ Conditions that can be set by the user (SIGNAL/RESIGNAL),
+ and by the server implementation.
+ */
+
+ DIAG_CLASS_ORIGIN= 0,
+ FIRST_DIAG_SET_PROPERTY= DIAG_CLASS_ORIGIN,
+ DIAG_SUBCLASS_ORIGIN= 1,
+ DIAG_CONSTRAINT_CATALOG= 2,
+ DIAG_CONSTRAINT_SCHEMA= 3,
+ DIAG_CONSTRAINT_NAME= 4,
+ DIAG_CATALOG_NAME= 5,
+ DIAG_SCHEMA_NAME= 6,
+ DIAG_TABLE_NAME= 7,
+ DIAG_COLUMN_NAME= 8,
+ DIAG_CURSOR_NAME= 9,
+ DIAG_MESSAGE_TEXT= 10,
+ DIAG_MYSQL_ERRNO= 11,
+ LAST_DIAG_SET_PROPERTY= DIAG_MYSQL_ERRNO
+} Diag_condition_item_name;
+
+/**
+ Name of each diagnostic condition item.
+ This array is indexed by Diag_condition_item_name.
+*/
+extern const LEX_STRING Diag_condition_item_names[];
+
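Editor's note: the array is meant to be indexed by the enum above; a one-line illustration (the array contents themselves are defined elsewhere in the tree):

  /* Editor's sketch: looking up a condition item name. */
  const LEX_STRING *item= &Diag_condition_item_names[DIAG_MESSAGE_TEXT];
  /* item->str / item->length hold the SQL name of that condition item. */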
#include "sql_lex.h" /* Must be here */
class Delayed_insert;
@@ -390,6 +421,8 @@ struct system_variables
CHARSET_INFO *collation_database;
CHARSET_INFO *collation_connection;
+ /* Error messages */
+ MY_LOCALE *lc_messages;
/* Locale Support */
MY_LOCALE *lc_time_names;
@@ -706,8 +739,8 @@ public:
Statement *find_by_name(LEX_STRING *name)
{
Statement *stmt;
- stmt= (Statement*)hash_search(&names_hash, (uchar*)name->str,
- name->length);
+ stmt= (Statement*)my_hash_search(&names_hash, (uchar*)name->str,
+ name->length);
return stmt;
}
@@ -716,7 +749,7 @@ public:
if (last_found_statement == 0 || id != last_found_statement->id)
{
Statement *stmt;
- stmt= (Statement *) hash_search(&st_hash, (uchar *) &id, sizeof(id));
+ stmt= (Statement *) my_hash_search(&st_hash, (uchar *) &id, sizeof(id));
if (stmt && stmt->name.str)
return NULL;
last_found_statement= stmt;
@@ -1043,12 +1076,12 @@ protected:
public:
/**
- Handle an error condition.
+ Handle an SQL condition.
This method can be implemented by a subclass to achieve any of the
following:
- - mask an error internally, prevent exposing it to the user,
- - mask an error and throw another one instead.
- When this method returns true, the error condition is considered
+ - mask a warning/error internally, prevent exposing it to the user,
+ - mask a warning/error and throw another one instead.
+ When this method returns true, the SQL condition is considered
'handled', and will not be propagated to upper layers.
It is the responsibility of the code installing an internal handler
to then check for trapped conditions, and implement logic to recover
@@ -1062,15 +1095,17 @@ public:
before removing it from the exception stack with
<code>THD::pop_internal_handler()</code>.
- @param sql_errno the error number
- @param level the error level
@param thd the calling thread
- @return true if the error is handled
+ @param[out] cond_hdl the condition raised, if any
+ @return true if the condition is handled
*/
- virtual bool handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd) = 0;
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl) = 0;
+
private:
Internal_error_handler *m_prev_internal_handler;
friend class THD;
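As a concrete illustration of the contract documented above, a hypothetical handler that silences one specific error code could look as follows (editor's sketch, not part of the commit; Drop_table_error_handler below follows the same shape, and the class name and error code here are only examples).

  /* Editor's sketch: trap one specific error code, let everything else through. */
  class Silence_one_error_handler : public Internal_error_handler
  {
  public:
    Silence_one_error_handler(uint errno_arg)
      : m_errno(errno_arg), m_caught(FALSE)
    {}
    bool handle_condition(THD *thd,
                          uint sql_errno,
                          const char* sqlstate,
                          MYSQL_ERROR::enum_warning_level level,
                          const char* msg,
                          MYSQL_ERROR ** cond_hdl)
    {
      *cond_hdl= NULL;
      if (sql_errno == m_errno)
      {
        m_caught= TRUE;
        return TRUE;                  /* handled: not propagated any further */
      }
      return FALSE;                   /* let the next handler deal with it   */
    }
    bool caught() const { return m_caught; }
  private:
    uint m_errno;
    bool m_caught;
  };
  /*
    Typical use, per the contract above:
      Silence_one_error_handler handler(ER_NO_SUCH_TABLE);
      thd->push_internal_handler(&handler);
      ... guarded operation ...
      thd->pop_internal_handler();
      if (handler.caught())
        ... recover ...
  */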
@@ -1085,10 +1120,12 @@ private:
class Dummy_error_handler : public Internal_error_handler
{
public:
- bool handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd)
+ bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl)
{
/* Ignore error */
return TRUE;
@@ -1097,7 +1134,7 @@ public:
/**
- This class is an internal error handler implementation for
+ This class is an internal error handler implementation for
DROP TABLE statements. The thing is that there may be warnings during
execution of these statements, which should not be exposed to the user.
This class is intended to silence such warnings.
@@ -1111,10 +1148,12 @@ public:
{ }
public:
- bool handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
private:
Internal_error_handler *m_err_handler;
@@ -1122,123 +1161,6 @@ private:
/**
- Stores status of the currently executed statement.
- Cleared at the beginning of the statement, and then
- can hold either OK, ERROR, or EOF status.
- Can not be assigned twice per statement.
-*/
-
-class Diagnostics_area
-{
-public:
- enum enum_diagnostics_status
- {
- /** The area is cleared at start of a statement. */
- DA_EMPTY= 0,
- /** Set whenever one calls my_ok(). */
- DA_OK,
- /** Set whenever one calls my_eof(). */
- DA_EOF,
- /** Set whenever one calls my_error() or my_message(). */
- DA_ERROR,
- /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */
- DA_DISABLED
- };
- /** True if status information is sent to the client. */
- bool is_sent;
- /** Set to make set_error_status after set_{ok,eof}_status possible. */
- bool can_overwrite_status;
-
- void set_ok_status(THD *thd, ha_rows affected_rows_arg,
- ulonglong last_insert_id_arg,
- const char *message);
- void set_eof_status(THD *thd);
- void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg);
-
- void disable_status();
-
- void reset_diagnostics_area();
-
- bool is_set() const { return m_status != DA_EMPTY; }
- bool is_error() const { return m_status == DA_ERROR; }
- bool is_eof() const { return m_status == DA_EOF; }
- bool is_ok() const { return m_status == DA_OK; }
- bool is_disabled() const { return m_status == DA_DISABLED; }
- enum_diagnostics_status status() const { return m_status; }
-
- const char *message() const
- { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; }
-
- uint sql_errno() const
- { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; }
-
- uint server_status() const
- {
- DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
- return m_server_status;
- }
-
- ha_rows affected_rows() const
- { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; }
-
- ulonglong last_insert_id() const
- { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; }
-
- uint total_warn_count() const
- {
- DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
- return m_total_warn_count;
- }
-
- Diagnostics_area() { reset_diagnostics_area(); }
-
-private:
- /** Message buffer. Can be used by OK or ERROR status. */
- char m_message[MYSQL_ERRMSG_SIZE];
- /**
- SQL error number. One of ER_ codes from share/errmsg.txt.
- Set by set_error_status.
- */
- uint m_sql_errno;
-
- /**
- Copied from thd->server_status when the diagnostics area is assigned.
- We need this member as some places in the code use the following pattern:
- thd->server_status|= ...
- my_eof(thd);
- thd->server_status&= ~...
- Assigned by OK, EOF or ERROR.
- */
- uint m_server_status;
- /**
- The number of rows affected by the last statement. This is
- semantically close to thd->row_count_func, but has a different
- life cycle. thd->row_count_func stores the value returned by
- function ROW_COUNT() and is cleared only by statements that
- update its value, such as INSERT, UPDATE, DELETE and few others.
- This member is cleared at the beginning of the next statement.
-
- We could possibly merge the two, but life cycle of thd->row_count_func
- can not be changed.
- */
- ha_rows m_affected_rows;
- /**
- Similarly to the previous member, this is a replacement of
- thd->first_successful_insert_id_in_prev_stmt, which is used
- to implement LAST_INSERT_ID().
- */
- ulonglong m_last_insert_id;
- /** The total number of warnings. */
- uint m_total_warn_count;
- enum_diagnostics_status m_status;
- /**
- @todo: the following THD members belong here:
- - warn_list, warn_count,
- */
-};
-
-
-/**
Storage engine specific thread local data.
*/
@@ -1264,6 +1186,7 @@ struct Ha_data
Ha_data() :ha_ptr(NULL) {}
};
+extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
/**
@class THD
@@ -1305,8 +1228,10 @@ public:
*/
struct st_mysql_stmt *current_stmt;
#endif
+#ifdef HAVE_QUERY_CACHE
+ Query_cache_tls query_cache_tls;
+#endif
NET net; // client connection descriptor
- MEM_ROOT warn_root; // For warnings and errors
Protocol *protocol; // Current protocol
Protocol_text protocol_text; // Normal protocol
Protocol_binary protocol_binary; // Binary protocol
@@ -1722,17 +1647,9 @@ public:
table_map used_tables;
USER_CONN *user_connect;
CHARSET_INFO *db_charset;
- /*
- FIXME: this, and some other variables like 'count_cuted_fields'
- maybe should be statement/cursor local, that is, moved to Statement
- class. With current implementation warnings produced in each prepared
- statement/cursor settle here.
- */
- List <MYSQL_ERROR> warn_list;
- uint warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END];
- uint total_warn_count;
- Diagnostics_area main_da;
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+ Warning_info *warning_info;
+ Diagnostics_area *stmt_da;
+#if defined(ENABLED_PROFILING)
PROFILING profiling;
#endif
@@ -1744,20 +1661,12 @@ public:
from table are necessary for this select, to check if it's necessary to
update auto-updatable fields (like auto_increment and timestamp).
*/
- query_id_t query_id, warn_id;
+ query_id_t query_id;
ulong col_access;
-#ifdef ERROR_INJECT_SUPPORT
- ulong error_inject_value;
-#endif
/* Statement id is thread-wide. This counter is used to generate ids */
ulong statement_id_counter;
ulong rand_saved_seed1, rand_saved_seed2;
- /*
- Row counter, mainly for errors and warnings. Not increased in
- create_sort_index(); may differ from examined_row_count.
- */
- ulong row_count;
pthread_t real_id; /* For debugging */
my_thread_id thread_id;
uint tmp_table, global_read_lock;
@@ -1985,7 +1894,7 @@ public:
inline void exit_cond(const char* old_msg)
{
/*
- Putting the mutex unlock in exit_cond() ensures that
+ Putting the mutex unlock in thd->exit_cond() ensures that
mysys_var->current_mutex is always unlocked _before_ mysys_var->mutex is
locked (if that would not be the case, you'll get a deadlock if someone
does a THD::awake() on you).
@@ -1996,6 +1905,7 @@ public:
mysys_var->current_cond = 0;
proc_info = old_msg;
pthread_mutex_unlock(&mysys_var->mutex);
+ return;
}
inline time_t query_start() { query_start_used=1; return start_time; }
inline void set_time()
@@ -2066,8 +1976,8 @@ public:
inline void clear_error()
{
DBUG_ENTER("clear_error");
- if (main_da.is_error())
- main_da.reset_diagnostics_area();
+ if (stmt_da->is_error())
+ stmt_da->reset_diagnostics_area();
is_slave_error= 0;
DBUG_VOID_RETURN;
}
@@ -2099,7 +2009,7 @@ public:
To raise this flag, use my_error().
*/
- inline bool is_error() const { return main_da.is_error(); }
+ inline bool is_error() const { return stmt_da->is_error(); }
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
@@ -2299,19 +2209,107 @@ public:
void push_internal_handler(Internal_error_handler *handler);
/**
- Handle an error condition.
- @param sql_errno the error number
- @param level the error level
- @return true if the error is handled
- */
- virtual bool handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level);
+ Handle an SQL condition.
+ @param sql_errno the condition error number
+ @param sqlstate the condition sqlstate
+ @param level the condition level
+ @param msg the condition message text
+ @param[out] cond_hdl the SQL condition raised, if any
+ @return true if the condition is handled
+ */
+ virtual bool handle_condition(uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
/**
Remove the error handler last pushed.
*/
void pop_internal_handler();
+ /**
+ Raise an exception condition.
+ @param code the MYSQL_ERRNO error code of the error
+ */
+ void raise_error(uint code);
+
+ /**
+ Raise an exception condition, with a formatted message.
+ @param code the MYSQL_ERRNO error code of the error
+ */
+ void raise_error_printf(uint code, ...);
+
+ /**
+ Raise a completion condition (warning).
+ @param code the MYSQL_ERRNO error code of the warning
+ */
+ void raise_warning(uint code);
+
+ /**
+ Raise a completion condition (warning), with a formatted message.
+ @param code the MYSQL_ERRNO error code of the warning
+ */
+ void raise_warning_printf(uint code, ...);
+
+ /**
+ Raise a completion condition (note), with a fixed message.
+ @param code the MYSQL_ERRNO error code of the note
+ */
+ void raise_note(uint code);
+
+ /**
+ Raise a completion condition (note), with a formatted message.
+ @param code the MYSQL_ERRNO error code of the note
+ */
+ void raise_note_printf(uint code, ...);
+
+private:
+ /*
+ Only the implementation of the SIGNAL and RESIGNAL statements
+ is permitted to raise SQL conditions in a generic way,
+ or to raise them by bypassing handlers (RESIGNAL).
+ To raise a SQL condition, the code should use the public
+ raise_error() or raise_warning() methods provided by class THD.
+ */
+ friend class Signal_common;
+ friend class Signal_statement;
+ friend class Resignal_statement;
+ friend void push_warning(THD*, MYSQL_ERROR::enum_warning_level, uint, const char*);
+ friend void my_message_sql(uint, const char *, myf);
+
+ /**
+ Raise a generic SQL condition.
+ @param sql_errno the condition error number
+ @param sqlstate the condition SQLSTATE
+ @param level the condition level
+ @param msg the condition message text
+ @return The condition raised, or NULL
+ */
+ MYSQL_ERROR*
+ raise_condition(uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg);
+
+ /**
+ Raise a generic SQL condition, without activating any SQL condition
+ handlers.
+ This method is necessary to support the RESIGNAL statement,
+ which is allowed to bypass SQL exception handlers.
+ @param sql_errno the condition error number
+ @param sqlstate the condition SQLSTATE
+ @param level the condition level
+ @param msg the condition message text
+ @return The condition raised, or NULL
+ */
+ MYSQL_ERROR*
+ raise_condition_no_handler(uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg);
+
+public:
/** Overloaded to guard query/query_length fields */
virtual void set_statement(Statement *stmt);
@@ -2339,25 +2337,27 @@ private:
tree itself is reused between executions and thus is stored elsewhere.
*/
MEM_ROOT main_mem_root;
+ Warning_info main_warning_info;
+ Diagnostics_area main_da;
};
-/** A short cut for thd->main_da.set_ok_status(). */
+/** A short cut for thd->stmt_da->set_ok_status(). */
inline void
-my_ok(THD *thd, ha_rows affected_rows= 0, ulonglong id= 0,
+my_ok(THD *thd, ulonglong affected_rows= 0, ulonglong id= 0,
const char *message= NULL)
{
- thd->main_da.set_ok_status(thd, affected_rows, id, message);
+ thd->stmt_da->set_ok_status(thd, affected_rows, id, message);
}
-/** A short cut for thd->main_da.set_eof_status(). */
+/** A short cut for thd->stmt_da->set_eof_status(). */
inline void
my_eof(THD *thd)
{
- thd->main_da.set_eof_status(thd);
+ thd->stmt_da->set_eof_status(thd);
}
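Editor's sketch of how these shortcuts terminate a statement through the new thd->stmt_da pointer; the command function below is hypothetical.

  static bool mysql_example_command(THD *thd)
  {
    ha_rows rows_changed= 0;
    /* ... do the work, updating rows_changed ... */
    if (thd->is_error())
      return TRUE;               /* stmt_da already holds the error status */
    my_ok(thd, rows_changed);    /* sets DA_OK in *thd->stmt_da            */
    return FALSE;
  }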
#define tmp_disable_binlog(A) \
@@ -2376,13 +2376,15 @@ my_eof(THD *thd)
class sql_exchange :public Sql_alloc
{
public:
+ enum enum_filetype filetype; /* load XML, Added by Arnold & Erik */
char *file_name;
String *field_term,*enclosed,*line_term,*line_start,*escaped;
bool opt_enclosed;
bool dumpfile;
ulong skip_lines;
CHARSET_INFO *cs;
- sql_exchange(char *name,bool dumpfile_flag);
+ sql_exchange(char *name, bool dumpfile_flag,
+ enum_filetype filetype_arg= FILETYPE_CSV);
bool escaped_given(void);
};
@@ -2415,7 +2417,7 @@ public:
*/
virtual uint field_count(List<Item> &fields) const
{ return fields.elements; }
- virtual bool send_fields(List<Item> &list, uint flags)=0;
+ virtual bool send_result_set_metadata(List<Item> &list, uint flags)=0;
virtual bool send_data(List<Item> &items)=0;
virtual bool initialize_tables (JOIN *join=0) { return 0; }
virtual void send_error(uint errcode,const char *err);
@@ -2460,7 +2462,7 @@ class select_result_interceptor: public select_result
public:
select_result_interceptor() {} /* Remove gcc warning */
uint field_count(List<Item> &fields) const { return 0; }
- bool send_fields(List<Item> &fields, uint flag) { return FALSE; }
+ bool send_result_set_metadata(List<Item> &fields, uint flag) { return FALSE; }
};
@@ -2473,7 +2475,7 @@ class select_send :public select_result {
bool is_result_set_started;
public:
select_send() :is_result_set_started(FALSE) {}
- bool send_fields(List<Item> &list, uint flags);
+ bool send_result_set_metadata(List<Item> &list, uint flags);
bool send_data(List<Item> &items);
bool send_eof();
virtual bool check_simple_select() const { return FALSE; }
@@ -3051,11 +3053,11 @@ public:
/* Bits in sql_command_flags */
-#define CF_CHANGES_DATA 1
-#define CF_HAS_ROW_COUNT 2
-#define CF_STATUS_COMMAND 4
-#define CF_SHOW_TABLE_COMMAND 8
-#define CF_WRITE_LOGS_COMMAND 16
+#define CF_CHANGES_DATA (1U << 0)
+#define CF_HAS_ROW_COUNT (1U << 1)
+#define CF_STATUS_COMMAND (1U << 2)
+#define CF_SHOW_TABLE_COMMAND (1U << 3)
+#define CF_WRITE_LOGS_COMMAND (1U << 4)
/**
Must be set for SQL statements that may contain
Item expressions and/or use joins and tables.
@@ -3069,7 +3071,17 @@ public:
reprepare. Consequently, complex item expressions and
joins are currently prohibited in these statements.
*/
-#define CF_REEXECUTION_FRAGILE 32
+#define CF_REEXECUTION_FRAGILE (1U << 5)
+
+/**
+ Diagnostic statement.
+ Diagnostic statements:
+ - SHOW WARNINGS
+ - SHOW ERRORS
+ - GET DIAGNOSTICS (WL#2111)
+ do not modify the diagnostics area during execution.
+*/
+#define CF_DIAGNOSTIC_STMT (1U << 8)
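Editor's note: these bits are OR-ed into the per-command sql_command_flags[] table and tested with a plain mask; the surrounding check below is only illustrative.

  /* Editor's sketch: testing a command-property bit. */
  if (sql_command_flags[thd->lex->sql_command] & CF_DIAGNOSTIC_STMT)
  {
    /* SHOW WARNINGS / SHOW ERRORS: leave the diagnostics area untouched. */
  }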
/* Functions in sql_class.cc */
@@ -3080,3 +3092,4 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
void mark_transaction_to_rollback(THD *thd, bool all);
#endif /* MYSQL_SERVER */
+#endif /* SQL_CLASS_INCLUDED */
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 5bca766114e..959209df412 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -39,10 +39,6 @@
#define MIN_HANDSHAKE_SIZE 6
#endif /* HAVE_OPENSSL */
-#ifdef __WIN__
-extern void win_install_sigabrt_handler();
-#endif
-
/*
Get structure for logging connection data for the current user
*/
@@ -65,7 +61,7 @@ static int get_or_create_user_conn(THD *thd, const char *user,
user_len= strlen(user);
temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1;
(void) pthread_mutex_lock(&LOCK_user_conn);
- if (!(uc = (struct user_conn *) hash_search(&hash_user_connections,
+ if (!(uc = (struct user_conn *) my_hash_search(&hash_user_connections,
(uchar*) temp_user, temp_len)))
{
/* First connection for user; Create a user connection object */
@@ -155,7 +151,15 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc)
end:
if (error)
+ {
uc->connections--; // no need for decrease_user_connections() here
+ /*
+ The thread may be returned to the pool and assigned to a user
+ that doesn't have a limit. Ensure the user is not using someone
+ else's resources.
+ */
+ thd->user_connect= NULL;
+ }
(void) pthread_mutex_unlock(&LOCK_user_conn);
DBUG_RETURN(error);
}
@@ -187,7 +191,7 @@ void decrease_user_connections(USER_CONN *uc)
if (!--uc->connections && !mqh_used)
{
/* Last connection for user; Delete it */
- (void) hash_delete(&hash_user_connections,(uchar*) uc);
+ (void) my_hash_delete(&hash_user_connections,(uchar*) uc);
}
(void) pthread_mutex_unlock(&LOCK_user_conn);
DBUG_VOID_RETURN;
@@ -466,7 +470,10 @@ check_user(THD *thd, enum enum_server_command command,
{
/* mysql_change_db() has pushed the error message. */
if (thd->user_connect)
+ {
decrease_user_connections(thd->user_connect);
+ thd->user_connect= 0;
+ }
DBUG_RETURN(1);
}
}
@@ -490,6 +497,18 @@ check_user(THD *thd, enum enum_server_command command,
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
passwd_len ? ER(ER_YES) : ER(ER_NO));
+ /*
+ Log access denied messages to the error log when log-warnings = 2,
+ so that the overhead of the general query log is not required to
+ track failed connections.
+ */
+ if (global_system_variables.log_warnings > 1)
+ {
+ sql_print_warning(ER(ER_ACCESS_DENIED_ERROR),
+ thd->main_security_ctx.user,
+ thd->main_security_ctx.host_or_ip,
+ passwd_len ? ER(ER_YES) : ER(ER_NO));
+ }
DBUG_RETURN(1);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
@@ -517,10 +536,10 @@ extern "C" void free_user(struct user_conn *uc)
void init_max_user_conn(void)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- (void) hash_init(&hash_user_connections,system_charset_info,max_connections,
- 0,0,
- (hash_get_key) get_key_conn, (hash_free_key) free_user,
- 0);
+ (void)
+ my_hash_init(&hash_user_connections,system_charset_info,max_connections,
+ 0,0, (my_hash_get_key) get_key_conn,
+ (my_hash_free_key) free_user, 0);
#endif
}
@@ -528,7 +547,7 @@ void init_max_user_conn(void)
void free_max_user_conn(void)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- hash_free(&hash_user_connections);
+ my_hash_free(&hash_user_connections);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
@@ -546,8 +565,9 @@ void reset_mqh(LEX_USER *lu, bool get_them= 0)
memcpy(temp_user,lu->user.str,lu->user.length);
memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length);
temp_user[lu->user.length]='\0'; temp_user[temp_len-1]=0;
- if ((uc = (struct user_conn *) hash_search(&hash_user_connections,
- (uchar*) temp_user, temp_len)))
+ if ((uc = (struct user_conn *) my_hash_search(&hash_user_connections,
+ (uchar*) temp_user,
+ temp_len)))
{
uc->questions=0;
get_mqh(temp_user,&temp_user[lu->user.length+1],uc);
@@ -560,8 +580,8 @@ void reset_mqh(LEX_USER *lu, bool get_them= 0)
/* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */
for (uint idx=0;idx < hash_user_connections.records; idx++)
{
- USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections,
- idx);
+ USER_CONN *uc=(struct user_conn *)
+ my_hash_element(&hash_user_connections, idx);
if (get_them)
get_mqh(uc->user,uc->host,uc);
uc->questions=0;
@@ -612,13 +632,8 @@ void thd_init_client_charset(THD *thd, uint cs_number)
bool init_new_connection_handler_thread()
{
pthread_detach_this_thread();
-#if defined(__WIN__)
- win_install_sigabrt_handler();
-#else
- /* Win32 calls this in pthread_create */
if (my_thread_init())
return 1;
-#endif /* __WIN__ */
return 0;
}
@@ -954,11 +969,11 @@ static bool login_connection(THD *thd)
my_net_set_write_timeout(net, connect_timeout);
error= check_connection(thd);
- net_end_statement(thd);
+ thd->protocol->end_statement();
if (error)
{ // Wrong permissions
-#ifdef __NT__
+#ifdef _WIN32
if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE)
my_sleep(1000); /* must wait after eof() */
#endif
@@ -984,7 +999,15 @@ static void end_connection(THD *thd)
NET *net= &thd->net;
plugin_thdvar_cleanup(thd);
if (thd->user_connect)
+ {
decrease_user_connections(thd->user_connect);
+ /*
+ The thread may be returned to the pool and assigned to a user
+ that doesn't have a limit. Ensure the user is not using someone
+ else's resources.
+ */
+ thd->user_connect= NULL;
+ }
if (thd->killed || (net->error && net->vio != 0))
{
@@ -1001,7 +1024,7 @@ static void end_connection(THD *thd)
thd->thread_id,(thd->db ? thd->db : "unconnected"),
sctx->user ? sctx->user : "unauthenticated",
sctx->host_or_ip,
- (thd->main_da.is_error() ? thd->main_da.message() :
+ (thd->stmt_da->is_error() ? thd->stmt_da->message() :
ER(ER_UNKNOWN_ERROR)));
}
}
@@ -1046,7 +1069,7 @@ static void prepare_new_connection_state(THD* thd)
thd->thread_id,(thd->db ? thd->db : "unconnected"),
sctx->user ? sctx->user : "unauthenticated",
sctx->host_or_ip, "init_connect command failed");
- sql_print_warning("%s", thd->main_da.message());
+ sql_print_warning("%s", thd->stmt_da->message());
}
thd->proc_info=0;
thd->set_time();
diff --git a/sql/sql_crypt.h b/sql/sql_crypt.h
index a5a6bee8a58..8d5a761cbdf 100644
--- a/sql/sql_crypt.h
+++ b/sql/sql_crypt.h
@@ -1,3 +1,6 @@
+#ifndef SQL_CRYPT_INCLUDED
+#define SQL_CRYPT_INCLUDED
+
/* Copyright (C) 2000-2001, 2005 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -35,3 +38,5 @@ class SQL_CRYPT :public Sql_alloc
void encode(char *str, uint length);
void decode(char *str, uint length);
};
+
+#endif /* SQL_CRYPT_INCLUDED */
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 51d5a1cec23..ffc3fafe55f 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -90,7 +90,7 @@ class Materialized_cursor: public Server_side_cursor
public:
Materialized_cursor(select_result *result, TABLE *table);
- int fill_item_list(THD *thd, List<Item> &send_fields);
+ int fill_item_list(THD *thd, List<Item> &send_result_set_metadata);
virtual bool is_open() const { return table != 0; }
virtual int open(JOIN *join __attribute__((unused)));
virtual void fetch(ulong num_rows);
@@ -115,7 +115,7 @@ public:
Materialized_cursor *materialized_cursor;
Select_materialize(select_result *result_arg)
:result(result_arg), materialized_cursor(0) {}
- virtual bool send_fields(List<Item> &list, uint flags);
+ virtual bool send_result_set_metadata(List<Item> &list, uint flags);
};
@@ -376,12 +376,12 @@ Sensitive_cursor::open(JOIN *join_arg)
join->change_result(result);
/*
Send fields description to the client; server_status is sent
- in 'EOF' packet, which follows send_fields().
- We don't simply use SEND_EOF flag of send_fields because we also
+ in 'EOF' packet, which follows send_result_set_metadata().
+ We don't simply use SEND_EOF flag of send_result_set_metadata because we also
want to flush the network buffer, which is done only in a standalone
send_eof().
*/
- result->send_fields(*join->fields, Protocol::SEND_NUM_ROWS);
+ result->send_result_set_metadata(*join->fields, Protocol::SEND_NUM_ROWS);
thd->server_status|= SERVER_STATUS_CURSOR_EXISTS;
result->send_eof();
thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS;
@@ -566,14 +566,14 @@ Materialized_cursor::Materialized_cursor(select_result *result_arg,
Preserve the original metadata that would be sent to the client.
@param thd Thread identifier.
- @param send_fields List of fields that would be sent.
+ @param send_result_set_metadata List of fields that would be sent.
*/
-int Materialized_cursor::fill_item_list(THD *thd, List<Item> &send_fields)
+int Materialized_cursor::fill_item_list(THD *thd, List<Item> &send_result_set_metadata)
{
Query_arena backup_arena;
int rc;
- List_iterator_fast<Item> it_org(send_fields);
+ List_iterator_fast<Item> it_org(send_result_set_metadata);
List_iterator_fast<Item> it_dst(item_list);
Item *item_org;
Item *item_dst;
@@ -583,7 +583,7 @@ int Materialized_cursor::fill_item_list(THD *thd, List<Item> &send_fields)
if ((rc= table->fill_item_list(&item_list)))
goto end;
- DBUG_ASSERT(send_fields.elements == item_list.elements);
+ DBUG_ASSERT(send_result_set_metadata.elements == item_list.elements);
/*
Unless we preserve the original metadata, it will be lost,
@@ -623,17 +623,17 @@ int Materialized_cursor::open(JOIN *join __attribute__((unused)))
{
/*
Now send the result set metadata to the client. We need to
- do it here, as in Select_materialize::send_fields the items
- for column types are not yet created (send_fields requires
+ do it here, as in Select_materialize::send_result_set_metadata the items
+ for column types are not yet created (send_result_set_metadata requires
a list of items). The new types may differ from the original
ones sent at prepare if some of them were altered by MySQL
HEAP tables mechanism -- used when create_tmp_field_from_item
may alter the original column type.
- We can't simply supply SEND_EOF flag to send_fields, because
- send_fields doesn't flush the network buffer.
+ We can't simply supply SEND_EOF flag to send_result_set_metadata, because
+ send_result_set_metadata doesn't flush the network buffer.
*/
- rc= result->send_fields(item_list, Protocol::SEND_NUM_ROWS);
+ rc= result->send_result_set_metadata(item_list, Protocol::SEND_NUM_ROWS);
thd->server_status|= SERVER_STATUS_CURSOR_EXISTS;
result->send_eof();
thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS;
@@ -717,7 +717,7 @@ Materialized_cursor::~Materialized_cursor()
Select_materialize
****************************************************************************/
-bool Select_materialize::send_fields(List<Item> &list, uint flags)
+bool Select_materialize::send_result_set_metadata(List<Item> &list, uint flags)
{
DBUG_ASSERT(table == 0);
if (create_result_table(unit->thd, unit->get_unit_column_types(),
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index e6ccd9aa594..3198791d5d1 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -105,8 +105,8 @@ static my_bool lock_db_insert(const char *dbname, uint length)
safe_mutex_assert_owner(&LOCK_lock_db);
- if (!(opt= (my_dblock_t*) hash_search(&lock_db_cache,
- (uchar*) dbname, length)))
+ if (!(opt= (my_dblock_t*) my_hash_search(&lock_db_cache,
+ (uchar*) dbname, length)))
{
/* Db is not in the hash, insert it */
char *tmp_name;
@@ -139,9 +139,9 @@ void lock_db_delete(const char *name, uint length)
{
my_dblock_t *opt;
safe_mutex_assert_owner(&LOCK_lock_db);
- if ((opt= (my_dblock_t *)hash_search(&lock_db_cache,
- (const uchar*) name, length)))
- hash_delete(&lock_db_cache, (uchar*) opt);
+ if ((opt= (my_dblock_t *)my_hash_search(&lock_db_cache,
+ (const uchar*) name, length)))
+ my_hash_delete(&lock_db_cache, (uchar*) opt);
}
@@ -221,14 +221,14 @@ bool my_database_names_init(void)
if (!dboptions_init)
{
dboptions_init= 1;
- error= hash_init(&dboptions, lower_case_table_names ?
- &my_charset_bin : system_charset_info,
- 32, 0, 0, (hash_get_key) dboptions_get_key,
- free_dbopt,0) ||
- hash_init(&lock_db_cache, lower_case_table_names ?
- &my_charset_bin : system_charset_info,
- 32, 0, 0, (hash_get_key) lock_db_get_key,
- lock_db_free_element,0);
+ error= my_hash_init(&dboptions, lower_case_table_names ?
+ &my_charset_bin : system_charset_info,
+ 32, 0, 0, (my_hash_get_key) dboptions_get_key,
+ free_dbopt,0) ||
+ my_hash_init(&lock_db_cache, lower_case_table_names ?
+ &my_charset_bin : system_charset_info,
+ 32, 0, 0, (my_hash_get_key) lock_db_get_key,
+ lock_db_free_element,0);
}
return error;
@@ -245,9 +245,9 @@ void my_database_names_free(void)
if (dboptions_init)
{
dboptions_init= 0;
- hash_free(&dboptions);
+ my_hash_free(&dboptions);
(void) rwlock_destroy(&LOCK_dboptions);
- hash_free(&lock_db_cache);
+ my_hash_free(&lock_db_cache);
}
}
@@ -259,11 +259,11 @@ void my_database_names_free(void)
void my_dbopt_cleanup(void)
{
rw_wrlock(&LOCK_dboptions);
- hash_free(&dboptions);
- hash_init(&dboptions, lower_case_table_names ?
- &my_charset_bin : system_charset_info,
- 32, 0, 0, (hash_get_key) dboptions_get_key,
- free_dbopt,0);
+ my_hash_free(&dboptions);
+ my_hash_init(&dboptions, lower_case_table_names ?
+ &my_charset_bin : system_charset_info,
+ 32, 0, 0, (my_hash_get_key) dboptions_get_key,
+ free_dbopt,0);
rw_unlock(&LOCK_dboptions);
}
@@ -289,7 +289,7 @@ static my_bool get_dbopt(const char *dbname, HA_CREATE_INFO *create)
length= (uint) strlen(dbname);
rw_rdlock(&LOCK_dboptions);
- if ((opt= (my_dbopt_t*) hash_search(&dboptions, (uchar*) dbname, length)))
+ if ((opt= (my_dbopt_t*) my_hash_search(&dboptions, (uchar*) dbname, length)))
{
create->default_table_charset= opt->charset;
error= 0;
@@ -321,7 +321,8 @@ static my_bool put_dbopt(const char *dbname, HA_CREATE_INFO *create)
length= (uint) strlen(dbname);
rw_wrlock(&LOCK_dboptions);
- if (!(opt= (my_dbopt_t*) hash_search(&dboptions, (uchar*) dbname, length)))
+ if (!(opt= (my_dbopt_t*) my_hash_search(&dboptions, (uchar*) dbname,
+ length)))
{
/* Options are not in the hash, insert them */
char *tmp_name;
@@ -361,9 +362,9 @@ void del_dbopt(const char *path)
{
my_dbopt_t *opt;
rw_wrlock(&LOCK_dboptions);
- if ((opt= (my_dbopt_t *)hash_search(&dboptions, (const uchar*) path,
- strlen(path))))
- hash_delete(&dboptions, (uchar*) opt);
+ if ((opt= (my_dbopt_t *)my_hash_search(&dboptions, (const uchar*) path,
+ strlen(path))))
+ my_hash_delete(&dboptions, (uchar*) opt);
rw_unlock(&LOCK_dboptions);
}
@@ -1027,7 +1028,7 @@ exit:
SELECT DATABASE() in the future). For this we free() thd->db and set
it to 0.
*/
- if (thd->db && !strcmp(thd->db, db))
+ if (thd->db && !strcmp(thd->db, db) && error == 0)
mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server);
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
start_waiting_global_read_lock(thd);
@@ -1719,8 +1720,8 @@ lock_databases(THD *thd, const char *db1, uint length1,
{
pthread_mutex_lock(&LOCK_lock_db);
while (!thd->killed &&
- (hash_search(&lock_db_cache,(uchar*) db1, length1) ||
- hash_search(&lock_db_cache,(uchar*) db2, length2)))
+ (my_hash_search(&lock_db_cache,(uchar*) db1, length1) ||
+ my_hash_search(&lock_db_cache,(uchar*) db2, length2)))
{
wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
pthread_mutex_lock(&LOCK_lock_db);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 6b9a83e695b..6d7ece3912b 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1104,6 +1104,7 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
is_temporary_table= true;
handlerton *table_type= table->s->db_type();
TABLE_SHARE *share= table->s;
+ /* Note that a temporary table cannot be partitioned */
if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE))
goto trunc_by_del;
@@ -1142,8 +1143,22 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok)
table_list->db, table_list->table_name);
DBUG_RETURN(TRUE);
}
- if (!ha_check_storage_engine_flag(ha_resolve_by_legacy_type(thd, table_type),
- HTON_CAN_RECREATE))
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ /*
+ TODO: Add support for TRUNCATE PARTITION for NDB and other engines
+ supporting native partitioning
+ */
+ if (table_type != DB_TYPE_PARTITION_DB &&
+ thd->lex->alter_info.flags & ALTER_ADMIN_PARTITION)
+ {
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+#endif
+ if (!ha_check_storage_engine_flag(ha_resolve_by_legacy_type(thd,
+ table_type),
+ HTON_CAN_RECREATE) ||
+ thd->lex->alter_info.flags & ALTER_ADMIN_PARTITION)
goto trunc_by_del;
if (lock_and_wait_for_table_name(thd, table_list))
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 37adf5c403a..9b747759ece 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -178,9 +178,9 @@ exit:
if (orig_table_list->view)
{
if (thd->is_error() &&
- (thd->main_da.sql_errno() == ER_BAD_FIELD_ERROR ||
- thd->main_da.sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
- thd->main_da.sql_errno() == ER_SP_DOES_NOT_EXIST))
+ (thd->stmt_da->sql_errno() == ER_BAD_FIELD_ERROR ||
+ thd->stmt_da->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
+ thd->stmt_da->sql_errno() == ER_SP_DOES_NOT_EXIST))
{
thd->clear_error();
my_error(ER_VIEW_INVALID, MYF(0), orig_table_list->db,
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index 9ea7facbe41..eeefdb99eed 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -1,4 +1,5 @@
-/* Copyright (C) 1995-2002 MySQL AB
+/* Copyright (C) 1995-2002 MySQL AB,
+ Copyright (C) 2008-2009 Sun Microsystems, Inc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -42,133 +43,577 @@ This file contains the implementation of error and warnings related
***********************************************************************/
#include "mysql_priv.h"
+#include "sql_error.h"
#include "sp_rcontext.h"
/*
- Store a new message in an error object
-
- This is used to in group_concat() to register how many warnings we actually
- got after the query has been executed.
+ Design notes about MYSQL_ERROR::m_message_text.
+
+ The member MYSQL_ERROR::m_message_text contains the text associated with
+ an error, warning or note (which are all SQL 'conditions')
+
+ Producer of MYSQL_ERROR::m_message_text:
+ ----------------------------------------
+
+ (#1) the server implementation itself, when invoking functions like
+ my_error() or push_warning()
+
+ (#2) user code in stored programs, when using the SIGNAL statement.
+
+ (#3) user code in stored programs, when using the RESIGNAL statement.
+
+ When invoking my_error(), the error number and message is typically
+ provided like this:
+ - my_error(ER_WRONG_DB_NAME, MYF(0), ...);
+ - my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
+
+ In both cases, the message is retrieved from ER(ER_XXX), which in turn
+ is read from the resource file errmsg.sys at server startup.
+ The strings stored in the errmsg.sys file are expressed in the character set
+ that corresponds to the server --language start option
+ (see error_message_charset_info).
+
+ When executing:
+ - a SIGNAL statement,
+ - a RESIGNAL statement,
+ the message text is provided by the user logic, and is expressed in UTF8.
+
+ Storage of MYSQL_ERROR::m_message_text:
+ ---------------------------------------
+
+ (#4) The class MYSQL_ERROR is used to hold the message text member.
+ This class represents a single SQL condition.
+
+ (#5) The class Warning_info represents a SQL condition area, and contains
+ a collection of SQL conditions in the Warning_info::m_warn_list
+
+ Consumer of MYSQL_ERROR::m_message_text:
+ ----------------------------------------
+
+ (#6) The statements SHOW WARNINGS and SHOW ERRORS display the content of
+ the warning list.
+
+ (#7) The GET DIAGNOSTICS statement (planned, not implemented yet) will
+ also read the content of:
+ - the top level statement condition area (when executed in a query),
+ - a sub statement (when executed in a stored program)
+ and return the data stored in a MYSQL_ERROR.
+
+ (#8) The RESIGNAL statement reads the MYSQL_ERROR caught by an exception
+ handler, to raise a new or modified condition (in #3).
+
+ The big picture
+ ---------------
+ --------------
+ | ^
+ V |
+ my_error(#1) SIGNAL(#2) RESIGNAL(#3) |
+ |(#A) |(#B) |(#C) |
+ | | | |
+ ----------------------------|---------------------------- |
+ | |
+ V |
+ MYSQL_ERROR(#4) |
+ | |
+ | |
+ V |
+ Warning_info(#5) |
+ | |
+ ----------------------------------------------------- |
+ | | | |
+ | | | |
+ | | | |
+ V V V |
+ SHOW WARNINGS(#6) GET DIAGNOSTICS(#7) RESIGNAL(#8) |
+ | | | | |
+ | -------- | V |
+ | | | --------------
+ V | |
+ Connectors | |
+ | | |
+ -------------------------
+ |
+ V
+ Client application
+
+ Current implementation status
+ -----------------------------
+
+ (#1) (my_error) produces data in the 'error_message_charset_info' CHARSET
+
+ (#2) and (#3) (SIGNAL, RESIGNAL) produces data internally in UTF8
+
+ (#6) (SHOW WARNINGS) produces data in the 'error_message_charset_info' CHARSET
+
+ (#7) (GET DIAGNOSTICS) is not implemented.
+
+ (#8) (RESIGNAL) produces data internally in UTF8 (see #3)
+
+ As a result, the design choice for (#4) and (#5) is to store data in
+ the 'error_message_charset_info' CHARSET, to minimize impact on the code base.
+ This is implemented by using 'String MYSQL_ERROR::m_message_text'.
+
+ The UTF8 -> error_message_charset_info conversion is implemented in
+ Signal_common::eval_signal_informations() (for path #B and #C).
+
+ Future work
+ -----------
+
+ - Change (#1) (my_error) to generate errors in UTF8.
+ See WL#751 (Recoding of error messages)
+
+ - Change (#4 and #5) to store message text in UTF8 natively.
+ In practice, this means changing the type of the message text to
+ '<UTF8 String 128 class> MYSQL_ERROR::m_message_text', and is a direct
+ consequence of WL#751.
+
+ - Implement (#7) (GET DIAGNOSTICS).
+ See WL#2111 (Stored Procedures: Implement GET DIAGNOSTICS)
*/
-void MYSQL_ERROR::set_msg(THD *thd, const char *msg_arg)
+MYSQL_ERROR::MYSQL_ERROR()
+ : Sql_alloc(),
+ m_class_origin((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_subclass_origin((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_constraint_catalog((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_constraint_schema((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_constraint_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_catalog_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_schema_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_table_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_column_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_cursor_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_message_text(),
+ m_sql_errno(0),
+ m_level(MYSQL_ERROR::WARN_LEVEL_ERROR),
+ m_mem_root(NULL)
{
- msg= strdup_root(&thd->warn_root, msg_arg);
+ memset(m_returned_sqlstate, 0, sizeof(m_returned_sqlstate));
}
+void MYSQL_ERROR::init(MEM_ROOT *mem_root)
+{
+ DBUG_ASSERT(mem_root != NULL);
+ DBUG_ASSERT(m_mem_root == NULL);
+ m_mem_root= mem_root;
+}
-/*
- Reset all warnings for the thread
-
- SYNOPSIS
- mysql_reset_errors()
- thd Thread handle
- force Reset warnings even if it has been done before
+void MYSQL_ERROR::clear()
+{
+ m_class_origin.length(0);
+ m_subclass_origin.length(0);
+ m_constraint_catalog.length(0);
+ m_constraint_schema.length(0);
+ m_constraint_name.length(0);
+ m_catalog_name.length(0);
+ m_schema_name.length(0);
+ m_table_name.length(0);
+ m_column_name.length(0);
+ m_cursor_name.length(0);
+ m_message_text.length(0);
+ m_sql_errno= 0;
+ m_level= MYSQL_ERROR::WARN_LEVEL_ERROR;
+}
- IMPLEMENTATION
- Don't reset warnings if this has already been called for this query.
- This may happen if one gets a warning during the parsing stage,
- in which case push_warnings() has already called this function.
-*/
+MYSQL_ERROR::MYSQL_ERROR(MEM_ROOT *mem_root)
+ : Sql_alloc(),
+ m_class_origin((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_subclass_origin((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_constraint_catalog((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_constraint_schema((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_constraint_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_catalog_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_schema_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_table_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_column_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_cursor_name((const char*) NULL, 0, & my_charset_utf8_bin),
+ m_message_text(),
+ m_sql_errno(0),
+ m_level(MYSQL_ERROR::WARN_LEVEL_ERROR),
+ m_mem_root(mem_root)
+{
+ DBUG_ASSERT(mem_root != NULL);
+ memset(m_returned_sqlstate, 0, sizeof(m_returned_sqlstate));
+}
-void mysql_reset_errors(THD *thd, bool force)
+static void copy_string(MEM_ROOT *mem_root, String* dst, const String* src)
{
- DBUG_ENTER("mysql_reset_errors");
- if (thd->query_id != thd->warn_id || force)
+ size_t len= src->length();
+ if (len)
{
- thd->warn_id= thd->query_id;
- free_root(&thd->warn_root,MYF(0));
- bzero((char*) thd->warn_count, sizeof(thd->warn_count));
- if (force)
- thd->total_warn_count= 0;
- thd->warn_list.empty();
- thd->row_count= 1; // by default point to row 1
+ char* copy= (char*) alloc_root(mem_root, len + 1);
+ if (copy)
+ {
+ memcpy(copy, src->ptr(), len);
+ copy[len]= '\0';
+ dst->set(copy, len, src->charset());
+ }
}
+ else
+ dst->length(0);
+}
+
+void
+MYSQL_ERROR::copy_opt_attributes(const MYSQL_ERROR *cond)
+{
+ DBUG_ASSERT(this != cond);
+ copy_string(m_mem_root, & m_class_origin, & cond->m_class_origin);
+ copy_string(m_mem_root, & m_subclass_origin, & cond->m_subclass_origin);
+ copy_string(m_mem_root, & m_constraint_catalog, & cond->m_constraint_catalog);
+ copy_string(m_mem_root, & m_constraint_schema, & cond->m_constraint_schema);
+ copy_string(m_mem_root, & m_constraint_name, & cond->m_constraint_name);
+ copy_string(m_mem_root, & m_catalog_name, & cond->m_catalog_name);
+ copy_string(m_mem_root, & m_schema_name, & cond->m_schema_name);
+ copy_string(m_mem_root, & m_table_name, & cond->m_table_name);
+ copy_string(m_mem_root, & m_column_name, & cond->m_column_name);
+ copy_string(m_mem_root, & m_cursor_name, & cond->m_cursor_name);
+}
+
+void
+MYSQL_ERROR::set(uint sql_errno, const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level, const char* msg)
+{
+ DBUG_ASSERT(sql_errno != 0);
+ DBUG_ASSERT(sqlstate != NULL);
+ DBUG_ASSERT(msg != NULL);
+
+ m_sql_errno= sql_errno;
+ memcpy(m_returned_sqlstate, sqlstate, SQLSTATE_LENGTH);
+ m_returned_sqlstate[SQLSTATE_LENGTH]= '\0';
+
+ set_builtin_message_text(msg);
+ m_level= level;
+}
+
+void
+MYSQL_ERROR::set_builtin_message_text(const char* str)
+{
+ /*
+ See the comments
+ "Design notes about MYSQL_ERROR::m_message_text."
+ */
+ const char* copy;
+
+ copy= strdup_root(m_mem_root, str);
+ m_message_text.set(copy, strlen(copy), error_message_charset_info);
+ DBUG_ASSERT(! m_message_text.is_alloced());
+}
+
+const char*
+MYSQL_ERROR::get_message_text() const
+{
+ return m_message_text.ptr();
+}
+
+int
+MYSQL_ERROR::get_message_octet_length() const
+{
+ return m_message_text.length();
+}
+
+void
+MYSQL_ERROR::set_sqlstate(const char* sqlstate)
+{
+ memcpy(m_returned_sqlstate, sqlstate, SQLSTATE_LENGTH);
+ m_returned_sqlstate[SQLSTATE_LENGTH]= '\0';
+}
+
+/**
+ Clear this diagnostics area.
+
+ Normally called at the end of a statement.
+*/
+
+void
+Diagnostics_area::reset_diagnostics_area()
+{
+ DBUG_ENTER("reset_diagnostics_area");
+#ifdef DBUG_OFF
+ can_overwrite_status= FALSE;
+ /** Don't take chances in production */
+ m_message[0]= '\0';
+ m_sql_errno= 0;
+ m_server_status= 0;
+ m_affected_rows= 0;
+ m_last_insert_id= 0;
+ m_statement_warn_count= 0;
+#endif
+ is_sent= FALSE;
+ /** Tiny reset in debug mode to see garbage right away */
+ m_status= DA_EMPTY;
DBUG_VOID_RETURN;
}
-/*
- Push the warning/error to error list if there is still room in the list
+/**
+ Set OK status -- ends commands that do not return a
+ result set, e.g. INSERT/UPDATE/DELETE.
+*/
- SYNOPSIS
- push_warning()
- thd Thread handle
- level Severity of warning (note, warning, error ...)
- code Error number
- msg Clear error message
-
- RETURN
- pointer on MYSQL_ERROR object
+void
+Diagnostics_area::set_ok_status(THD *thd, ulonglong affected_rows_arg,
+ ulonglong last_insert_id_arg,
+ const char *message_arg)
+{
+ DBUG_ENTER("set_ok_status");
+ DBUG_ASSERT(! is_set());
+ /*
+ In production, refuse to overwrite an error or a custom response
+ with an OK packet.
+ */
+ if (is_error() || is_disabled())
+ return;
+
+ m_server_status= thd->server_status;
+ m_statement_warn_count= thd->warning_info->statement_warn_count();
+ m_affected_rows= affected_rows_arg;
+ m_last_insert_id= last_insert_id_arg;
+ if (message_arg)
+ strmake(m_message, message_arg, sizeof(m_message) - 1);
+ else
+ m_message[0]= '\0';
+ m_status= DA_OK;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Set EOF status.
*/
-MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
- uint code, const char *msg)
+void
+Diagnostics_area::set_eof_status(THD *thd)
{
- MYSQL_ERROR *err= 0;
- DBUG_ENTER("push_warning");
- DBUG_PRINT("enter", ("code: %d, msg: %s", code, msg));
+ DBUG_ENTER("set_eof_status");
+ /* Only allowed to report EOF if no error has been reported yet */
+ DBUG_ASSERT(! is_set());
+ /*
+ In production, refuse to overwrite an error or a custom response
+ with an EOF packet.
+ */
+ if (is_error() || is_disabled())
+ return;
+
+ m_server_status= thd->server_status;
+ /*
+ If inside a stored procedure, do not return the total
+ number of warnings, since they are not available to the client
+ anyway.
+ */
+ m_statement_warn_count= (thd->spcont ?
+ 0 : thd->warning_info->statement_warn_count());
+
+ m_status= DA_EOF;
+ DBUG_VOID_RETURN;
+}
- DBUG_ASSERT(code != 0);
- DBUG_ASSERT(msg != NULL);
+/**
+ Set ERROR status.
+*/
- if (level == MYSQL_ERROR::WARN_LEVEL_NOTE &&
- !(thd->options & OPTION_SQL_NOTES))
- DBUG_RETURN(0);
+void
+Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg,
+ const char *message_arg,
+ const char *sqlstate)
+{
+ DBUG_ENTER("set_error_status");
+ /*
+ Only allowed to report an error if no success has been reported yet.
+ The only exception is when we flush the message to the client:
+ an error can happen during the flush.
+ */
+ DBUG_ASSERT(! is_set() || can_overwrite_status);
+#ifdef DBUG_OFF
+ /*
+ In production, refuse to overwrite a custom response with an
+ ERROR packet.
+ */
+ if (is_disabled())
+ return;
+#endif
+
+ if (sqlstate == NULL)
+ sqlstate= mysql_errno_to_sqlstate(sql_errno_arg);
+
+ m_sql_errno= sql_errno_arg;
+ memcpy(m_sqlstate, sqlstate, SQLSTATE_LENGTH);
+ m_sqlstate[SQLSTATE_LENGTH]= '\0';
+ strmake(m_message, message_arg, sizeof(m_message)-1);
+
+ m_status= DA_ERROR;
+ DBUG_VOID_RETURN;
+}
- if (thd->query_id != thd->warn_id && !thd->spcont)
- mysql_reset_errors(thd, 0);
- thd->got_warning= 1;
- /* Abort if we are using strict mode and we are not using IGNORE */
- if ((int) level >= (int) MYSQL_ERROR::WARN_LEVEL_WARN &&
- thd->really_abort_on_warning())
- {
- /* Avoid my_message() calling push_warning */
- bool no_warnings_for_error= thd->no_warnings_for_error;
- sp_rcontext *spcont= thd->spcont;
+/**
+ Mark the diagnostics area as 'DISABLED'.
- thd->no_warnings_for_error= 1;
- thd->spcont= NULL;
+ This is used in rare cases when the COM_ command at hand sends a response
+ in a custom format. One example is the query cache, another is
+ COM_STMT_PREPARE.
+*/
- thd->killed= THD::KILL_BAD_DATA;
- my_message(code, msg, MYF(0));
+void
+Diagnostics_area::disable_status()
+{
+ DBUG_ASSERT(! is_set());
+ m_status= DA_DISABLED;
+}
+
+Warning_info::Warning_info(ulonglong warn_id_arg)
+ :m_statement_warn_count(0),
+ m_current_row_for_warning(1),
+ m_warn_id(warn_id_arg),
+ m_read_only(FALSE)
+{
+ /* Initialize sub structures */
+ init_sql_alloc(&m_warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
+ m_warn_list.empty();
+ bzero((char*) m_warn_count, sizeof(m_warn_count));
+}
- thd->spcont= spcont;
- thd->no_warnings_for_error= no_warnings_for_error;
- /* Store error in error list (as my_message() didn't do it) */
- level= MYSQL_ERROR::WARN_LEVEL_ERROR;
- }
- if (thd->handle_error(code, msg, level))
- DBUG_RETURN(NULL);
+Warning_info::~Warning_info()
+{
+ free_root(&m_warn_root,MYF(0));
+}
- if (thd->spcont &&
- thd->spcont->handle_error(code, level, thd))
+
+/**
+ Reset the warning information of this connection.
+*/
+
+void Warning_info::clear_warning_info(ulonglong warn_id_arg)
+{
+ m_warn_id= warn_id_arg;
+ free_root(&m_warn_root, MYF(0));
+ bzero((char*) m_warn_count, sizeof(m_warn_count));
+ m_warn_list.empty();
+ m_statement_warn_count= 0;
+ m_current_row_for_warning= 1; /* Start counting from the first row */
+}
+
+void Warning_info::reserve_space(THD *thd, uint count)
+{
+ /* Make room for count conditions */
+ while ((m_warn_list.elements > 0) &&
+ ((m_warn_list.elements + count) > thd->variables.max_error_count))
+ m_warn_list.pop();
+}
+
+/**
+ Append warnings only if the original contents of the routine
+ warning info was replaced.
+*/
+void Warning_info::merge_with_routine_info(THD *thd, Warning_info *source)
+{
+ /*
+ If a routine body is empty or if a routine did not
+ generate any warnings (thus m_warn_id didn't change),
+ do not duplicate our own contents by appending the
+ contents of the called routine. We know that the called
+ routine did not change its warning info.
+
+ On the other hand, if the routine body is not empty and
+ some statement in the routine generates a warning or
+ uses tables, m_warn_id is guaranteed to have changed.
+ In this case we know that the routine warning info
+ contains only new warnings, and thus we perform a copy.
+ */
+ if (m_warn_id != source->m_warn_id)
{
- DBUG_RETURN(NULL);
+ /*
+ If the invocation of the routine was a standalone statement,
+ rather than a sub-statement, in other words, if it's a CALL
+ of a procedure, rather than invocation of a function or a
+ trigger, we need to clear the current contents of the caller's
+ warning info.
+
+ This is per MySQL rules: if a statement generates a warning,
+ warnings from the previous statement are flushed. Normally
+ it's done in push_warning(). However, here we don't use
+ push_warning() to avoid invocation of condition handlers or
+ escalation of warnings to errors.
+ */
+ opt_clear_warning_info(thd->query_id);
+ append_warning_info(thd, source);
}
- query_cache_abort(&thd->net);
+}
+/**
+ Add a warning to the list of warnings. Increment the respective
+ counters.
+*/
+MYSQL_ERROR *Warning_info::push_warning(THD *thd,
+ uint sql_errno, const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char *msg)
+{
+ MYSQL_ERROR *cond= NULL;
- if (thd->warn_list.elements < thd->variables.max_error_count)
+ if (! m_read_only)
{
- /* We have to use warn_root, as mem_root is freed after each query */
- if ((err= new (&thd->warn_root) MYSQL_ERROR(thd, code, level, msg)))
- thd->warn_list.push_back(err, &thd->warn_root);
+ if (m_warn_list.elements < thd->variables.max_error_count)
+ {
+ cond= new (& m_warn_root) MYSQL_ERROR(& m_warn_root);
+ if (cond)
+ {
+ cond->set(sql_errno, sqlstate, level, msg);
+ m_warn_list.push_back(cond, &m_warn_root);
+ }
+ }
+ m_warn_count[(uint) level]++;
}
- thd->warn_count[(uint) level]++;
- thd->total_warn_count++;
- DBUG_RETURN(err);
+
+ m_statement_warn_count++;
+ return cond;
+}
+
+/*
+ Push the warning to error list if there is still room in the list
+
+ SYNOPSIS
+ push_warning()
+ thd Thread handle
+ level Severity of warning (note, warning)
+ code Error number
+ msg Clear error message
+*/
+
+void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
+ uint code, const char *msg)
+{
+ DBUG_ENTER("push_warning");
+ DBUG_PRINT("enter", ("code: %d, msg: %s", code, msg));
+
+ /*
+ Calling push_warning/push_warning_printf with a
+ level of WARN_LEVEL_ERROR *is* a bug.
+ Either use my_error(), or WARN_LEVEL_WARN.
+ Please fix the calling code, and do *NOT*
+ add more work around code in the assert below.
+ */
+ DBUG_ASSERT( (level != MYSQL_ERROR::WARN_LEVEL_ERROR)
+ || (code == ER_CANT_CREATE_TABLE) /* See Bug#47233 */
+ || (code == ER_ILLEGAL_HA_CREATE_OPTION) /* See Bug#47233 */
+ );
+
+ if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ level= MYSQL_ERROR::WARN_LEVEL_WARN;
+
+ (void) thd->raise_condition(code, NULL, level, msg);
+
+ DBUG_VOID_RETURN;
}
+
/*
- Push the warning/error to error list if there is still room in the list
+ Push the warning to error list if there is still room in the list
SYNOPSIS
push_warning_printf()
thd Thread handle
- level Severity of warning (note, warning, error ...)
+ level Severity of warning (note, warning)
code Error number
msg Clear error message
*/
@@ -185,7 +630,8 @@ void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level,
DBUG_ASSERT(format != NULL);
va_start(args,format);
- my_vsnprintf(warning, sizeof(warning), format, args);
+ my_vsnprintf_ex(&my_charset_utf8_general_ci, warning,
+ sizeof(warning), format, args);
va_end(args);
push_warning(thd, level, code, warning);
DBUG_VOID_RETURN;
@@ -217,44 +663,196 @@ const LEX_STRING warning_level_names[]=
};
bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
-{
+{
List<Item> field_list;
DBUG_ENTER("mysqld_show_warnings");
+ DBUG_ASSERT(thd->warning_info->is_read_only());
+
field_list.push_back(new Item_empty_string("Level", 7));
field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG));
field_list.push_back(new Item_empty_string("Message",MYSQL_ERRMSG_SIZE));
- if (thd->protocol->send_fields(&field_list,
+ if (thd->protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
MYSQL_ERROR *err;
SELECT_LEX *sel= &thd->lex->select_lex;
SELECT_LEX_UNIT *unit= &thd->lex->unit;
- ha_rows idx= 0;
+ ulonglong idx= 0;
Protocol *protocol=thd->protocol;
unit->set_limit(sel);
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
while ((err= it++))
{
/* Skip levels that the user is not interested in */
- if (!(levels_to_show & ((ulong) 1 << err->level)))
+ if (!(levels_to_show & ((ulong) 1 << err->get_level())))
continue;
if (++idx <= unit->offset_limit_cnt)
continue;
if (idx > unit->select_limit_cnt)
break;
protocol->prepare_for_resend();
- protocol->store(warning_level_names[err->level].str,
- warning_level_names[err->level].length, system_charset_info);
- protocol->store((uint32) err->code);
- protocol->store(err->msg, (uint) strlen(err->msg), system_charset_info);
+ protocol->store(warning_level_names[err->get_level()].str,
+ warning_level_names[err->get_level()].length,
+ system_charset_info);
+ protocol->store((uint32) err->get_sql_errno());
+ protocol->store(err->get_message_text(),
+ err->get_message_octet_length(),
+ system_charset_info);
if (protocol->write())
DBUG_RETURN(TRUE);
}
my_eof(thd);
+
+ thd->warning_info->set_read_only(FALSE);
+
DBUG_RETURN(FALSE);
}
+
+
+/**
+ Convert a value for inclusion in an error message (see WL#751).
+
+ @param to buffer for the converted string
+ @param to_length size of the buffer
+ @param from string to convert
+ @param from_length length of the string
+ @param from_cs charset to convert from
+
+ @retval
+ the converted string
+*/
+
+char *err_conv(char *buff, uint to_length, const char *from,
+ uint from_length, CHARSET_INFO *from_cs)
+{
+ char *to= buff;
+ const char *from_start= from;
+ size_t res;
+
+ DBUG_ASSERT(to_length > 0);
+ to_length--;
+ if (from_cs == &my_charset_bin)
+ {
+ uchar char_code;
+ res= 0;
+ while (1)
+ {
+ if ((uint)(from - from_start) >= from_length ||
+ res >= to_length)
+ {
+ *to= 0;
+ break;
+ }
+
+ char_code= ((uchar) *from);
+ if (char_code >= 0x20 && char_code <= 0x7E)
+ {
+ *to++= char_code;
+ from++;
+ res++;
+ }
+ else
+ {
+ if (res + 4 >= to_length)
+ {
+ *to= 0;
+ break;
+ }
+ res+= my_snprintf(to, 5, "\\x%02X", (uint) char_code);
+ to+=4;
+ from++;
+ }
+ }
+ }
+ else
+ {
+ uint errors;
+ res= copy_and_convert(to, to_length, system_charset_info,
+ from, from_length, from_cs, &errors);
+ to[res]= 0;
+ }
+ return buff;
+}
+
+
+/**
+ Convert a string for dispatch to the client (see WL#751).
+
+ @param to buffer for the converted string
+ @param to_length size of the buffer
+ @param to_cs charset to convert to
+ @param from string to convert
+ @param from_length length of the string
+ @param from_cs charset to convert from
+ @param errors number of errors during conversion
+
+ @retval
+ length of the converted string
+*/
+
+uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors)
+{
+ int cnvres;
+ my_wc_t wc;
+ const uchar *from_end= (const uchar*) from+from_length;
+ char *to_start= to;
+ uchar *to_end= (uchar*) to+to_length;
+ my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
+ my_charset_conv_wc_mb wc_mb;
+ uint error_count= 0;
+ uint length;
+
+ DBUG_ASSERT(to_length > 0);
+ to_length--;
+
+ if (!to_cs || from_cs == to_cs || to_cs == &my_charset_bin)
+ {
+ length= min(to_length, from_length);
+ memmove(to, from, length);
+ to[length]= 0;
+ return length;
+ }
+
+ wc_mb= to_cs->cset->wc_mb;
+ while (1)
+ {
+ if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from, from_end)) > 0)
+ {
+ if (!wc)
+ break;
+ from+= cnvres;
+ }
+ else if (cnvres == MY_CS_ILSEQ)
+ {
+ wc= (ulong) (uchar) *from;
+ from+=1;
+ }
+ else
+ break;
+
+ if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
+ to+= cnvres;
+ else if (cnvres == MY_CS_ILUNI)
+ {
+ length= (wc <= 0xFFFF) ? 6/* '\1234' format*/ : 9 /* '\+123456' format*/;
+ if ((uchar*)(to + length) >= to_end)
+ break;
+ cnvres= my_snprintf(to, 9,
+ (wc <= 0xFFFF) ? "\\%04X" : "\\+%06X", (uint) wc);
+ to+= cnvres;
+ }
+ else
+ break;
+ }
+
+ *to= 0;
+ *errors= error_count;
+ return (uint32) (to - to_start);
+}
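The err_conv() and convert_error_message() helpers above implement the WL#751 recoding described in the design notes: the first renders arbitrary bytes printably so they can be embedded in a message, the second recodes a finished message into the client character set. A minimal sketch of calling err_conv(), assuming only the signature declared in sql_error.h; the buffer and input names are illustrative:

    char buff[MYSQL_ERRMSG_SIZE];
    /* binary input: printable ASCII is copied, other bytes become \xNN escapes */
    const char *raw= "\x01" "abc";
    err_conv(buff, sizeof(buff), raw, 4, &my_charset_bin);
    /* buff now holds the 7 characters  \x01abc  (the backslash kept literally) */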
diff --git a/sql/sql_error.h b/sql/sql_error.h
index f98264dce50..ac86ca4770c 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2000-2003 MySQL AB
+/* Copyright (C) 2000-2003 MySQL AB,
+ Copyright (C) 2008-2009 Sun Microsystems, Inc
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,31 +14,548 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-class MYSQL_ERROR: public Sql_alloc
+#ifndef SQL_ERROR_H
+#define SQL_ERROR_H
+
+#include "sql_list.h" /* Sql_alloc, MEM_ROOT */
+#include "m_string.h" /* LEX_STRING */
+#include "mysql_com.h" /* MYSQL_ERRMSG_SIZE */
+
+class THD;
+
+/**
+ Stores status of the currently executed statement.
+ Cleared at the beginning of the statement, and then
+ can hold either OK, ERROR, or EOF status.
+ Can not be assigned twice per statement.
+*/
+
+class Diagnostics_area
+{
+public:
+ enum enum_diagnostics_status
+ {
+ /** The area is cleared at start of a statement. */
+ DA_EMPTY= 0,
+ /** Set whenever one calls my_ok(). */
+ DA_OK,
+ /** Set whenever one calls my_eof(). */
+ DA_EOF,
+ /** Set whenever one calls my_error() or my_message(). */
+ DA_ERROR,
+ /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */
+ DA_DISABLED
+ };
+ /** True if status information is sent to the client. */
+ bool is_sent;
+ /** Set to make set_error_status after set_{ok,eof}_status possible. */
+ bool can_overwrite_status;
+
+ void set_ok_status(THD *thd, ulonglong affected_rows_arg,
+ ulonglong last_insert_id_arg,
+ const char *message);
+ void set_eof_status(THD *thd);
+ void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg,
+ const char *sqlstate);
+
+ void disable_status();
+
+ void reset_diagnostics_area();
+
+ bool is_set() const { return m_status != DA_EMPTY; }
+ bool is_error() const { return m_status == DA_ERROR; }
+ bool is_eof() const { return m_status == DA_EOF; }
+ bool is_ok() const { return m_status == DA_OK; }
+ bool is_disabled() const { return m_status == DA_DISABLED; }
+ enum_diagnostics_status status() const { return m_status; }
+
+ const char *message() const
+ { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; }
+
+ uint sql_errno() const
+ { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; }
+
+ const char* get_sqlstate() const
+ { DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; }
+
+ uint server_status() const
+ {
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
+ return m_server_status;
+ }
+
+ ulonglong affected_rows() const
+ { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; }
+
+ ulonglong last_insert_id() const
+ { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; }
+
+ uint statement_warn_count() const
+ {
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
+ return m_statement_warn_count;
+ }
+
+ Diagnostics_area() { reset_diagnostics_area(); }
+
+private:
+ /** Message buffer. Can be used by OK or ERROR status. */
+ char m_message[MYSQL_ERRMSG_SIZE];
+ /**
+ SQL error number. One of ER_ codes from share/errmsg.txt.
+ Set by set_error_status.
+ */
+ uint m_sql_errno;
+
+ char m_sqlstate[SQLSTATE_LENGTH+1];
+
+ /**
+ Copied from thd->server_status when the diagnostics area is assigned.
+ We need this member as some places in the code use the following pattern:
+ thd->server_status|= ...
+ my_eof(thd);
+ thd->server_status&= ~...
+ Assigned by OK, EOF or ERROR.
+ */
+ uint m_server_status;
+ /**
+ The number of rows affected by the last statement. This is
+ semantically close to thd->row_count_func, but has a different
+ life cycle. thd->row_count_func stores the value returned by
+ function ROW_COUNT() and is cleared only by statements that
+ update its value, such as INSERT, UPDATE, DELETE and few others.
+ This member is cleared at the beginning of the next statement.
+
+ We could possibly merge the two, but life cycle of thd->row_count_func
+ can not be changed.
+ */
+ ulonglong m_affected_rows;
+ /**
+ Similarly to the previous member, this is a replacement of
+ thd->first_successful_insert_id_in_prev_stmt, which is used
+ to implement LAST_INSERT_ID().
+ */
+ ulonglong m_last_insert_id;
+ /**
+ Number of warnings of this last statement. May differ from
+ the number of warnings returned by SHOW WARNINGS e.g. in case
+ the statement doesn't clear the warnings, and doesn't generate
+ them.
+ */
+ uint m_statement_warn_count;
+ enum_diagnostics_status m_status;
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+/**
+ Representation of a SQL condition.
+ A SQL condition can be a completion condition (note, warning),
+ or an exception condition (error, not found).
+ @note This class is named MYSQL_ERROR instead of SQL_condition for historical reasons,
+ to facilitate merging code with previous releases.
+*/
+class MYSQL_ERROR : public Sql_alloc
{
public:
+ /*
+ Enumeration value describing the severity of the error.
+
+ Note that these enumeration values must correspond to the indices
+ of the sql_print_message_handlers array.
+ */
enum enum_warning_level
{ WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR, WARN_LEVEL_END};
+ /**
+ Get the MESSAGE_TEXT of this condition.
+ @return the message text.
+ */
+ const char* get_message_text() const;
+
+ /**
+ Get the MESSAGE_OCTET_LENGTH of this condition.
+ @return the length in bytes of the message text.
+ */
+ int get_message_octet_length() const;
+
+ /**
+ Get the SQLSTATE of this condition.
+ @return the sql state.
+ */
+ const char* get_sqlstate() const
+ { return m_returned_sqlstate; }
+
+ /**
+ Get the SQL_ERRNO of this condition.
+ @return the sql error number condition item.
+ */
+ uint get_sql_errno() const
+ { return m_sql_errno; }
+
+ /**
+ Get the error level of this condition.
+ @return the error level condition item.
+ */
+ MYSQL_ERROR::enum_warning_level get_level() const
+ { return m_level; }
+
+private:
+ /*
+ The interface of MYSQL_ERROR is mostly private, by design,
+ so that only the following code:
+ - various raise_error() or raise_warning() methods in class THD,
+ - the implementation of SIGNAL / RESIGNAL
+ - catch / re-throw of SQL conditions in stored procedures (sp_rcontext)
+ is allowed to create / modify a SQL condition.
+ Enforcing this policy prevents confusion, since the only public
+ interface available to the rest of the server implementation
+ is the interface offered by the THD methods (THD::raise_error()),
+ which should be used.
+ */
+ friend class THD;
+ friend class Warning_info;
+ friend class Signal_common;
+ friend class Signal_statement;
+ friend class Resignal_statement;
+ friend class sp_rcontext;
+
+ /**
+ Default constructor.
+ This constructor is useful when allocating arrays.
+ Note that the init() method should be called to complete the MYSQL_ERROR.
+ */
+ MYSQL_ERROR();
+
+ /**
+ Complete the MYSQL_ERROR initialisation.
+ @param mem_root The memory root to use for the condition items
+ of this condition
+ */
+ void init(MEM_ROOT *mem_root);
+
+ /**
+ Constructor.
+ @param mem_root The memory root to use for the condition items
+ of this condition
+ */
+ MYSQL_ERROR(MEM_ROOT *mem_root);
+
+ /** Destructor. */
+ ~MYSQL_ERROR()
+ {}
+
+ /**
+ Copy optional condition items attributes.
+ @param cond the condition to copy.
+ */
+ void copy_opt_attributes(const MYSQL_ERROR *cond);
+
+ /**
+ Set this condition to a given error, SQLSTATE, level and message text.
+ @param sql_errno the error number for this condition.
+ @param sqlstate the SQLSTATE for this condition.
+ @param level the error level for this condition.
+ @param msg the message text for this condition.
+ */
+ void set(uint sql_errno, const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg);
+
+ /**
+ Set the condition message text.
+ @param str Message text, expressed in the character set derived from
+ the server --language option
+ */
+ void set_builtin_message_text(const char* str);
+
+ /** Set the SQLSTATE of this condition. */
+ void set_sqlstate(const char* sqlstate);
+
+ /**
+ Clear this SQL condition.
+ */
+ void clear();
+
+private:
+ /** SQL CLASS_ORIGIN condition item. */
+ String m_class_origin;
+
+ /** SQL SUBCLASS_ORIGIN condition item. */
+ String m_subclass_origin;
+
+ /** SQL CONSTRAINT_CATALOG condition item. */
+ String m_constraint_catalog;
+
+ /** SQL CONSTRAINT_SCHEMA condition item. */
+ String m_constraint_schema;
+
+ /** SQL CONSTRAINT_NAME condition item. */
+ String m_constraint_name;
+
+ /** SQL CATALOG_NAME condition item. */
+ String m_catalog_name;
+
+ /** SQL SCHEMA_NAME condition item. */
+ String m_schema_name;
+
+ /** SQL TABLE_NAME condition item. */
+ String m_table_name;
+
+ /** SQL COLUMN_NAME condition item. */
+ String m_column_name;
+
+ /** SQL CURSOR_NAME condition item. */
+ String m_cursor_name;
+
+ /** Message text, expressed in the character set implied by --language. */
+ String m_message_text;
- uint code;
- enum_warning_level level;
- char *msg;
-
- MYSQL_ERROR(THD *thd, uint code_arg, enum_warning_level level_arg,
- const char *msg_arg)
- :code(code_arg), level(level_arg)
+ /** MySQL extension, MYSQL_ERRNO condition item. */
+ uint m_sql_errno;
+
+ /**
+ SQL RETURNED_SQLSTATE condition item.
+ This member is always NUL terminated.
+ */
+ char m_returned_sqlstate[SQLSTATE_LENGTH+1];
+
+ /** Severity (error, warning, note) of this condition. */
+ MYSQL_ERROR::enum_warning_level m_level;
+
+ /** Memory root to use to hold condition item values. */
+ MEM_ROOT *m_mem_root;
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+/**
+ Information about warnings of the current connection.
+*/
+
+class Warning_info
+{
+ /** A memory root to allocate warnings and errors */
+ MEM_ROOT m_warn_root;
+ /** List of warnings of all severities (levels). */
+ List <MYSQL_ERROR> m_warn_list;
+ /** A break down of the number of warnings per severity (level). */
+ uint m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END];
+ /**
+ The number of warnings of the current statement. Warning_info
+ life cycle differs from statement life cycle -- it may span
+ multiple statements. In that case we get
+ m_statement_warn_count 0, whereas m_warn_list is not empty.
+ */
+ uint m_statement_warn_count;
+ /*
+ Row counter, to print in errors and warnings. Not increased in
+ create_sort_index(); may differ from examined_row_count.
+ */
+ ulong m_current_row_for_warning;
+ /** Used to optionally clear warnings only once per statement. */
+ ulonglong m_warn_id;
+
+private:
+ Warning_info(const Warning_info &rhs); /* Not implemented */
+ Warning_info& operator=(const Warning_info &rhs); /* Not implemented */
+public:
+
+ Warning_info(ulonglong warn_id_arg);
+ ~Warning_info();
+
+ /**
+ Reset the warning information. Clear all warnings,
+ the number of warnings, reset current row counter
+ to point to the first row.
+ */
+ void clear_warning_info(ulonglong warn_id_arg);
+ /**
+ Only clear warning info if haven't yet done that already
+ for the current query. Allows to be issued at any time
+ during the query, without risk of clearing some warnings
+ that have been generated by the current statement.
+
+ @todo: This is a sign of sloppy coding. Instead we need to
+ designate one place in a statement life cycle where we call
+ clear_warning_info().
+ */
+ void opt_clear_warning_info(ulonglong query_id)
+ {
+ if (query_id != m_warn_id)
+ clear_warning_info(query_id);
+ }
+
+ void append_warning_info(THD *thd, Warning_info *source)
+ {
+ append_warnings(thd, & source->warn_list());
+ }
+
+ /**
+ Concatenate the list of warnings.
+ It's considered tolerable to lose a warning.
+ */
+ void append_warnings(THD *thd, List<MYSQL_ERROR> *src)
+ {
+ MYSQL_ERROR *err;
+ MYSQL_ERROR *copy;
+ List_iterator_fast<MYSQL_ERROR> it(*src);
+ /*
+ Don't use ::push_warning() to avoid invocation of condition
+ handlers or escalation of warnings to errors.
+ */
+ while ((err= it++))
+ {
+ copy= Warning_info::push_warning(thd, err->get_sql_errno(), err->get_sqlstate(),
+ err->get_level(), err->get_message_text());
+ if (copy)
+ copy->copy_opt_attributes(err);
+ }
+ }
+
+ /**
+ Conditional merge of related warning information areas.
+ */
+ void merge_with_routine_info(THD *thd, Warning_info *source);
+
+ /**
+ Reset between two COM_ commands. Warnings are preserved
+ between commands, but statement_warn_count indicates
+ the number of warnings of this particular statement only.
+ */
+ void reset_for_next_command() { m_statement_warn_count= 0; }
+
+ /**
+ Used for @@warning_count system variable, which prints
+ the number of rows returned by SHOW WARNINGS.
+ */
+ ulong warn_count() const
+ {
+ /*
+ This may be higher than warn_list.elements if we have
+ had more warnings than thd->variables.max_error_count.
+ */
+ return (m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] +
+ m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR] +
+ m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]);
+ }
+
+ /**
+ This is for iteration purposes. We return a non-constant reference
+ since List doesn't have constant iterators.
+ */
+ List<MYSQL_ERROR> &warn_list() { return m_warn_list; }
+
+ /**
+ The number of errors, or number of rows returned by SHOW ERRORS,
+ also the value of session variable @@error_count.
+ */
+ ulong error_count() const
{
- if (msg_arg)
- set_msg(thd, msg_arg);
+ return m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR];
}
- void set_msg(THD *thd, const char *msg_arg);
+
+ /** Id of the warning information area. */
+ ulonglong warn_id() const { return m_warn_id; }
+
+ /** Do we have any errors and warnings that we can *show*? */
+ bool is_empty() const { return m_warn_list.elements == 0; }
+
+ /** Increment the current row counter to point at the next row. */
+ void inc_current_row_for_warning() { m_current_row_for_warning++; }
+ /** Reset the current row counter. Start counting from the first row. */
+ void reset_current_row_for_warning() { m_current_row_for_warning= 1; }
+ /** Return the current counter value. */
+ ulong current_row_for_warning() const { return m_current_row_for_warning; }
+
+ ulong statement_warn_count() const { return m_statement_warn_count; }
+
+ /**
+ Reserve some space in the condition area.
+ This is a privileged operation, reserved for the RESIGNAL implementation,
+ as only the RESIGNAL statement is allowed to remove conditions from
+ the condition area.
+ For other statements, new conditions are not added to the condition
+ area once the condition area is full.
+ @param thd The current thread
+ @param count The number of slots to reserve
+ */
+ void reserve_space(THD *thd, uint count);
+
+ /** Add a new condition to the current list. */
+ MYSQL_ERROR *push_warning(THD *thd,
+ uint sql_errno, const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg);
+
+ /**
+ Set the read only status for this statement area.
+ This is a privileged operation, reserved for the implementation of
+ diagnostics related statements, to enforce that the statement area is
+ left untouched during execution.
+ The diagnostics statements are:
+ - SHOW WARNINGS
+ - SHOW ERRORS
+ - GET DIAGNOSTICS
+ @param read_only the read only property to set
+ */
+ void set_read_only(bool read_only)
+ { m_read_only= read_only; }
+
+ /**
+ Read only status.
+ @return the read only property
+ */
+ bool is_read_only() const
+ { return m_read_only; }
+
+private:
+ /** Read only status. */
+ bool m_read_only;
+
+ friend class Resignal_statement;
};
-MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
- uint code, const char *msg);
+extern char *err_conv(char *buff, uint to_length, const char *from,
+ uint from_length, CHARSET_INFO *from_cs);
+
+class ErrConvString
+{
+ char err_buffer[MYSQL_ERRMSG_SIZE];
+public:
+
+ ErrConvString(String *str)
+ {
+ (void) err_conv(err_buffer, sizeof(err_buffer), str->ptr(),
+ str->length(), str->charset());
+ }
+
+ ErrConvString(const char *str, CHARSET_INFO* cs)
+ {
+ (void) err_conv(err_buffer, sizeof(err_buffer),
+ str, strlen(str), cs);
+ }
+
+ ErrConvString(const char *str, uint length, CHARSET_INFO* cs)
+ {
+ (void) err_conv(err_buffer, sizeof(err_buffer),
+ str, length, cs);
+ }
+
+ ~ErrConvString() { };
+ char *ptr() { return err_buffer; }
+};
+
+
+void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
+ uint code, const char *msg);
void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level,
uint code, const char *format, ...);
-void mysql_reset_errors(THD *thd, bool force);
bool mysqld_show_warnings(THD *thd, ulong levels_to_show);
+uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors);
extern const LEX_STRING warning_level_names[];
+
+#endif // SQL_ERROR_H
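Taken together, the two classes split what previously lived directly in THD: Warning_info accumulates SQL conditions across a statement, while Diagnostics_area records the single OK/EOF/ERROR outcome. A rough sketch of the per-statement flow implied by the comments above, using the thd->warning_info and thd->stmt_da members this patch uses elsewhere; the error code and literal arguments are placeholders:

    /* statement start: clear leftovers, at most once per query */
    thd->warning_info->opt_clear_warning_info(thd->query_id);
    thd->stmt_da->reset_diagnostics_area();

    /* execution: conditions are collected, not sent to the client directly */
    push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
                 "illustrative warning text");

    /* statement end: the OK packet carries the per-statement warning count */
    thd->stmt_da->set_ok_status(thd, /* affected rows */ 1,
                                /* last insert id */ 0, NULL);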
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 3bbf4b78d07..ab3f2797405 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -207,21 +207,21 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen)
DBUG_RETURN(TRUE);
}
- if (! hash_inited(&thd->handler_tables_hash))
+ if (! my_hash_inited(&thd->handler_tables_hash))
{
/*
HASH entries are of type TABLE_LIST.
*/
- if (hash_init(&thd->handler_tables_hash, &my_charset_latin1,
- HANDLER_TABLES_HASH_SIZE, 0, 0,
- (hash_get_key) mysql_ha_hash_get_key,
- (hash_free_key) mysql_ha_hash_free, 0))
+ if (my_hash_init(&thd->handler_tables_hash, &my_charset_latin1,
+ HANDLER_TABLES_HASH_SIZE, 0, 0,
+ (my_hash_get_key) mysql_ha_hash_get_key,
+ (my_hash_free_key) mysql_ha_hash_free, 0))
goto err;
}
else if (! reopen) /* Otherwise we have 'tables' already. */
{
- if (hash_search(&thd->handler_tables_hash, (uchar*) tables->alias,
- strlen(tables->alias) + 1))
+ if (my_hash_search(&thd->handler_tables_hash, (uchar*) tables->alias,
+ strlen(tables->alias) + 1))
{
DBUG_PRINT("info",("duplicate '%s'", tables->alias));
my_error(ER_NONUNIQ_TABLE, MYF(0), tables->alias);
@@ -367,12 +367,12 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables)
DBUG_PRINT("enter",("'%s'.'%s' as '%s'",
tables->db, tables->table_name, tables->alias));
- if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash,
- (uchar*) tables->alias,
- strlen(tables->alias) + 1)))
+ if ((hash_tables= (TABLE_LIST*) my_hash_search(&thd->handler_tables_hash,
+ (uchar*) tables->alias,
+ strlen(tables->alias) + 1)))
{
mysql_ha_close_table(thd, hash_tables, FALSE);
- hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
+ my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
}
else
{
@@ -436,9 +436,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
it++;
retry:
- if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash,
- (uchar*) tables->alias,
- strlen(tables->alias) + 1)))
+ if ((hash_tables= (TABLE_LIST*) my_hash_search(&thd->handler_tables_hash,
+ (uchar*) tables->alias,
+ strlen(tables->alias) + 1)))
{
table= hash_tables->table;
DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx",
@@ -460,7 +460,7 @@ retry:
hash_tables->db, hash_tables->table_name,
hash_tables->alias, table));
}
-
+ table->pos_in_table_list= tables;
#if MYSQL_VERSION_ID < 40100
if (*tables->db && strcmp(table->table_cache_key, tables->db))
{
@@ -545,7 +545,7 @@ retry:
tables->db, tables->alias, &it, 0))
goto err;
- protocol->send_fields(&list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
+ protocol->send_result_set_metadata(&list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
/*
In ::external_lock InnoDB resets the fields which tell it that
@@ -667,18 +667,11 @@ retry:
continue;
if (num_rows >= offset_limit_cnt)
{
- Item *item;
protocol->prepare_for_resend();
- it.rewind();
- while ((item=it++))
- {
- if (item->send(thd->protocol, &buffer))
- {
- protocol->free(); // Free used
- my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
- goto err;
- }
- }
+
+ if (protocol->send_result_set_row(&list))
+ goto err;
+
protocol->write();
}
num_rows++;
@@ -716,7 +709,7 @@ static TABLE_LIST *mysql_ha_find(THD *thd, TABLE_LIST *tables)
/* search for all handlers with matching table names */
for (uint i= 0; i < thd->handler_tables_hash.records; i++)
{
- hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i);
+ hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i);
for (tables= first; tables; tables= tables->next_local)
{
if ((! *tables->db ||
@@ -760,7 +753,7 @@ void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables, bool is_locked)
next= hash_tables->next_local;
if (hash_tables->table)
mysql_ha_close_table(thd, hash_tables, is_locked);
- hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
+ my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);
hash_tables= next;
}
@@ -786,7 +779,7 @@ void mysql_ha_flush(THD *thd)
for (uint i= 0; i < thd->handler_tables_hash.records; i++)
{
- hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i);
+ hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i);
if (hash_tables->table && hash_tables->table->needs_reopen_or_name_lock())
mysql_ha_close_table(thd, hash_tables, TRUE);
}
@@ -810,12 +803,12 @@ void mysql_ha_cleanup(THD *thd)
for (uint i= 0; i < thd->handler_tables_hash.records; i++)
{
- hash_tables= (TABLE_LIST*) hash_element(&thd->handler_tables_hash, i);
+ hash_tables= (TABLE_LIST*) my_hash_element(&thd->handler_tables_hash, i);
if (hash_tables->table)
mysql_ha_close_table(thd, hash_tables, FALSE);
- }
+ }
- hash_free(&thd->handler_tables_hash);
+ my_hash_free(&thd->handler_tables_hash);
DBUG_VOID_RETURN;
}
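The hash_* to my_hash_* changes here (and in sql_db.cc and sql_lex.cc) are a mechanical rename of the HASH API. For orientation, a condensed sketch of the renamed calls as this file uses them; only the alias variable is invented, the rest mirrors the hunks above:

    if (! my_hash_inited(&thd->handler_tables_hash))
      my_hash_init(&thd->handler_tables_hash, &my_charset_latin1,
                   HANDLER_TABLES_HASH_SIZE, 0, 0,
                   (my_hash_get_key) mysql_ha_hash_get_key,
                   (my_hash_free_key) mysql_ha_hash_free, 0);

    TABLE_LIST *hash_tables=
      (TABLE_LIST*) my_hash_search(&thd->handler_tables_hash,
                                   (uchar*) alias, strlen(alias) + 1);
    if (hash_tables)
      my_hash_delete(&thd->handler_tables_hash, (uchar*) hash_tables);

    my_hash_free(&thd->handler_tables_hash);  /* release the whole hash */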
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 2818aa5082c..003741a7ddc 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -431,7 +431,7 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3)
field_list.push_back(new Item_empty_string("description",1000));
field_list.push_back(new Item_empty_string("example",1000));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(1);
@@ -463,7 +463,7 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3)
+- -+
RETURN VALUES
- result of protocol->send_fields
+ result of protocol->send_result_set_metadata
*/
int send_header_2(Protocol *protocol, bool for_category)
@@ -474,7 +474,7 @@ int send_header_2(Protocol *protocol, bool for_category)
field_list.push_back(new Item_empty_string("source_category_name",64));
field_list.push_back(new Item_empty_string("name",64));
field_list.push_back(new Item_empty_string("is_it_category",1));
- DBUG_RETURN(protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS |
+ DBUG_RETURN(protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF));
}
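The send_fields() to send_result_set_metadata() rename follows the same pattern everywhere it appears in this patch (sql_error.cc, sql_handler.cc, sql_help.cc). A sketch of that pattern, assembled from the calls above; the single column is illustrative:

    List<Item> field_list;
    field_list.push_back(new Item_empty_string("name", 64));
    if (protocol->send_result_set_metadata(&field_list,
                                           Protocol::SEND_NUM_ROWS |
                                           Protocol::SEND_EOF))
      DBUG_RETURN(1);
    /* then one prepare_for_resend() / store() / write() cycle per row */
    my_eof(thd);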
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 6281dd8168a..d4306ba0c61 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -107,8 +107,8 @@ static bool check_view_insertability(THD *thd, TABLE_LIST *view);
1 Error
*/
-bool check_view_single_update(List<Item> &fields, TABLE_LIST *view,
- table_map *map)
+bool check_view_single_update(List<Item> &fields, List<Item> *values,
+ TABLE_LIST *view, table_map *map)
{
/* it is join view => we need to find the table for update */
List_iterator_fast<Item> it(fields);
@@ -119,6 +119,17 @@ bool check_view_single_update(List<Item> &fields, TABLE_LIST *view,
while ((item= it++))
tables|= item->used_tables();
+ if (values)
+ {
+ it.init(*values);
+ while ((item= it++))
+ tables|= item->used_tables();
+ }
+
+ /* Convert to real table bits */
+ tables&= ~PSEUDO_TABLE_BITS;
+
+
/* Check found map against provided map */
if (*map)
{
@@ -165,7 +176,9 @@ error:
static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
List<Item> &fields, List<Item> &values,
- bool check_unique, table_map *map)
+ bool check_unique,
+ bool fields_and_values_from_different_maps,
+ table_map *map)
{
TABLE *table= table_list->table;
@@ -238,7 +251,10 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
if (table_list->effective_algorithm == VIEW_ALGORITHM_MERGE)
{
- if (check_view_single_update(fields, table_list, map))
+ if (check_view_single_update(fields,
+ fields_and_values_from_different_maps ?
+ (List<Item>*) 0 : &values,
+ table_list, map))
return -1;
table= table_list->table;
}
@@ -298,7 +314,8 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
*/
static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
- List<Item> &update_fields, table_map *map)
+ List<Item> &update_fields,
+ List<Item> &update_values, table_map *map)
{
TABLE *table= insert_table_list->table;
my_bool timestamp_mark= 0;
@@ -318,7 +335,8 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
return -1;
if (insert_table_list->effective_algorithm == VIEW_ALGORITHM_MERGE &&
- check_view_single_update(update_fields, insert_table_list, map))
+ check_view_single_update(update_fields, &update_values,
+ insert_table_list, map))
return -1;
if (table->timestamp_field)
@@ -826,7 +844,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error=write_record(thd, table ,&info);
if (error)
break;
- thd->row_count++;
+ thd->warning_info->inc_current_row_for_warning();
}
free_underlaid_joins(thd, &thd->lex->select_lex);
@@ -965,10 +983,12 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
if (ignore)
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(lock_type == TL_WRITE_DELAYED) ? (ulong) 0 :
- (ulong) (info.records - info.copied), (ulong) thd->cuted_fields);
+ (ulong) (info.records - info.copied),
+ (ulong) thd->warning_info->statement_warn_count());
else
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
- (ulong) (info.deleted + updated), (ulong) thd->cuted_fields);
+ (ulong) (info.deleted + updated),
+ (ulong) thd->warning_info->statement_warn_count());
thd->row_count_func= info.copied + info.deleted + updated;
::my_ok(thd, (ulong) thd->row_count_func, id, buff);
}
@@ -1256,9 +1276,9 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
table_list->next_local= 0;
context->resolve_in_table_list_only(table_list);
- res= check_insert_fields(thd, context->table_list, fields, *values,
- !insert_into_view, &map) ||
- setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0);
+ res= (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0) ||
+ check_insert_fields(thd, context->table_list, fields, *values,
+ !insert_into_view, 0, &map));
if (!res && check_fields)
{
@@ -1271,18 +1291,19 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
thd->abort_on_warning= saved_abort_on_warning;
}
+ if (!res)
+ res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, 0);
+
if (!res && duplic == DUP_UPDATE)
{
select_lex->no_wrap_view_item= TRUE;
- res= check_update_fields(thd, context->table_list, update_fields, &map);
+ res= check_update_fields(thd, context->table_list, update_fields,
+ update_values, &map);
select_lex->no_wrap_view_item= FALSE;
}
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
-
- if (!res)
- res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, 0);
}
if (res)
@@ -1971,7 +1992,7 @@ bool delayed_get_table(THD *thd, TABLE_LIST *table_list)
main thread. Use of my_message will enable stored
procedures continue handlers.
*/
- my_message(di->thd.main_da.sql_errno(), di->thd.main_da.message(),
+ my_message(di->thd.stmt_da->sql_errno(), di->thd.stmt_da->message(),
MYF(0));
}
di->unlock();
@@ -2048,7 +2069,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
goto error;
if (dead)
{
- my_message(thd.main_da.sql_errno(), thd.main_da.message(), MYF(0));
+ my_message(thd.stmt_da->sql_errno(), thd.stmt_da->message(), MYF(0));
goto error;
}
}
@@ -2297,8 +2318,8 @@ static void handle_delayed_insert_impl(THD *thd, Delayed_insert *di)
if (init_thr_lock() || thd->store_globals())
{
/* Can't use my_error since store_globals has perhaps failed */
- thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES,
- ER(ER_OUT_OF_RESOURCES));
+ thd->stmt_da->set_error_status(thd, ER_OUT_OF_RESOURCES,
+ ER(ER_OUT_OF_RESOURCES), NULL);
thd->fatal_error();
goto err;
}
@@ -2512,21 +2533,16 @@ pthread_handler_t handle_delayed_insert(void *arg)
since it does not find one in the list.
*/
pthread_mutex_lock(&di->mutex);
-#if !defined( __WIN__) /* Win32 calls this in pthread_create */
if (my_thread_init())
{
/* Can't use my_error since store_globals has not yet been called */
- thd->main_da.set_error_status(thd, ER_OUT_OF_RESOURCES,
- ER(ER_OUT_OF_RESOURCES));
+ thd->stmt_da->set_error_status(thd, ER_OUT_OF_RESOURCES,
+ ER(ER_OUT_OF_RESOURCES), NULL);
goto end;
}
-#endif
-
handle_delayed_insert_impl(thd, di);
-#ifndef __WIN__
end:
-#endif
/*
di should be unlinked from the thread handler list and have no active
clients
@@ -2748,6 +2764,12 @@ bool Delayed_insert::handle_inserts(void)
thread_safe_increment(delayed_insert_writes,&LOCK_delayed_status);
pthread_mutex_lock(&mutex);
+ /*
+ Reset the table->auto_increment_field_not_null as it is valid for
+ only one row.
+ */
+ table->auto_increment_field_not_null= FALSE;
+
delete row;
/*
Let READ clients do something once in a while
@@ -2769,7 +2791,7 @@ bool Delayed_insert::handle_inserts(void)
{
/* This should never happen */
table->file->print_error(error,MYF(0));
- sql_print_error("%s", thd.main_da.message());
+ sql_print_error("%s", thd.stmt_da->message());
DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop"));
goto err;
}
@@ -2811,7 +2833,7 @@ bool Delayed_insert::handle_inserts(void)
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
{ // This shouldn't happen
table->file->print_error(error,MYF(0));
- sql_print_error("%s", thd.main_da.message());
+ sql_print_error("%s", thd.stmt_da->message());
DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop"));
goto err;
}
@@ -2950,9 +2972,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
we are fixing fields from insert list.
*/
lex->current_select= &lex->select_lex;
- res= check_insert_fields(thd, table_list, *fields, values,
- !insert_into_view, &map) ||
- setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0);
+ res= (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0) ||
+ check_insert_fields(thd, table_list, *fields, values,
+ !insert_into_view, 1, &map));
if (!res && fields->elements)
{
@@ -2979,7 +3001,8 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
lex->select_lex.no_wrap_view_item= TRUE;
res= res || check_update_fields(thd, context->table_list,
- *info.update_fields, &map);
+ *info.update_fields, *info.update_values,
+ &map);
lex->select_lex.no_wrap_view_item= FALSE;
/*
When we are not using GROUP BY and there are no ungrouped aggregate functions
@@ -3279,10 +3302,12 @@ bool select_insert::send_eof()
char buff[160];
if (info.ignore)
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
- (ulong) (info.records - info.copied), (ulong) thd->cuted_fields);
+ (ulong) (info.records - info.copied),
+ (ulong) thd->warning_info->statement_warn_count());
else
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
- (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields);
+ (ulong) (info.deleted+info.updated),
+ (ulong) thd->warning_info->statement_warn_count());
thd->row_count_func= info.copied + info.deleted +
((thd->client_capabilities & CLIENT_FOUND_ROWS) ?
info.touched : info.updated);
@@ -3855,6 +3880,7 @@ void select_create::abort()
{
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
+ table->auto_increment_field_not_null= FALSE;
if (!create_info->table_existed)
drop_open_table(thd, table, create_table->db, create_table->table_name);
table=0; // Safety
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 2adbc44eb12..f6dd1fae90a 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -31,7 +31,14 @@
sys_var *trg_new_row_fake_var= (sys_var*) 0x01;
+/**
+ LEX_STRING constant for null-string to be used in parser and other places.
+*/
+const LEX_STRING null_lex_str= {NULL, 0};
+const LEX_STRING empty_lex_str= { (char*) "", 0 };
+
/* Longest standard keyword name */
+
#define TOCK_NAME_LENGTH 24
/*
@@ -134,6 +141,7 @@ Lex_input_stream::Lex_input_stream(THD *thd,
found_semicolon(NULL),
ignore_space(test(thd->variables.sql_mode & MODE_IGNORE_SPACE)),
stmt_prepare_mode(FALSE),
+ multi_statements(TRUE),
in_comment(NO_COMMENT),
m_underscore_cs(NULL)
{
@@ -1497,7 +1505,7 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root)
keys_onoff(rhs.keys_onoff),
tablespace_op(rhs.tablespace_op),
partition_names(rhs.partition_names, mem_root),
- no_parts(rhs.no_parts),
+ num_parts(rhs.num_parts),
change_level(rhs.change_level),
datetime_field(rhs.datetime_field),
error_if_not_empty(rhs.error_if_not_empty)
@@ -2091,7 +2099,7 @@ void st_select_lex::print_limit(THD *thd,
to implement the clean up.
*/
-void st_lex::cleanup_lex_after_parse_error(THD *thd)
+void LEX::cleanup_lex_after_parse_error(THD *thd)
{
/*
Delete sphead for the side effect of restoring of the original
@@ -2150,7 +2158,7 @@ void Query_tables_list::reset_query_tables_list(bool init)
We delay real initialization of hash (and therefore related
memory allocation) until first insertion into this hash.
*/
- hash_clear(&sroutines);
+ my_hash_clear(&sroutines);
}
else if (sroutines.records)
{
@@ -2173,7 +2181,7 @@ void Query_tables_list::reset_query_tables_list(bool init)
void Query_tables_list::destroy_query_tables_list()
{
- hash_free(&sroutines);
+ my_hash_free(&sroutines);
}
@@ -2181,7 +2189,7 @@ void Query_tables_list::destroy_query_tables_list()
Initialize LEX object.
SYNOPSIS
- st_lex::st_lex()
+ LEX::LEX()
NOTE
LEX object initialized with this constructor can be used as part of
@@ -2191,7 +2199,7 @@ void Query_tables_list::destroy_query_tables_list()
for this.
*/
-st_lex::st_lex()
+LEX::LEX()
:result(0),
sql_command(SQLCOM_END), option_type(OPT_DEFAULT), is_lex_started(0)
{
@@ -2208,7 +2216,7 @@ st_lex::st_lex()
Check whether the merging algorithm can be used on this VIEW
SYNOPSIS
- st_lex::can_be_merged()
+ LEX::can_be_merged()
DESCRIPTION
We can apply merge algorithm if it is single SELECT view with
@@ -2222,7 +2230,7 @@ st_lex::st_lex()
TRUE - merge algorithm can be used
*/
-bool st_lex::can_be_merged()
+bool LEX::can_be_merged()
{
// TODO: do not forget to implement the case when select_lex.table_list.elements==0
@@ -2259,19 +2267,19 @@ bool st_lex::can_be_merged()
check if command can use VIEW with MERGE algorithm (for top VIEWs)
SYNOPSIS
- st_lex::can_use_merged()
+ LEX::can_use_merged()
DESCRIPTION
Only listed here commands can use merge algorithm in top level
SELECT_LEX (for subqueries will be used merge algorithm if
- st_lex::can_not_use_merged() is not TRUE).
+ LEX::can_not_use_merged() is not TRUE).
RETURN
FALSE - command can't use merged VIEWs
TRUE - VIEWs with MERGE algorithms can be used
*/
-bool st_lex::can_use_merged()
+bool LEX::can_use_merged()
{
switch (sql_command)
{
@@ -2296,18 +2304,18 @@ bool st_lex::can_use_merged()
Check if command can't use merged views in any part of command
SYNOPSIS
- st_lex::can_not_use_merged()
+ LEX::can_not_use_merged()
DESCRIPTION
Temporary table algorithm will be used on all SELECT levels for queries
- listed here (see also st_lex::can_use_merged()).
+ listed here (see also LEX::can_use_merged()).
RETURN
FALSE - command can't use merged VIEWs
TRUE - VIEWs with MERGE algorithms can be used
*/
-bool st_lex::can_not_use_merged()
+bool LEX::can_not_use_merged()
{
switch (sql_command)
{
@@ -2336,7 +2344,7 @@ bool st_lex::can_not_use_merged()
FALSE no, we need data
*/
-bool st_lex::only_view_structure()
+bool LEX::only_view_structure()
{
switch (sql_command) {
case SQLCOM_SHOW_CREATE:
@@ -2365,7 +2373,7 @@ bool st_lex::only_view_structure()
*/
-bool st_lex::need_correct_ident()
+bool LEX::need_correct_ident()
{
switch(sql_command)
{
@@ -2395,7 +2403,7 @@ bool st_lex::need_correct_ident()
VIEW_CHECK_CASCADED CHECK OPTION CASCADED
*/
-uint8 st_lex::get_effective_with_check(TABLE_LIST *view)
+uint8 LEX::get_effective_with_check(TABLE_LIST *view)
{
if (view->select_lex->master_unit() == &unit &&
which_check_option_applicable())
@@ -2424,7 +2432,7 @@ uint8 st_lex::get_effective_with_check(TABLE_LIST *view)
*/
bool
-st_lex::copy_db_to(char **p_db, size_t *p_db_length) const
+LEX::copy_db_to(char **p_db, size_t *p_db_length) const
{
if (sphead)
{
@@ -2501,7 +2509,7 @@ void st_select_lex_unit::set_limit(st_select_lex *sl)
clause.
*/
-void st_lex::set_trg_event_type_for_tables()
+void LEX::set_trg_event_type_for_tables()
{
uint8 new_trg_event_map= 0;
@@ -2644,7 +2652,7 @@ void st_lex::set_trg_event_type_for_tables()
In this case link_to_local is set.
*/
-TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local)
+TABLE_LIST *LEX::unlink_first_table(bool *link_to_local)
{
TABLE_LIST *first;
if ((first= query_tables))
@@ -2684,7 +2692,7 @@ TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local)
table list
SYNOPSIS
- st_lex::first_lists_tables_same()
+ LEX::first_lists_tables_same()
NOTES
In many cases (for example, usual INSERT/DELETE/...) the first table of
@@ -2695,7 +2703,7 @@ TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local)
the global list first.
*/
-void st_lex::first_lists_tables_same()
+void LEX::first_lists_tables_same()
{
TABLE_LIST *first_table= (TABLE_LIST*) select_lex.table_list.first;
if (query_tables != first_table && first_table != 0)
@@ -2731,7 +2739,7 @@ void st_lex::first_lists_tables_same()
global list
*/
-void st_lex::link_first_table_back(TABLE_LIST *first,
+void LEX::link_first_table_back(TABLE_LIST *first,
bool link_to_local)
{
if (first)
@@ -2758,7 +2766,7 @@ void st_lex::link_first_table_back(TABLE_LIST *first,
cleanup lex for case when we open table by table for processing
SYNOPSIS
- st_lex::cleanup_after_one_table_open()
+ LEX::cleanup_after_one_table_open()
NOTE
This method is mostly responsible for cleaning up of selects lists and
@@ -2766,7 +2774,7 @@ void st_lex::link_first_table_back(TABLE_LIST *first,
to call Query_tables_list::reset_query_tables_list(FALSE).
*/
-void st_lex::cleanup_after_one_table_open()
+void LEX::cleanup_after_one_table_open()
{
/*
thd->lex->derived_tables & additional units may be set if we open
@@ -2801,7 +2809,7 @@ void st_lex::cleanup_after_one_table_open()
backup Pointer to Query_tables_list instance to be used for backup
*/
-void st_lex::reset_n_backup_query_tables_list(Query_tables_list *backup)
+void LEX::reset_n_backup_query_tables_list(Query_tables_list *backup)
{
backup->set_query_tables_list(this);
/*
@@ -2820,7 +2828,7 @@ void st_lex::reset_n_backup_query_tables_list(Query_tables_list *backup)
backup Pointer to Query_tables_list instance used for backup
*/
-void st_lex::restore_backup_query_tables_list(Query_tables_list *backup)
+void LEX::restore_backup_query_tables_list(Query_tables_list *backup)
{
this->destroy_query_tables_list();
this->set_query_tables_list(backup);
@@ -2831,14 +2839,14 @@ void st_lex::restore_backup_query_tables_list(Query_tables_list *backup)
Checks for usage of routines and/or tables in a parsed statement
SYNOPSIS
- st_lex:table_or_sp_used()
+ LEX::table_or_sp_used()
RETURN
FALSE No routines and tables used
TRUE Either or both routines and tables are used.
*/
-bool st_lex::table_or_sp_used()
+bool LEX::table_or_sp_used()
{
DBUG_ENTER("table_or_sp_used");
@@ -2999,7 +3007,7 @@ bool st_select_lex::add_index_hint (THD *thd, char *str, uint length)
@retval FALSE No, not a management partition command
*/
-bool st_lex::is_partition_management() const
+bool LEX::is_partition_management() const
{
return (sql_command == SQLCOM_ALTER_TABLE &&
(alter_info.flags == ALTER_ADD_PARTITION ||
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 4e4794ef2cf..4bf8cd41aee 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -17,6 +17,9 @@
@defgroup Semantic_Analysis Semantic Analysis
*/
+#ifndef SQL_LEX_INCLUDED
+#define SQL_LEX_INCLUDED
+
/* YACC and LEX Definitions */
/* These may not be declared yet */
@@ -94,7 +97,7 @@ enum enum_sql_command {
SQLCOM_SHOW_SLAVE_HOSTS, SQLCOM_DELETE_MULTI, SQLCOM_UPDATE_MULTI,
SQLCOM_SHOW_BINLOG_EVENTS, SQLCOM_SHOW_NEW_MASTER, SQLCOM_DO,
SQLCOM_SHOW_WARNS, SQLCOM_EMPTY_QUERY, SQLCOM_SHOW_ERRORS,
- SQLCOM_SHOW_COLUMN_TYPES, SQLCOM_SHOW_STORAGE_ENGINES, SQLCOM_SHOW_PRIVILEGES,
+ SQLCOM_SHOW_STORAGE_ENGINES, SQLCOM_SHOW_PRIVILEGES,
SQLCOM_HELP, SQLCOM_CREATE_USER, SQLCOM_DROP_USER, SQLCOM_RENAME_USER,
SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM,
SQLCOM_CREATE_PROCEDURE, SQLCOM_CREATE_SPFUNCTION, SQLCOM_CALL,
@@ -118,7 +121,8 @@ enum enum_sql_command {
SQLCOM_SHOW_CREATE_TRIGGER,
SQLCOM_ALTER_DB_UPGRADE,
SQLCOM_SHOW_PROFILE, SQLCOM_SHOW_PROFILES,
-
+ SQLCOM_SIGNAL, SQLCOM_RESIGNAL,
+ SQLCOM_SHOW_RELAYLOG_EVENTS,
/*
When a command is added here, be sure it's also added in mysqld.cc
in "struct show_var_st status_vars[]= {" ...
@@ -203,17 +207,19 @@ typedef struct st_lex_master_info
{
char *host, *user, *password, *log_file_name;
uint port, connect_retry;
+ float heartbeat_period;
ulonglong pos;
ulong server_id;
/*
Enum is used for making it possible to detect if the user
changed variable or if it should be left at old value
*/
- enum {SSL_UNCHANGED, SSL_DISABLE, SSL_ENABLE}
- ssl, ssl_verify_server_cert;
+ enum {LEX_MI_UNCHANGED, LEX_MI_DISABLE, LEX_MI_ENABLE}
+ ssl, ssl_verify_server_cert, heartbeat_opt, repl_ignore_server_ids_opt;
char *ssl_key, *ssl_cert, *ssl_ca, *ssl_capath, *ssl_cipher;
char *relay_log_name;
ulong relay_log_pos;
+ DYNAMIC_ARRAY repl_ignore_server_ids;
} LEX_MASTER_INFO;
@@ -394,7 +400,7 @@ public:
Base class for st_select_lex (SELECT_LEX) &
st_select_lex_unit (SELECT_LEX_UNIT)
*/
-struct st_lex;
+struct LEX;
class st_select_lex;
class st_select_lex_unit;
class st_select_lex_node {
@@ -464,7 +470,7 @@ public:
virtual void set_lock_for_tables(thr_lock_type lock_type) {}
friend class st_select_lex_unit;
- friend bool mysql_new_select(struct st_lex *lex, bool move_down);
+ friend bool mysql_new_select(LEX *lex, bool move_down);
friend bool mysql_make_view(THD *thd, File_parser *parser,
TABLE_LIST *table, uint flags);
private:
@@ -584,7 +590,7 @@ public:
/* Saved values of the WHERE and HAVING clauses*/
Item::cond_result cond_value, having_value;
/* point on lex in which it was created, used in view subquery detection */
- st_lex *parent_lex;
+ LEX *parent_lex;
enum olap_type olap;
/* FROM clause - points to the beginning of the TABLE_LIST::next_local list. */
SQL_LIST table_list;
@@ -893,7 +899,7 @@ public:
enum enum_enable_or_disable keys_onoff;
enum tablespace_op_type tablespace_op;
List<char> partition_names;
- uint no_parts;
+ uint num_parts;
enum_alter_table_change_level change_level;
Create_field *datetime_field;
bool error_if_not_empty;
@@ -903,7 +909,7 @@ public:
flags(0),
keys_onoff(LEAVE_AS_IS),
tablespace_op(NO_TABLESPACE_OP),
- no_parts(0),
+ num_parts(0),
change_level(ALTER_TABLE_METADATA_ONLY),
datetime_field(NULL),
error_if_not_empty(FALSE)
@@ -918,7 +924,7 @@ public:
flags= 0;
keys_onoff= LEAVE_AS_IS;
tablespace_op= NO_TABLESPACE_OP;
- no_parts= 0;
+ num_parts= 0;
partition_names.empty();
change_level= ALTER_TABLE_METADATA_ONLY;
datetime_field= 0;
@@ -950,6 +956,9 @@ extern sys_var *trg_new_row_fake_var;
enum xa_option_words {XA_NONE, XA_JOIN, XA_RESUME, XA_ONE_PHASE,
XA_SUSPEND, XA_FOR_MIGRATE};
+extern const LEX_STRING null_lex_str;
+extern const LEX_STRING empty_lex_str;
+
/*
Class representing list of all tables used by statement.
@@ -958,7 +967,7 @@ enum xa_option_words {XA_NONE, XA_JOIN, XA_RESUME, XA_ONE_PHASE,
stored functions/triggers to this list in order to pre-open and lock
them.
- Also used by st_lex::reset_n_backup/restore_backup_query_tables_list()
+ Also used by LEX::reset_n_backup/restore_backup_query_tables_list()
methods to save and restore this information.
*/
@@ -1486,9 +1495,13 @@ public:
/**
TRUE if we're parsing a prepared statement: in this mode
- we should allow placeholders and disallow multi-statements.
+ we should allow placeholders.
*/
bool stmt_prepare_mode;
+ /**
+ TRUE if we should allow multi-statements.
+ */
+ bool multi_statements;
/** State of the lexical analyser for comments. */
enum_comment_state in_comment;
@@ -1518,10 +1531,66 @@ public:
CHARSET_INFO *m_underscore_cs;
};
+/**
+ Abstract representation of a statement.
+ This class is an interface between the parser and the runtime.
+ The parser builds the appropriate sub classes of Sql_statement
+ to represent a SQL statement in the parsed tree.
+ The execute() method in the sub classes contains the runtime implementation.
+ Note that this interface is used for recently implemented SQL statements;
+ the code for older statements tends to load the LEX structure with more
+ attributes instead.
+ The recommended way to implement new statements is to sub-class
+ Sql_statement, as this improves code modularity (see the 'big switch' in
+ dispatch_command()) and decreases the total size of the LEX structure
+ (thereby saving memory in stored programs).
+*/
+class Sql_statement : public Sql_alloc
+{
+public:
+ /**
+ Execute this SQL statement.
+ @param thd the current thread.
+ @return 0 on success.
+ */
+ virtual bool execute(THD *thd) = 0;
+
+protected:
+ /**
+ Constructor.
+ @param lex the LEX structure that represents parts of this statement.
+ */
+ Sql_statement(LEX *lex)
+ : m_lex(lex)
+ {}
+
+ /** Destructor. */
+ virtual ~Sql_statement()
+ {
+ /*
+ Sql_statement objects are allocated in thd->mem_root.
+ In MySQL, the C++ destructor is never called; the underlying MEM_ROOT is
+ simply destroyed instead.
+ Do not rely on the destructor for any cleanup.
+ */
+ DBUG_ASSERT(FALSE);
+ }
+
+protected:
+ /**
+ The legacy LEX structure for this statement.
+ The LEX structure contains the existing properties of the parsed tree.
+ TODO: with time, attributes from LEX should move to sub classes of
+ Sql_statement, so that the parser only builds Sql_statement objects
+ with the minimum set of attributes, instead of a LEX structure that
+ contains the collection of every possible attribute.
+ */
+ LEX *m_lex;
+};
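
The comment above recommends sub-classing Sql_statement for newly implemented statements. A minimal, self-contained sketch of that pattern; THD_stub, LEX_stub, Statement_base and Statement_example are invented stand-ins used only to show the shape (constructor taking the LEX, execute() holding the runtime code, statement-specific attributes kept out of LEX):

    #include <cstdio>

    struct THD_stub {};                    // stand-in for the server's THD
    struct LEX_stub {};                    // stand-in for the legacy LEX structure

    class Statement_base                   // mirrors the Sql_statement shape
    {
    public:
      virtual bool execute(THD_stub *thd) = 0;
      virtual ~Statement_base() {}
    protected:
      explicit Statement_base(LEX_stub *lex) : m_lex(lex) {}
      LEX_stub *m_lex;                     // legacy attributes stay reachable
    };

    class Statement_example : public Statement_base
    {
    public:
      Statement_example(LEX_stub *lex, bool verbose)
        : Statement_base(lex), m_verbose(verbose) {}

      virtual bool execute(THD_stub *)
      {
        // Statement-specific work goes here instead of in a 'big switch'.
        std::puts(m_verbose ? "executing (verbose)" : "executing");
        return false;                      // false == success
      }
    private:
      bool m_verbose;                      // attribute kept out of LEX
    };

    int main()
    {
      THD_stub thd;
      LEX_stub lex;
      Statement_example stmt(&lex, true);
      return stmt.execute(&thd) ? 1 : 0;
    }
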
/* The state of the lex parsing. This is saved in the THD struct */
-typedef struct st_lex : public Query_tables_list
+struct LEX: public Query_tables_list
{
SELECT_LEX_UNIT unit; /* most upper unit */
SELECT_LEX select_lex; /* first SELECT_LEX */
@@ -1619,6 +1688,9 @@ typedef struct st_lex : public Query_tables_list
*/
nesting_map allow_sum_func;
enum_sql_command sql_command;
+
+ Sql_statement *m_stmt;
+
/*
Usually `expr` rule of yacc is quite reused but some commands better
not support subqueries which comes standard with this rule, like
@@ -1768,9 +1840,9 @@ typedef struct st_lex : public Query_tables_list
*/
bool protect_against_global_read_lock;
- st_lex();
+ LEX();
- virtual ~st_lex()
+ virtual ~LEX()
{
destroy_query_tables_list();
plugin_unlock_list(NULL, (plugin_ref *)plugins.buffer, plugins.elements);
@@ -1812,7 +1884,7 @@ typedef struct st_lex : public Query_tables_list
Is this update command where 'WITH CHECK OPTION' clause is important
SYNOPSIS
- st_lex::which_check_option_applicable()
+ LEX::which_check_option_applicable()
RETURN
TRUE have to take 'WITH CHECK OPTION' clause into account
@@ -1884,7 +1956,37 @@ typedef struct st_lex : public Query_tables_list
}
return FALSE;
}
-} LEX;
+};
+
+
+/**
+ Set_signal_information is a container used in the parsed tree to represent
+ the collection of assignments to condition items in the SIGNAL and RESIGNAL
+ statements.
+*/
+class Set_signal_information
+{
+public:
+ /** Constructor. */
+ Set_signal_information();
+
+ /** Copy constructor. */
+ Set_signal_information(const Set_signal_information& set);
+
+ /** Destructor. */
+ ~Set_signal_information()
+ {}
+
+ /** Clear all items. */
+ void clear();
+
+ /**
+ For each condition item assignment, m_item[] contains the parsed tree
+ that represents the expression assigned, if any.
+ m_item[] is an array indexed by Diag_condition_item_name.
+ */
+ Item *m_item[LAST_DIAG_SET_PROPERTY+1];
+};
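
A self-contained sketch of the container idea: one slot per condition item, each holding the expression assigned in a SIGNAL/RESIGNAL ... SET clause. The Cond_item enumerators and the Expr type are illustrative stand-ins; the real indices come from Diag_condition_item_name, which is not part of this hunk:

    #include <cstring>
    #include <cstdio>

    enum Cond_item { MESSAGE_TEXT= 0, MYSQL_ERRNO, LAST_PROPERTY= MYSQL_ERRNO };

    struct Expr { const char *text; };          // stand-in for Item*

    struct Signal_info
    {
      Expr *item[LAST_PROPERTY + 1];            // one slot per condition item
      void clear() { std::memset(item, 0, sizeof(item)); }
    };

    int main()
    {
      Expr msg= { "'Unhandled business error'" };
      Signal_info info;
      info.clear();
      info.item[MESSAGE_TEXT]= &msg;            // SET MESSAGE_TEXT = '...'

      for (int i= 0; i <= LAST_PROPERTY; i++)
        if (info.item[i])
          std::printf("property %d assigned: %s\n", i, info.item[i]->text);
      return 0;
    }
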
/**
@@ -1913,6 +2015,12 @@ public:
*/
uchar *yacc_yyvs;
+ /**
+ Fragments of parsed tree,
+ used during the parsing of SIGNAL and RESIGNAL.
+ */
+ Set_signal_information m_set_signal_info;
+
/*
TODO: move more attributes from the LEX structure here.
*/
@@ -1939,7 +2047,7 @@ public:
};
-struct st_lex_local: public st_lex
+struct st_lex_local: public LEX
{
static void *operator new(size_t size) throw()
{
@@ -1969,6 +2077,7 @@ extern bool is_lex_native_function(const LEX_STRING *name);
@} (End of group Semantic_Analysis)
*/
-int my_missing_function_error(const LEX_STRING &token, const char *name);
+void my_missing_function_error(const LEX_STRING &token, const char *name);
#endif /* MYSQL_SERVER */
+#endif /* SQL_LEX_INCLUDED */
diff --git a/sql/sql_list.cc b/sql/sql_list.cc
index 49b649133d0..a256212471d 100644
--- a/sql/sql_list.cc
+++ b/sql/sql_list.cc
@@ -18,7 +18,7 @@
#pragma implementation // gcc: Class implementation
#endif
-#include "mysql_priv.h"
+#include "sql_list.h"
list_node end_of_list;
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 22df77afeb3..74f4cc0ec0d 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -15,11 +15,17 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include "my_global.h"
+#include "my_sys.h"
+#include "m_string.h" /* for TRASH */
+
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
+void *sql_alloc(size_t);
+
/* mysql standard class memory allocator */
class Sql_alloc
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 8109ca4313e..263fd8e8a08 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -15,6 +15,8 @@
/* Copy data from a textfile to table */
+/* 2006-12 Erik Wetterberg : LOAD XML added */
+
#include "mysql_priv.h"
#include <my_dir.h>
#include <m_ctype.h>
@@ -23,6 +25,23 @@
#include "sp_head.h"
#include "sql_trigger.h"
+class XML_TAG {
+public:
+ int level;
+ String field;
+ String value;
+ XML_TAG(int l, String f, String v);
+};
+
+
+XML_TAG::XML_TAG(int l, String f, String v)
+{
+ level= l;
+ field.append(f);
+ value.append(v);
+}
+
+
class READ_INFO {
File file;
uchar *buffer, /* Buffer for read text */
@@ -37,6 +56,7 @@ class READ_INFO {
bool need_end_io_cache;
IO_CACHE cache;
NET *io_net;
+ int level; /* for load xml */
public:
bool error,line_cuted,found_null,enclosed;
@@ -54,6 +74,12 @@ public:
char unescape(char chr);
int terminator(char *ptr,uint length);
bool find_start_of_fields();
+ /* load xml */
+ List<XML_TAG> taglist;
+ int read_value(int delim, String *val);
+ int read_xml();
+ int clear_level(int level);
+
/*
We need to force cache close before destructor is invoked to log
the last read block
@@ -82,6 +108,13 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
List<Item> &set_values, READ_INFO &read_info,
String &enclosed, ulong skip_lines,
bool ignore_check_option_errors);
+
+static int read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
+ List<Item> &fields_vars, List<Item> &set_fields,
+ List<Item> &set_values, READ_INFO &read_info,
+ String &enclosed, ulong skip_lines,
+ bool ignore_check_option_errors);
+
#ifndef EMBEDDED_LIBRARY
static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex,
const char* db_arg, /* table's database */
@@ -398,7 +431,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */
thd->cuted_fields=0L;
/* Skip lines if there is a line terminator */
- if (ex->line_term->length())
+ if (ex->line_term->length() && ex->filetype != FILETYPE_XML)
{
/* ex->skip_lines needs to be preserved for logging */
while (skip_lines > 0)
@@ -429,7 +462,11 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
(MODE_STRICT_TRANS_TABLES |
MODE_STRICT_ALL_TABLES)));
- if (!field_term->length() && !enclosed->length())
+ if (ex->filetype == FILETYPE_XML) /* load xml */
+ error= read_xml_field(thd, info, table_list, fields_vars,
+ set_fields, set_values, read_info,
+ *(ex->line_term), skip_lines, ignore);
+ else if (!field_term->length() && !enclosed->length())
error= read_fixed_length(thd, info, table_list, fields_vars,
set_fields, set_values, read_info,
skip_lines, ignore);
@@ -525,7 +562,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
goto err;
}
sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted,
- (ulong) (info.records - info.copied), (ulong) thd->cuted_fields);
+ (ulong) (info.records - info.copied),
+ (ulong) thd->warning_info->statement_warn_count());
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
@@ -757,9 +795,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (pos == read_info.row_end)
{
thd->cuted_fields++; /* Not enough fields */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_TOO_FEW_RECORDS,
- ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_TOO_FEW_RECORDS,
+ ER(ER_WARN_TOO_FEW_RECORDS),
+ thd->warning_info->current_row_for_warning());
if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
((Field_timestamp*) field)->set_time();
}
@@ -780,9 +819,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (pos != read_info.row_end)
{
thd->cuted_fields++; /* Too long row */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_TOO_MANY_RECORDS,
- ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_TOO_MANY_RECORDS,
+ ER(ER_WARN_TOO_MANY_RECORDS),
+ thd->warning_info->current_row_for_warning());
}
if (thd->killed ||
@@ -815,11 +855,12 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (read_info.line_cuted)
{
thd->cuted_fields++; /* Too long row */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_TOO_MANY_RECORDS,
- ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_TOO_MANY_RECORDS,
+ ER(ER_WARN_TOO_MANY_RECORDS),
+ thd->warning_info->current_row_for_warning());
}
- thd->row_count++;
+ thd->warning_info->inc_current_row_for_warning();
continue_loop:;
}
DBUG_RETURN(test(read_info.error));
@@ -885,7 +926,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field->reset())
{
my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field->field_name,
- thd->row_count);
+ thd->warning_info->current_row_for_warning());
DBUG_RETURN(1);
}
field->set_null();
@@ -953,7 +994,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field->reset())
{
my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0),field->field_name,
- thd->row_count);
+ thd->warning_info->current_row_for_warning());
DBUG_RETURN(1);
}
if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
@@ -967,7 +1008,8 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
thd->cuted_fields++;
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_TOO_FEW_RECORDS,
- ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count);
+ ER(ER_WARN_TOO_FEW_RECORDS),
+ thd->warning_info->current_row_for_warning());
}
else if (item->type() == Item::STRING_ITEM)
{
@@ -1011,19 +1053,184 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (read_info.line_cuted)
{
thd->cuted_fields++; /* Too long row */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS),
- thd->row_count);
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS),
+ thd->warning_info->current_row_for_warning());
if (thd->killed)
DBUG_RETURN(1);
}
- thd->row_count++;
+ thd->warning_info->inc_current_row_for_warning();
continue_loop:;
}
DBUG_RETURN(test(read_info.error));
}
+/****************************************************************************
+** Read rows in xml format
+****************************************************************************/
+static int
+read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
+ List<Item> &fields_vars, List<Item> &set_fields,
+ List<Item> &set_values, READ_INFO &read_info,
+ String &row_tag, ulong skip_lines,
+ bool ignore_check_option_errors)
+{
+ List_iterator_fast<Item> it(fields_vars);
+ Item *item;
+ TABLE *table= table_list->table;
+ bool no_trans_update_stmt;
+ CHARSET_INFO *cs= read_info.read_charset;
+ DBUG_ENTER("read_xml_field");
+
+ no_trans_update_stmt= !table->file->has_transactions();
+
+ for ( ; ; it.rewind())
+ {
+ if (thd->killed)
+ {
+ thd->send_kill_message();
+ DBUG_RETURN(1);
+ }
+
+ // read row tag and save values into tag list
+ if (read_info.read_xml())
+ break;
+
+ List_iterator_fast<XML_TAG> xmlit(read_info.taglist);
+ xmlit.rewind();
+ XML_TAG *tag= NULL;
+
+#ifndef DBUG_OFF
+ DBUG_PRINT("read_xml_field", ("skip_lines=%d", (int) skip_lines));
+ while ((tag= xmlit++))
+ {
+ DBUG_PRINT("read_xml_field", ("got tag:%i '%s' '%s'",
+ tag->level, tag->field.c_ptr(),
+ tag->value.c_ptr()));
+ }
+#endif
+
+ restore_record(table, s->default_values);
+
+ while ((item= it++))
+ {
+ /* If this line is to be skipped we don't want to fill field or var */
+ if (skip_lines)
+ continue;
+
+ /* find field in tag list */
+ xmlit.rewind();
+ tag= xmlit++;
+
+ while(tag && strcmp(tag->field.c_ptr(), item->name) != 0)
+ tag= xmlit++;
+
+ if (!tag) // found null
+ {
+ if (item->type() == Item::FIELD_ITEM)
+ {
+ Field *field= ((Item_field *) item)->field;
+ field->reset();
+ field->set_null();
+ if (field == table->next_number_field)
+ table->auto_increment_field_not_null= TRUE;
+ if (!field->maybe_null())
+ {
+ if (field->type() == FIELD_TYPE_TIMESTAMP)
+ ((Field_timestamp *) field)->set_time();
+ else if (field != table->next_number_field)
+ field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_NULL_TO_NOTNULL, 1);
+ }
+ }
+ else
+ ((Item_user_var_as_out_param *) item)->set_null_value(cs);
+ continue;
+ }
+
+ if (item->type() == Item::FIELD_ITEM)
+ {
+
+ Field *field= ((Item_field *)item)->field;
+ field->set_notnull();
+ if (field == table->next_number_field)
+ table->auto_increment_field_not_null= TRUE;
+ field->store((char *) tag->value.ptr(), tag->value.length(), cs);
+ }
+ else
+ ((Item_user_var_as_out_param *) item)->set_value(
+ (char *) tag->value.ptr(),
+ tag->value.length(), cs);
+ }
+
+ if (read_info.error)
+ break;
+
+ if (skip_lines)
+ {
+ skip_lines--;
+ continue;
+ }
+
+ if (item)
+ {
+ /* No field was read, so the input file has simply ended */
+ if (item == fields_vars.head())
+ break;
+
+ for ( ; item; item= it++)
+ {
+ if (item->type() == Item::FIELD_ITEM)
+ {
+ /*
+ QQ: We probably should not throw a warning for each field.
+ But what about the intention to always have the same number
+ of warnings in THD::cuted_fields (and to get rid of cuted_fields
+ eventually)?
+ */
+ thd->cuted_fields++;
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_TOO_FEW_RECORDS,
+ ER(ER_WARN_TOO_FEW_RECORDS),
+ thd->warning_info->current_row_for_warning());
+ }
+ else
+ ((Item_user_var_as_out_param *)item)->set_null_value(cs);
+ }
+ }
+
+ if (thd->killed ||
+ fill_record_n_invoke_before_triggers(thd, set_fields, set_values,
+ ignore_check_option_errors,
+ table->triggers,
+ TRG_EVENT_INSERT))
+ DBUG_RETURN(1);
+
+ switch (table_list->view_check_option(thd,
+ ignore_check_option_errors)) {
+ case VIEW_CHECK_SKIP:
+ read_info.next_line();
+ goto continue_loop;
+ case VIEW_CHECK_ERROR:
+ DBUG_RETURN(-1);
+ }
+
+ if (write_record(thd, table, &info))
+ DBUG_RETURN(1);
+
+ /*
+ We don't need to reset auto-increment field since we are restoring
+ its default value at the beginning of each loop iteration.
+ */
+ thd->transaction.stmt.modified_non_trans_table= no_trans_update_stmt;
+ thd->warning_info->inc_current_row_for_warning();
+ continue_loop:;
+ }
+ DBUG_RETURN(test(read_info.error) || thd->is_error());
+} /* load xml end */
+
+
/* Unescape all escape characters, mark \N as null */
char
@@ -1062,6 +1269,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
field_term_length= field_term.length();
line_term_ptr=(char*) line_term.ptr();
line_term_length= line_term.length();
+ level= 0; /* for load xml */
if (line_start.length() == 0)
{
line_start_ptr=0;
@@ -1137,6 +1345,10 @@ READ_INFO::~READ_INFO()
my_free((uchar*) buffer,MYF(0));
error=1;
}
+ List_iterator<XML_TAG> xmlit(taglist);
+ XML_TAG *t;
+ while ((t= xmlit++))
+ delete(t);
}
@@ -1469,3 +1681,319 @@ bool READ_INFO::find_start_of_fields()
}
return 0;
}
+
+
+/*
+ Remove from taglist all tags whose level is >= the specified level
+*/
+int READ_INFO::clear_level(int level)
+{
+ DBUG_ENTER("READ_INFO::read_xml clear_level");
+ List_iterator<XML_TAG> xmlit(taglist);
+ xmlit.rewind();
+ XML_TAG *tag;
+
+ while ((tag= xmlit++))
+ {
+ if(tag->level >= level)
+ {
+ xmlit.remove();
+ delete tag;
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Convert an XML entity to a Unicode value.
+ Return -1 on error.
+*/
+static int
+my_xml_entity_to_char(const char *name, uint length)
+{
+ if (length == 2)
+ {
+ if (!memcmp(name, "gt", length))
+ return '>';
+ if (!memcmp(name, "lt", length))
+ return '<';
+ }
+ else if (length == 3)
+ {
+ if (!memcmp(name, "amp", length))
+ return '&';
+ }
+ else if (length == 4)
+ {
+ if (!memcmp(name, "quot", length))
+ return '"';
+ if (!memcmp(name, "apos", length))
+ return '\'';
+ }
+ return -1;
+}
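
A stand-alone exercise of the entity decoder above (its body is copied verbatim so the snippet compiles on its own); unrecognised entities return -1, and the caller then keeps the original "&name;" text unchanged:

    #include <cassert>
    #include <cstring>

    typedef unsigned int uint;

    static int
    my_xml_entity_to_char(const char *name, uint length)
    {
      if (length == 2)
      {
        if (!memcmp(name, "gt", length)) return '>';
        if (!memcmp(name, "lt", length)) return '<';
      }
      else if (length == 3)
      {
        if (!memcmp(name, "amp", length)) return '&';
      }
      else if (length == 4)
      {
        if (!memcmp(name, "quot", length)) return '"';
        if (!memcmp(name, "apos", length)) return '\'';
      }
      return -1;
    }

    int main()
    {
      assert(my_xml_entity_to_char("lt", 2)   == '<');
      assert(my_xml_entity_to_char("amp", 3)  == '&');
      assert(my_xml_entity_to_char("apos", 4) == '\'');
      assert(my_xml_entity_to_char("nbsp", 4) == -1);  // unknown: kept as "&nbsp;"
      return 0;
    }
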
+
+
+/**
+ @brief Convert carriage return, line feed or tab to space
+
+ @param chr character
+
+ @details According to the "XML 1.0" standard,
+ only space (#x20), carriage return, line feed
+ and tab characters are considered whitespace.
+ Convert all of them to space (#x20) for parsing simplicity.
+*/
+static int
+my_tospace(int chr)
+{
+ return (chr == '\t' || chr == '\r' || chr == '\n') ? ' ' : chr;
+}
+
+
+/*
+ Read an XML value: handle multi-byte characters and XML entity escapes
+*/
+int READ_INFO::read_value(int delim, String *val)
+{
+ int chr;
+ String tmp;
+
+ for (chr= GET; my_tospace(chr) != delim && chr != my_b_EOF;)
+ {
+#ifdef USE_MB
+ if (my_mbcharlen(read_charset, chr) > 1)
+ {
+ DBUG_PRINT("read_xml",("multi byte"));
+ int i, ml= my_mbcharlen(read_charset, chr);
+ for (i= 1; i < ml; i++)
+ {
+ val->append(chr);
+ /*
+ Don't use my_tospace() in the middle of a multi-byte character
+ TODO: check that the multi-byte sequence is valid.
+ */
+ chr= GET;
+ if (chr == my_b_EOF)
+ return chr;
+ }
+ }
+#endif
+ if(chr == '&')
+ {
+ tmp.length(0);
+ for (chr= my_tospace(GET) ; chr != ';' ; chr= my_tospace(GET))
+ {
+ if (chr == my_b_EOF)
+ return chr;
+ tmp.append(chr);
+ }
+ if ((chr= my_xml_entity_to_char(tmp.ptr(), tmp.length())) >= 0)
+ val->append(chr);
+ else
+ {
+ val->append('&');
+ val->append(tmp);
+ val->append(';');
+ }
+ }
+ else
+ val->append(chr);
+ chr= GET;
+ }
+ return my_tospace(chr);
+}
+
+
+/*
+ Read a record in XML format.
+ Tags and attributes are stored in taglist.
+ When the tag given in ROWS IDENTIFIED BY is closed, we are done and return.
+*/
+int READ_INFO::read_xml()
+{
+ DBUG_ENTER("READ_INFO::read_xml");
+ int chr, chr2, chr3;
+ int delim= 0;
+ String tag, attribute, value;
+ bool in_tag= false;
+
+ tag.length(0);
+ attribute.length(0);
+ value.length(0);
+
+ for (chr= my_tospace(GET); chr != my_b_EOF ; )
+ {
+ switch(chr){
+ case '<': /* read tag */
+ /* TODO: check if this is a comment <!-- comment --> */
+ chr= my_tospace(GET);
+ if(chr == '!')
+ {
+ chr2= GET;
+ chr3= GET;
+
+ if(chr2 == '-' && chr3 == '-')
+ {
+ chr2= 0;
+ chr3= 0;
+ chr= my_tospace(GET);
+
+ while(chr != '>' || chr2 != '-' || chr3 != '-')
+ {
+ if(chr == '-')
+ {
+ chr3= chr2;
+ chr2= chr;
+ }
+ else if (chr2 == '-')
+ {
+ chr2= 0;
+ chr3= 0;
+ }
+ chr= my_tospace(GET);
+ if (chr == my_b_EOF)
+ goto found_eof;
+ }
+ break;
+ }
+ }
+
+ tag.length(0);
+ while(chr != '>' && chr != ' ' && chr != '/' && chr != my_b_EOF)
+ {
+ if(chr != delim) /* fix for the '<field name =' format */
+ tag.append(chr);
+ chr= my_tospace(GET);
+ }
+
+ // row tag should be in ROWS IDENTIFIED BY '<row>' - stored in line_term
+ if((tag.length() == line_term_length -2) &&
+ (strncmp(tag.c_ptr_safe(), line_term_ptr + 1, tag.length()) == 0))
+ {
+ DBUG_PRINT("read_xml", ("start-of-row: %i %s %s",
+ level,tag.c_ptr_safe(), line_term_ptr));
+ }
+
+ if(chr == ' ' || chr == '>')
+ {
+ level++;
+ clear_level(level + 1);
+ }
+
+ if (chr == ' ')
+ in_tag= true;
+ else
+ in_tag= false;
+ break;
+
+ case ' ': /* read attribute */
+ while(chr == ' ') /* skip blanks */
+ chr= my_tospace(GET);
+
+ if(!in_tag)
+ break;
+
+ while(chr != '=' && chr != '/' && chr != '>' && chr != my_b_EOF)
+ {
+ attribute.append(chr);
+ chr= my_tospace(GET);
+ }
+ break;
+
+ case '>': /* end tag - read tag value */
+ in_tag= false;
+ chr= read_value('<', &value);
+ if(chr == my_b_EOF)
+ goto found_eof;
+
+ /* save value to list */
+ if(tag.length() > 0 && value.length() > 0)
+ {
+ DBUG_PRINT("read_xml", ("lev:%i tag:%s val:%s",
+ level,tag.c_ptr_safe(), value.c_ptr_safe()));
+ taglist.push_front( new XML_TAG(level, tag, value));
+ }
+ tag.length(0);
+ value.length(0);
+ attribute.length(0);
+ break;
+
+ case '/': /* close tag */
+ level--;
+ chr= my_tospace(GET);
+ if(chr != '>') /* if this is an empty tag <tag /> */
+ tag.length(0); /* we should keep tag value */
+ while(chr != '>' && chr != my_b_EOF)
+ {
+ tag.append(chr);
+ chr= my_tospace(GET);
+ }
+
+ if((tag.length() == line_term_length -2) &&
+ (strncmp(tag.c_ptr_safe(), line_term_ptr + 1, tag.length()) == 0))
+ {
+ DBUG_PRINT("read_xml", ("found end-of-row %i %s",
+ level, tag.c_ptr_safe()));
+ DBUG_RETURN(0); //normal return
+ }
+ chr= my_tospace(GET);
+ break;
+
+ case '=': /* attribute name end - read the value */
+ //check for tag field and attribute name
+ if(!memcmp(tag.c_ptr_safe(), STRING_WITH_LEN("field")) &&
+ !memcmp(attribute.c_ptr_safe(), STRING_WITH_LEN("name")))
+ {
+ /*
+ this is the format <field name="xx">xx</field>
+ where the actual field name is in the attribute
+ */
+ delim= my_tospace(GET);
+ tag.length(0);
+ attribute.length(0);
+ chr= '<'; /* we pretend that it is a tag */
+ level--;
+ break;
+ }
+
+ //check for " or '
+ chr= GET;
+ if (chr == my_b_EOF)
+ goto found_eof;
+ if(chr == '"' || chr == '\'')
+ {
+ delim= chr;
+ }
+ else
+ {
+ delim= ' '; /* no delimiter, use space */
+ PUSH(chr);
+ }
+
+ chr= read_value(delim, &value);
+ if(attribute.length() > 0 && value.length() > 0)
+ {
+ DBUG_PRINT("read_xml", ("lev:%i att:%s val:%s\n",
+ level + 1,
+ attribute.c_ptr_safe(),
+ value.c_ptr_safe()));
+ taglist.push_front(new XML_TAG(level + 1, attribute, value));
+ }
+ attribute.length(0);
+ value.length(0);
+ if (chr != ' ')
+ chr= my_tospace(GET);
+ break;
+
+ default:
+ chr= my_tospace(GET);
+ } /* end switch */
+ } /* end while */
+
+found_eof:
+ DBUG_PRINT("read_xml",("Found eof"));
+ eof= 1;
+ DBUG_RETURN(1);
+}
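
A minimal model (not the server code) of how LOAD XML consumes one row: read_xml() pushes (level, name, value) tags onto taglist, and read_xml_field() then looks up each target column by tag name, leaving unmatched columns NULL. The sample data below corresponds to <row><a>1</a><field name="b">2</field></row> under ROWS IDENTIFIED BY '<row>'; the Tag struct and std::list stand in for XML_TAG and List<XML_TAG>:

    #include <cstdio>
    #include <cstring>
    #include <list>

    struct Tag { int level; const char *field; const char *value; };

    int main()
    {
      std::list<Tag> taglist;
      Tag t1= { 1, "a", "1" };                  // plain <a>1</a>
      Tag t2= { 1, "b", "2" };                  // <field name="b">2</field>
      taglist.push_front(t1);
      taglist.push_front(t2);

      const char *columns[]= { "a", "b", "c" };
      for (unsigned i= 0; i < sizeof(columns) / sizeof(columns[0]); i++)
      {
        const Tag *found= NULL;
        for (std::list<Tag>::const_iterator it= taglist.begin();
             it != taglist.end(); ++it)
          if (!std::strcmp(it->field, columns[i])) { found= &*it; break; }

        if (found)
          std::printf("%s <- '%s'\n", columns[i], found->value);
        else
          std::printf("%s <- NULL\n", columns[i]);  // column absent in this row
      }
      return 0;
    }
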
diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc
index 5ddf65cd1b7..9b1bb67ca06 100644
--- a/sql/sql_locale.cc
+++ b/sql/sql_locale.cc
@@ -23,6 +23,43 @@
#include "mysql_priv.h"
+enum err_msgs_index
+{
+ en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT,
+ ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK,
+ es_ES, sv_SE, uk_UA
+} ERR_MSGS_INDEX;
+
+
+MY_LOCALE_ERRMSGS global_errmsgs[]=
+{
+ {"english", NULL},
+ {"czech", NULL},
+ {"danish", NULL},
+ {"dutch", NULL},
+ {"estonian", NULL},
+ {"french", NULL},
+ {"german", NULL},
+ {"greek", NULL},
+ {"hungarian", NULL},
+ {"italian", NULL},
+ {"japanese", NULL},
+ {"korean", NULL},
+ {"norwegian", NULL},
+ {"norwegian-ny", NULL},
+ {"polish", NULL},
+ {"portuguese", NULL},
+ {"romanian", NULL},
+ {"russian", NULL},
+ {"serbian", NULL},
+ {"slovak", NULL},
+ {"spanish", NULL},
+ {"swedish", NULL},
+ {"ukrainian", NULL},
+ {NULL, NULL}
+};
+
+
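
The err_msgs_index enum and global_errmsgs[] above let every MY_LOCALE carry a pointer to the error-message set for its language, with en_US as the fallback for languages that have no translated set. A tiny illustration of the lookup pattern, using simplified stand-in types rather than the server structs:

    #include <cstdio>

    struct Errmsgs { const char *language; };              // ~ MY_LOCALE_ERRMSGS
    struct Locale  { const char *name; Errmsgs *msgs; };   // ~ MY_LOCALE (partial)

    enum Msg_index { idx_en_US= 0, idx_de_DE, idx_fr_FR };

    static Errmsgs errmsgs[]= { {"english"}, {"german"}, {"french"} };

    int main()
    {
      Locale de_AT= { "de_AT", &errmsgs[idx_de_DE] };   // shares the German texts
      Locale eu_ES= { "eu_ES", &errmsgs[idx_en_US] };   // no Basque set: falls back

      std::printf("%s -> %s\n", de_AT.name, de_AT.msgs->language);
      std::printf("%s -> %s\n", eu_ES.name, eu_ES.msgs->language);
      return 0;
    }
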
/***** LOCALE BEGIN ar_AE: Arabic - United Arab Emirates *****/
static const char *my_locale_month_names_ar_AE[13] =
{"يناير","Ùبراير","مارس","أبريل","مايو","يونيو","يوليو","أغسطس","سبتمبر","أكتوبر","نوÙمبر","ديسمبر", NullS };
@@ -51,7 +88,11 @@ MY_LOCALE my_locale_ar_AE
&my_locale_typelib_day_names_ar_AE,
&my_locale_typelib_ab_day_names_ar_AE,
6,
- 8
+ 8,
+ '.', /* decimal point ar_AE */
+ ',', /* thousands_sep ar_AE */
+ "\x03", /* grouping ar_AE */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_AE *****/
@@ -83,7 +124,11 @@ MY_LOCALE my_locale_ar_BH
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_BH */
+ ',', /* thousands_sep ar_BH */
+ "\x03", /* grouping ar_BH */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_BH *****/
@@ -115,7 +160,11 @@ MY_LOCALE my_locale_ar_JO
&my_locale_typelib_day_names_ar_JO,
&my_locale_typelib_ab_day_names_ar_JO,
12,
- 8
+ 8,
+ '.', /* decimal point ar_JO */
+ ',', /* thousands_sep ar_JO */
+ "\x03", /* grouping ar_JO */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_JO *****/
@@ -147,7 +196,11 @@ MY_LOCALE my_locale_ar_SA
&my_locale_typelib_day_names_ar_SA,
&my_locale_typelib_ab_day_names_ar_SA,
12,
- 8
+ 8,
+ '.', /* decimal point ar_SA */
+ '\0', /* thousands_sep ar_SA */
+ "\x80", /* grouping ar_SA */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_SA *****/
@@ -179,7 +232,11 @@ MY_LOCALE my_locale_ar_SY
&my_locale_typelib_day_names_ar_SY,
&my_locale_typelib_ab_day_names_ar_SY,
12,
- 8
+ 8,
+ '.', /* decimal point ar_SY */
+ ',', /* thousands_sep ar_SY */
+ "\x03", /* grouping ar_SY */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_SY *****/
@@ -211,7 +268,11 @@ MY_LOCALE my_locale_be_BY
&my_locale_typelib_day_names_be_BY,
&my_locale_typelib_ab_day_names_be_BY,
10,
- 10
+ 10,
+ ',', /* decimal point be_BY */
+ '.', /* thousands_sep be_BY */
+ "\x03\x03", /* grouping be_BY */
+ &global_errmsgs[en_US]
);
/***** LOCALE END be_BY *****/
@@ -243,7 +304,11 @@ MY_LOCALE my_locale_bg_BG
&my_locale_typelib_day_names_bg_BG,
&my_locale_typelib_ab_day_names_bg_BG,
9,
- 10
+ 10,
+ ',', /* decimal point bg_BG */
+ '\0', /* thousands_sep bg_BG */
+ "\x03\x03", /* grouping bg_BG */
+ &global_errmsgs[en_US]
);
/***** LOCALE END bg_BG *****/
@@ -275,7 +340,11 @@ MY_LOCALE my_locale_ca_ES
&my_locale_typelib_day_names_ca_ES,
&my_locale_typelib_ab_day_names_ca_ES,
8,
- 9
+ 9,
+ ',', /* decimal point ca_ES */
+ '\0', /* thousands_sep ca_ES */
+ "\x80\x80", /* grouping ca_ES */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ca_ES *****/
@@ -307,7 +376,11 @@ MY_LOCALE my_locale_cs_CZ
&my_locale_typelib_day_names_cs_CZ,
&my_locale_typelib_ab_day_names_cs_CZ,
8,
- 7
+ 7,
+ ',', /* decimal point cs_CZ */
+ ' ', /* thousands_sep cs_CZ */
+ "\x03\x03", /* grouping cs_CZ */
+ &global_errmsgs[cs_CZ]
);
/***** LOCALE END cs_CZ *****/
@@ -339,7 +412,11 @@ MY_LOCALE my_locale_da_DK
&my_locale_typelib_day_names_da_DK,
&my_locale_typelib_ab_day_names_da_DK,
9,
- 7
+ 7,
+ ',', /* decimal point da_DK */
+ '.', /* thousands_sep da_DK */
+ "\x03\x03", /* grouping da_DK */
+ &global_errmsgs[da_DK]
);
/***** LOCALE END da_DK *****/
@@ -371,7 +448,11 @@ MY_LOCALE my_locale_de_AT
&my_locale_typelib_day_names_de_AT,
&my_locale_typelib_ab_day_names_de_AT,
9,
- 10
+ 10,
+ ',', /* decimal point de_AT */
+ '\0', /* thousands_sep de_AT */
+ "\x80\x80", /* grouping de_AT */
+ &global_errmsgs[de_DE]
);
/***** LOCALE END de_AT *****/
@@ -403,7 +484,11 @@ MY_LOCALE my_locale_de_DE
&my_locale_typelib_day_names_de_DE,
&my_locale_typelib_ab_day_names_de_DE,
9,
- 10
+ 10,
+ ',', /* decimal point de_DE */
+ '.', /* thousands_sep de_DE */
+ "\x03\x03", /* grouping de_DE */
+ &global_errmsgs[de_DE]
);
/***** LOCALE END de_DE *****/
@@ -435,7 +520,11 @@ MY_LOCALE my_locale_en_US
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_US */
+ ',', /* thousands_sep en_US */
+ "\x03\x03", /* grouping en_US */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_US *****/
@@ -467,7 +556,11 @@ MY_LOCALE my_locale_es_ES
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_ES */
+ '\0', /* thousands_sep es_ES */
+ "\x80\x80", /* grouping es_ES */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_ES *****/
@@ -499,7 +592,11 @@ MY_LOCALE my_locale_et_EE
&my_locale_typelib_day_names_et_EE,
&my_locale_typelib_ab_day_names_et_EE,
9,
- 9
+ 9,
+ ',', /* decimal point et_EE */
+ ' ', /* thousands_sep et_EE */
+ "\x03\x03", /* grouping et_EE */
+ &global_errmsgs[et_EE]
);
/***** LOCALE END et_EE *****/
@@ -531,7 +628,11 @@ MY_LOCALE my_locale_eu_ES
&my_locale_typelib_day_names_eu_ES,
&my_locale_typelib_ab_day_names_eu_ES,
9,
- 10
+ 10,
+ ',', /* decimal point eu_ES */
+ '\0', /* thousands_sep eu_ES */
+ "\x80\x80", /* grouping eu_ES */
+ &global_errmsgs[en_US]
);
/***** LOCALE END eu_ES *****/
@@ -563,7 +664,11 @@ MY_LOCALE my_locale_fi_FI
&my_locale_typelib_day_names_fi_FI,
&my_locale_typelib_ab_day_names_fi_FI,
9,
- 11
+ 11,
+ ',', /* decimal point fi_FI */
+ ' ', /* thousands_sep fi_FI */
+ "\x03\x03", /* grouping fi_FI */
+ &global_errmsgs[en_US]
);
/***** LOCALE END fi_FI *****/
@@ -595,7 +700,11 @@ MY_LOCALE my_locale_fo_FO
&my_locale_typelib_day_names_fo_FO,
&my_locale_typelib_ab_day_names_fo_FO,
9,
- 12
+ 12,
+ ',', /* decimal point fo_FO */
+ '.', /* thousands_sep fo_FO */
+ "\x03\x03", /* grouping fo_FO */
+ &global_errmsgs[en_US]
);
/***** LOCALE END fo_FO *****/
@@ -627,7 +736,11 @@ MY_LOCALE my_locale_fr_FR
&my_locale_typelib_day_names_fr_FR,
&my_locale_typelib_ab_day_names_fr_FR,
9,
- 8
+ 8,
+ ',', /* decimal point fr_FR */
+ '\0', /* thousands_sep fr_FR */
+ "\x80\x80", /* grouping fr_FR */
+ &global_errmsgs[fr_FR]
);
/***** LOCALE END fr_FR *****/
@@ -659,7 +772,11 @@ MY_LOCALE my_locale_gl_ES
&my_locale_typelib_day_names_gl_ES,
&my_locale_typelib_ab_day_names_gl_ES,
8,
- 8
+ 8,
+ ',', /* decimal point gl_ES */
+ '\0', /* thousands_sep gl_ES */
+ "\x80\x80", /* grouping gl_ES */
+ &global_errmsgs[en_US]
);
/***** LOCALE END gl_ES *****/
@@ -691,7 +808,11 @@ MY_LOCALE my_locale_gu_IN
&my_locale_typelib_day_names_gu_IN,
&my_locale_typelib_ab_day_names_gu_IN,
10,
- 8
+ 8,
+ '.', /* decimal point gu_IN */
+ ',', /* thousands_sep gu_IN */
+ "\x03", /* grouping gu_IN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END gu_IN *****/
@@ -723,7 +844,11 @@ MY_LOCALE my_locale_he_IL
&my_locale_typelib_day_names_he_IL,
&my_locale_typelib_ab_day_names_he_IL,
7,
- 5
+ 5,
+ '.', /* decimal point he_IL */
+ ',', /* thousands_sep he_IL */
+ "\x03\x03", /* grouping he_IL */
+ &global_errmsgs[en_US]
);
/***** LOCALE END he_IL *****/
@@ -755,7 +880,11 @@ MY_LOCALE my_locale_hi_IN
&my_locale_typelib_day_names_hi_IN,
&my_locale_typelib_ab_day_names_hi_IN,
7,
- 9
+ 9,
+ '.', /* decimal point hi_IN */
+ ',', /* thousands_sep hi_IN */
+ "\x03", /* grouping hi_IN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END hi_IN *****/
@@ -787,7 +916,11 @@ MY_LOCALE my_locale_hr_HR
&my_locale_typelib_day_names_hr_HR,
&my_locale_typelib_ab_day_names_hr_HR,
8,
- 11
+ 11,
+ ',', /* decimal point hr_HR */
+ '\0', /* thousands_sep hr_HR */
+ "\x80\x80", /* grouping hr_HR */
+ &global_errmsgs[en_US]
);
/***** LOCALE END hr_HR *****/
@@ -819,7 +952,11 @@ MY_LOCALE my_locale_hu_HU
&my_locale_typelib_day_names_hu_HU,
&my_locale_typelib_ab_day_names_hu_HU,
10,
- 9
+ 9,
+ ',', /* decimal point hu_HU */
+ '.', /* thousands_sep hu_HU */
+ "\x03\x03", /* grouping hu_HU */
+ &global_errmsgs[hu_HU]
);
/***** LOCALE END hu_HU *****/
@@ -851,7 +988,11 @@ MY_LOCALE my_locale_id_ID
&my_locale_typelib_day_names_id_ID,
&my_locale_typelib_ab_day_names_id_ID,
9,
- 6
+ 6,
+ ',', /* decimal point id_ID */
+ '.', /* thousands_sep id_ID */
+ "\x03\x03", /* grouping id_ID */
+ &global_errmsgs[en_US]
);
/***** LOCALE END id_ID *****/
@@ -883,7 +1024,11 @@ MY_LOCALE my_locale_is_IS
&my_locale_typelib_day_names_is_IS,
&my_locale_typelib_ab_day_names_is_IS,
9,
- 12
+ 12,
+ ',', /* decimal point is_IS */
+ '.', /* thousands_sep is_IS */
+ "\x03\x03", /* grouping is_IS */
+ &global_errmsgs[en_US]
);
/***** LOCALE END is_IS *****/
@@ -915,7 +1060,11 @@ MY_LOCALE my_locale_it_CH
&my_locale_typelib_day_names_it_CH,
&my_locale_typelib_ab_day_names_it_CH,
9,
- 9
+ 9,
+ ',', /* decimal point it_CH */
+ '\'', /* thousands_sep it_CH */
+ "\x03\x03", /* grouping it_CH */
+ &global_errmsgs[it_IT]
);
/***** LOCALE END it_CH *****/
@@ -947,7 +1096,11 @@ MY_LOCALE my_locale_ja_JP
&my_locale_typelib_day_names_ja_JP,
&my_locale_typelib_ab_day_names_ja_JP,
3,
- 3
+ 3,
+ '.', /* decimal point ja_JP */
+ ',', /* thousands_sep ja_JP */
+ "\x03", /* grouping ja_JP */
+ &global_errmsgs[ja_JP]
);
/***** LOCALE END ja_JP *****/
@@ -979,7 +1132,11 @@ MY_LOCALE my_locale_ko_KR
&my_locale_typelib_day_names_ko_KR,
&my_locale_typelib_ab_day_names_ko_KR,
3,
- 3
+ 3,
+ '.', /* decimal point ko_KR */
+ ',', /* thousands_sep ko_KR */
+ "\x03\x03", /* grouping ko_KR */
+ &global_errmsgs[ko_KR]
);
/***** LOCALE END ko_KR *****/
@@ -1011,7 +1168,11 @@ MY_LOCALE my_locale_lt_LT
&my_locale_typelib_day_names_lt_LT,
&my_locale_typelib_ab_day_names_lt_LT,
9,
- 14
+ 14,
+ ',', /* decimal point lt_LT */
+ '.', /* thousands_sep lt_LT */
+ "\x03\x03", /* grouping lt_LT */
+ &global_errmsgs[en_US]
);
/***** LOCALE END lt_LT *****/
@@ -1043,7 +1204,11 @@ MY_LOCALE my_locale_lv_LV
&my_locale_typelib_day_names_lv_LV,
&my_locale_typelib_ab_day_names_lv_LV,
10,
- 11
+ 11,
+ ',', /* decimal point lv_LV */
+ ' ', /* thousands_sep lv_LV */
+ "\x03\x03", /* grouping lv_LV */
+ &global_errmsgs[en_US]
);
/***** LOCALE END lv_LV *****/
@@ -1075,7 +1240,11 @@ MY_LOCALE my_locale_mk_MK
&my_locale_typelib_day_names_mk_MK,
&my_locale_typelib_ab_day_names_mk_MK,
9,
- 10
+ 10,
+ ',', /* decimal point mk_MK */
+ ' ', /* thousands_sep mk_MK */
+ "\x03\x03", /* grouping mk_MK */
+ &global_errmsgs[en_US]
);
/***** LOCALE END mk_MK *****/
@@ -1107,7 +1276,11 @@ MY_LOCALE my_locale_mn_MN
&my_locale_typelib_day_names_mn_MN,
&my_locale_typelib_ab_day_names_mn_MN,
18,
- 6
+ 6,
+ ',', /* decimal point mn_MN */
+ '.', /* thousands_sep mn_MN */
+ "\x03\x03", /* grouping mn_MN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END mn_MN *****/
@@ -1139,7 +1312,11 @@ MY_LOCALE my_locale_ms_MY
&my_locale_typelib_day_names_ms_MY,
&my_locale_typelib_ab_day_names_ms_MY,
9,
- 6
+ 6,
+ '.', /* decimal point ms_MY */
+ ',', /* thousands_sep ms_MY */
+ "\x03", /* grouping ms_MY */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ms_MY *****/
@@ -1171,7 +1348,11 @@ MY_LOCALE my_locale_nb_NO
&my_locale_typelib_day_names_nb_NO,
&my_locale_typelib_ab_day_names_nb_NO,
9,
- 7
+ 7,
+ ',', /* decimal point nb_NO */
+ '.', /* thousands_sep nb_NO */
+ "\x03\x03", /* grouping nb_NO */
+ &global_errmsgs[no_NO]
);
/***** LOCALE END nb_NO *****/
@@ -1203,7 +1384,11 @@ MY_LOCALE my_locale_nl_NL
&my_locale_typelib_day_names_nl_NL,
&my_locale_typelib_ab_day_names_nl_NL,
9,
- 9
+ 9,
+ ',', /* decimal point nl_NL */
+ '\0', /* thousands_sep nl_NL */
+ "\x80\x80", /* grouping nl_NL */
+ &global_errmsgs[nl_NL]
);
/***** LOCALE END nl_NL *****/
@@ -1235,7 +1420,11 @@ MY_LOCALE my_locale_pl_PL
&my_locale_typelib_day_names_pl_PL,
&my_locale_typelib_ab_day_names_pl_PL,
11,
- 12
+ 12,
+ ',', /* decimal point pl_PL */
+ '\0', /* thousands_sep pl_PL */
+ "\x80\x80", /* grouping pl_PL */
+ &global_errmsgs[pl_PL]
);
/***** LOCALE END pl_PL *****/
@@ -1267,7 +1456,11 @@ MY_LOCALE my_locale_pt_BR
&my_locale_typelib_day_names_pt_BR,
&my_locale_typelib_ab_day_names_pt_BR,
9,
- 7
+ 7,
+ ',', /* decimal point pt_BR */
+ '\0', /* thousands_sep pt_BR */
+ "\x80\x80", /* grouping pt_BR */
+ &global_errmsgs[pt_PT]
);
/***** LOCALE END pt_BR *****/
@@ -1299,7 +1492,11 @@ MY_LOCALE my_locale_pt_PT
&my_locale_typelib_day_names_pt_PT,
&my_locale_typelib_ab_day_names_pt_PT,
9,
- 7
+ 7,
+ ',', /* decimal point pt_PT */
+ '\0', /* thousands_sep pt_PT */
+ "\x80\x80", /* grouping pt_PT */
+ &global_errmsgs[pt_PT]
);
/***** LOCALE END pt_PT *****/
@@ -1331,7 +1528,11 @@ MY_LOCALE my_locale_ro_RO
&my_locale_typelib_day_names_ro_RO,
&my_locale_typelib_ab_day_names_ro_RO,
10,
- 8
+ 8,
+ ',', /* decimal point ro_RO */
+ '.', /* thousands_sep ro_RO */
+ "\x03\x03", /* grouping ro_RO */
+ &global_errmsgs[ro_RO]
);
/***** LOCALE END ro_RO *****/
@@ -1363,7 +1564,11 @@ MY_LOCALE my_locale_ru_RU
&my_locale_typelib_day_names_ru_RU,
&my_locale_typelib_ab_day_names_ru_RU,
8,
- 11
+ 11,
+ ',', /* decimal point ru_RU */
+ ' ', /* thousands_sep ru_RU */
+ "\x03\x03", /* grouping ru_RU */
+ &global_errmsgs[ru_RU]
);
/***** LOCALE END ru_RU *****/
@@ -1395,7 +1600,11 @@ MY_LOCALE my_locale_ru_UA
&my_locale_typelib_day_names_ru_UA,
&my_locale_typelib_ab_day_names_ru_UA,
8,
- 11
+ 11,
+ ',', /* decimal point ru_UA */
+ '.', /* thousands_sep ru_UA */
+ "\x03\x03", /* grouping ru_UA */
+ &global_errmsgs[ru_RU]
);
/***** LOCALE END ru_UA *****/
@@ -1427,7 +1636,11 @@ MY_LOCALE my_locale_sk_SK
&my_locale_typelib_day_names_sk_SK,
&my_locale_typelib_ab_day_names_sk_SK,
9,
- 8
+ 8,
+ ',', /* decimal point sk_SK */
+ ' ', /* thousands_sep sk_SK */
+ "\x03\x03", /* grouping sk_SK */
+ &global_errmsgs[sk_SK]
);
/***** LOCALE END sk_SK *****/
@@ -1459,7 +1672,11 @@ MY_LOCALE my_locale_sl_SI
&my_locale_typelib_day_names_sl_SI,
&my_locale_typelib_ab_day_names_sl_SI,
9,
- 10
+ 10,
+ ',', /* decimal point sl_SI */
+ ' ', /* thousands_sep sl_SI */
+ "\x80\x80", /* grouping sl_SI */
+ &global_errmsgs[en_US]
);
/***** LOCALE END sl_SI *****/
@@ -1491,41 +1708,67 @@ MY_LOCALE my_locale_sq_AL
&my_locale_typelib_day_names_sq_AL,
&my_locale_typelib_ab_day_names_sq_AL,
7,
- 10
+ 10,
+ ',', /* decimal point sq_AL */
+ '.', /* thousands_sep sq_AL */
+ "\x03", /* grouping sq_AL */
+ &global_errmsgs[en_US]
);
/***** LOCALE END sq_AL *****/
-/***** LOCALE BEGIN sr_YU: Servian - Yugoslavia *****/
-static const char *my_locale_month_names_sr_YU[13] =
+/***** LOCALE BEGIN sr_RS: Serbian - Serbia *****/
+static const char *my_locale_month_names_sr_RS[13] =
{"januar","februar","mart","april","maj","juni","juli","avgust","septembar","oktobar","novembar","decembar", NullS };
-static const char *my_locale_ab_month_names_sr_YU[13] =
+static const char *my_locale_ab_month_names_sr_RS[13] =
{"jan","feb","mar","apr","maj","jun","jul","avg","sep","okt","nov","dec", NullS };
-static const char *my_locale_day_names_sr_YU[8] =
+static const char *my_locale_day_names_sr_RS[8] =
{"ponedeljak","utorak","sreda","Äetvrtak","petak","subota","nedelja", NullS };
-static const char *my_locale_ab_day_names_sr_YU[8] =
+static const char *my_locale_ab_day_names_sr_RS[8] =
{"pon","uto","sre","Äet","pet","sub","ned", NullS };
-static TYPELIB my_locale_typelib_month_names_sr_YU =
- { array_elements(my_locale_month_names_sr_YU)-1, "", my_locale_month_names_sr_YU, NULL };
-static TYPELIB my_locale_typelib_ab_month_names_sr_YU =
- { array_elements(my_locale_ab_month_names_sr_YU)-1, "", my_locale_ab_month_names_sr_YU, NULL };
-static TYPELIB my_locale_typelib_day_names_sr_YU =
- { array_elements(my_locale_day_names_sr_YU)-1, "", my_locale_day_names_sr_YU, NULL };
-static TYPELIB my_locale_typelib_ab_day_names_sr_YU =
- { array_elements(my_locale_ab_day_names_sr_YU)-1, "", my_locale_ab_day_names_sr_YU, NULL };
-MY_LOCALE my_locale_sr_YU
+static TYPELIB my_locale_typelib_month_names_sr_RS =
+ { array_elements(my_locale_month_names_sr_RS)-1, "", my_locale_month_names_sr_RS, NULL };
+static TYPELIB my_locale_typelib_ab_month_names_sr_RS =
+ { array_elements(my_locale_ab_month_names_sr_RS)-1, "", my_locale_ab_month_names_sr_RS, NULL };
+static TYPELIB my_locale_typelib_day_names_sr_RS =
+ { array_elements(my_locale_day_names_sr_RS)-1, "", my_locale_day_names_sr_RS, NULL };
+static TYPELIB my_locale_typelib_ab_day_names_sr_RS =
+ { array_elements(my_locale_ab_day_names_sr_RS)-1, "", my_locale_ab_day_names_sr_RS, NULL };
+MY_LOCALE my_locale_sr_YU /* Deprecated, use sr_RS instead */
(
48,
"sr_YU",
- "Servian - Yugoslavia",
+ "Serbian - Yugoslavia",
FALSE,
- &my_locale_typelib_month_names_sr_YU,
- &my_locale_typelib_ab_month_names_sr_YU,
- &my_locale_typelib_day_names_sr_YU,
- &my_locale_typelib_ab_day_names_sr_YU,
+ &my_locale_typelib_month_names_sr_RS,
+ &my_locale_typelib_ab_month_names_sr_RS,
+ &my_locale_typelib_day_names_sr_RS,
+ &my_locale_typelib_ab_day_names_sr_RS,
9,
- 10
+ 10,
+ '.', /* decimal point sr_RS */
+ '\0', /* thousands_sep sr_RS */
+ "\x80", /* grouping sr_RS */
+ &global_errmsgs[sr_RS]
);
-/***** LOCALE END sr_YU *****/
+
+MY_LOCALE my_locale_sr_RS
+(
+ 48,
+ "sr_RS",
+ "Serbian - Serbia",
+ FALSE,
+ &my_locale_typelib_month_names_sr_RS,
+ &my_locale_typelib_ab_month_names_sr_RS,
+ &my_locale_typelib_day_names_sr_RS,
+ &my_locale_typelib_ab_day_names_sr_RS,
+ 9,
+ 10,
+ '.', /* decimal point sr_RS */
+ '\0', /* thousands_sep sr_RS */
+ "\x80", /* grouping sr_RS */
+ &global_errmsgs[sr_RS]
+);
+/***** LOCALE END sr_RS *****/
/***** LOCALE BEGIN sv_SE: Swedish - Sweden *****/
static const char *my_locale_month_names_sv_SE[13] =
@@ -1555,7 +1798,11 @@ MY_LOCALE my_locale_sv_SE
&my_locale_typelib_day_names_sv_SE,
&my_locale_typelib_ab_day_names_sv_SE,
9,
- 7
+ 7,
+ ',', /* decimal point sv_SE */
+ ' ', /* thousands_sep sv_SE */
+ "\x03\x03", /* grouping sv_SE */
+ &global_errmsgs[sv_SE]
);
/***** LOCALE END sv_SE *****/
@@ -1587,7 +1834,11 @@ MY_LOCALE my_locale_ta_IN
&my_locale_typelib_day_names_ta_IN,
&my_locale_typelib_ab_day_names_ta_IN,
10,
- 8
+ 8,
+ '.', /* decimal point ta_IN */
+ ',', /* thousands_sep ta_IN */
+ "\x03\x02", /* grouping ta_IN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ta_IN *****/
@@ -1619,7 +1870,11 @@ MY_LOCALE my_locale_te_IN
&my_locale_typelib_day_names_te_IN,
&my_locale_typelib_ab_day_names_te_IN,
10,
- 9
+ 9,
+ '.', /* decimal point te_IN */
+ ',', /* thousands_sep te_IN */
+ "\x03\x02", /* grouping te_IN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END te_IN *****/
@@ -1651,7 +1906,11 @@ MY_LOCALE my_locale_th_TH
&my_locale_typelib_day_names_th_TH,
&my_locale_typelib_ab_day_names_th_TH,
10,
- 8
+ 8,
+ '.', /* decimal point th_TH */
+ ',', /* thousands_sep th_TH */
+ "\x03", /* grouping th_TH */
+ &global_errmsgs[en_US]
);
/***** LOCALE END th_TH *****/
@@ -1683,7 +1942,11 @@ MY_LOCALE my_locale_tr_TR
&my_locale_typelib_day_names_tr_TR,
&my_locale_typelib_ab_day_names_tr_TR,
7,
- 9
+ 9,
+ ',', /* decimal point tr_TR */
+ '.', /* thousands_sep tr_TR */
+ "\x03\x03", /* grouping tr_TR */
+ &global_errmsgs[en_US]
);
/***** LOCALE END tr_TR *****/
@@ -1715,7 +1978,11 @@ MY_LOCALE my_locale_uk_UA
&my_locale_typelib_day_names_uk_UA,
&my_locale_typelib_ab_day_names_uk_UA,
8,
- 9
+ 9,
+ ',', /* decimal point uk_UA */
+ '.', /* thousands_sep uk_UA */
+ "\x03\x03", /* grouping uk_UA */
+ &global_errmsgs[uk_UA]
);
/***** LOCALE END uk_UA *****/
@@ -1747,7 +2014,11 @@ MY_LOCALE my_locale_ur_PK
&my_locale_typelib_day_names_ur_PK,
&my_locale_typelib_ab_day_names_ur_PK,
6,
- 6
+ 6,
+ '.', /* decimal point ur_PK */
+ ',', /* thousands_sep ur_PK */
+ "\x03\x03", /* grouping ur_PK */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ur_PK *****/
@@ -1779,7 +2050,11 @@ MY_LOCALE my_locale_vi_VN
&my_locale_typelib_day_names_vi_VN,
&my_locale_typelib_ab_day_names_vi_VN,
16,
- 11
+ 11,
+ ',', /* decimal point vi_VN */
+ '.', /* thousands_sep vi_VN */
+ "\x03\x03", /* grouping vi_VN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END vi_VN *****/
@@ -1811,7 +2086,11 @@ MY_LOCALE my_locale_zh_CN
&my_locale_typelib_day_names_zh_CN,
&my_locale_typelib_ab_day_names_zh_CN,
3,
- 3
+ 3,
+ '.', /* decimal point zh_CN */
+ ',', /* thousands_sep zh_CN */
+ "\x03", /* grouping zh_CN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END zh_CN *****/
@@ -1843,7 +2122,11 @@ MY_LOCALE my_locale_zh_TW
&my_locale_typelib_day_names_zh_TW,
&my_locale_typelib_ab_day_names_zh_TW,
3,
- 2
+ 2,
+ '.', /* decimal point zh_TW */
+ ',', /* thousands_sep zh_TW */
+ "\x03", /* grouping zh_TW */
+ &global_errmsgs[en_US]
);
/***** LOCALE END zh_TW *****/
@@ -1859,7 +2142,11 @@ MY_LOCALE my_locale_ar_DZ
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_DZ */
+ ',', /* thousands_sep ar_DZ */
+ "\x03", /* grouping ar_DZ */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_DZ *****/
@@ -1875,7 +2162,11 @@ MY_LOCALE my_locale_ar_EG
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_EG */
+ ',', /* thousands_sep ar_EG */
+ "\x03", /* grouping ar_EG */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_EG *****/
@@ -1891,7 +2182,11 @@ MY_LOCALE my_locale_ar_IN
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_IN */
+ ',', /* thousands_sep ar_IN */
+ "\x03", /* grouping ar_IN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_IN *****/
@@ -1907,7 +2202,11 @@ MY_LOCALE my_locale_ar_IQ
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_IQ */
+ ',', /* thousands_sep ar_IQ */
+ "\x03", /* grouping ar_IQ */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_IQ *****/
@@ -1923,7 +2222,11 @@ MY_LOCALE my_locale_ar_KW
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_KW */
+ ',', /* thousands_sep ar_KW */
+ "\x03", /* grouping ar_KW */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_KW *****/
@@ -1939,7 +2242,11 @@ MY_LOCALE my_locale_ar_LB
&my_locale_typelib_day_names_ar_JO,
&my_locale_typelib_ab_day_names_ar_JO,
12,
- 8
+ 8,
+ '.', /* decimal point ar_LB */
+ ',', /* thousands_sep ar_LB */
+ "\x03", /* grouping ar_LB */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_LB *****/
@@ -1955,7 +2262,11 @@ MY_LOCALE my_locale_ar_LY
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_LY */
+ ',', /* thousands_sep ar_LY */
+ "\x03", /* grouping ar_LY */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_LY *****/
@@ -1971,7 +2282,11 @@ MY_LOCALE my_locale_ar_MA
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_MA */
+ ',', /* thousands_sep ar_MA */
+ "\x03", /* grouping ar_MA */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_MA *****/
@@ -1987,7 +2302,11 @@ MY_LOCALE my_locale_ar_OM
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_OM */
+ ',', /* thousands_sep ar_OM */
+ "\x03", /* grouping ar_OM */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_OM *****/
@@ -2003,7 +2322,11 @@ MY_LOCALE my_locale_ar_QA
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_QA */
+ ',', /* thousands_sep ar_QA */
+ "\x03", /* grouping ar_QA */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_QA *****/
@@ -2019,7 +2342,11 @@ MY_LOCALE my_locale_ar_SD
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_SD */
+ ',', /* thousands_sep ar_SD */
+ "\x03", /* grouping ar_SD */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_SD *****/
@@ -2035,7 +2362,11 @@ MY_LOCALE my_locale_ar_TN
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_TN */
+ ',', /* thousands_sep ar_TN */
+ "\x03", /* grouping ar_TN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_TN *****/
@@ -2051,7 +2382,11 @@ MY_LOCALE my_locale_ar_YE
&my_locale_typelib_day_names_ar_BH,
&my_locale_typelib_ab_day_names_ar_BH,
6,
- 8
+ 8,
+ '.', /* decimal point ar_YE */
+ ',', /* thousands_sep ar_YE */
+ "\x03", /* grouping ar_YE */
+ &global_errmsgs[en_US]
);
/***** LOCALE END ar_YE *****/
@@ -2067,7 +2402,11 @@ MY_LOCALE my_locale_de_BE
&my_locale_typelib_day_names_de_DE,
&my_locale_typelib_ab_day_names_de_DE,
9,
- 10
+ 10,
+ ',', /* decimal point de_BE */
+ '.', /* thousands_sep de_BE */
+ "\x03\x03", /* grouping de_BE */
+ &global_errmsgs[de_DE]
);
/***** LOCALE END de_BE *****/
@@ -2083,7 +2422,11 @@ MY_LOCALE my_locale_de_CH
&my_locale_typelib_day_names_de_DE,
&my_locale_typelib_ab_day_names_de_DE,
9,
- 10
+ 10,
+ '.', /* decimal point de_CH */
+ '\'', /* thousands_sep de_CH */
+ "\x03\x03", /* grouping de_CH */
+ &global_errmsgs[de_DE]
);
/***** LOCALE END de_CH *****/
@@ -2099,7 +2442,11 @@ MY_LOCALE my_locale_de_LU
&my_locale_typelib_day_names_de_DE,
&my_locale_typelib_ab_day_names_de_DE,
9,
- 10
+ 10,
+ ',', /* decimal point de_LU */
+ '.', /* thousands_sep de_LU */
+ "\x03\x03", /* grouping de_LU */
+ &global_errmsgs[de_DE]
);
/***** LOCALE END de_LU *****/
@@ -2115,7 +2462,11 @@ MY_LOCALE my_locale_en_AU
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_AU */
+ ',', /* thousands_sep en_AU */
+ "\x03\x03", /* grouping en_AU */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_AU *****/
@@ -2131,7 +2482,11 @@ MY_LOCALE my_locale_en_CA
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_CA */
+ ',', /* thousands_sep en_CA */
+ "\x03\x03", /* grouping en_CA */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_CA *****/
@@ -2147,7 +2502,11 @@ MY_LOCALE my_locale_en_GB
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_GB */
+ ',', /* thousands_sep en_GB */
+ "\x03\x03", /* grouping en_GB */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_GB *****/
@@ -2163,7 +2522,11 @@ MY_LOCALE my_locale_en_IN
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_IN */
+ ',', /* thousands_sep en_IN */
+ "\x03\x02", /* grouping en_IN */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_IN *****/
@@ -2179,7 +2542,11 @@ MY_LOCALE my_locale_en_NZ
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_NZ */
+ ',', /* thousands_sep en_NZ */
+ "\x03\x03", /* grouping en_NZ */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_NZ *****/
@@ -2195,7 +2562,11 @@ MY_LOCALE my_locale_en_PH
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_PH */
+ ',', /* thousands_sep en_PH */
+ "\x03", /* grouping en_PH */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_PH *****/
@@ -2211,7 +2582,11 @@ MY_LOCALE my_locale_en_ZA
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_ZA */
+ ',', /* thousands_sep en_ZA */
+ "\x03\x03", /* grouping en_ZA */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_ZA *****/
@@ -2227,7 +2602,11 @@ MY_LOCALE my_locale_en_ZW
&my_locale_typelib_day_names_en_US,
&my_locale_typelib_ab_day_names_en_US,
9,
- 9
+ 9,
+ '.', /* decimal point en_ZW */
+ ',', /* thousands_sep en_ZW */
+ "\x03\x03", /* grouping en_ZW */
+ &global_errmsgs[en_US]
);
/***** LOCALE END en_ZW *****/
@@ -2243,7 +2622,11 @@ MY_LOCALE my_locale_es_AR
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_AR */
+ '.', /* thousands_sep es_AR */
+ "\x03\x03", /* grouping es_AR */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_AR *****/
@@ -2259,7 +2642,11 @@ MY_LOCALE my_locale_es_BO
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_BO */
+ '\0', /* thousands_sep es_BO */
+ "\x80\x80", /* grouping es_BO */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_BO *****/
@@ -2275,7 +2662,11 @@ MY_LOCALE my_locale_es_CL
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_CL */
+ '\0', /* thousands_sep es_CL */
+ "\x80\x80", /* grouping es_CL */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_CL *****/
@@ -2291,7 +2682,11 @@ MY_LOCALE my_locale_es_CO
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_CO */
+ '\0', /* thousands_sep es_CO */
+ "\x80\x80", /* grouping es_CO */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_CO *****/
@@ -2307,7 +2702,11 @@ MY_LOCALE my_locale_es_CR
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_CR */
+ '\0', /* thousands_sep es_CR */
+ "\x80\x80", /* grouping es_CR */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_CR *****/
@@ -2323,7 +2722,11 @@ MY_LOCALE my_locale_es_DO
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_DO */
+ '\0', /* thousands_sep es_DO */
+ "\x80\x80", /* grouping es_DO */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_DO *****/
@@ -2339,7 +2742,11 @@ MY_LOCALE my_locale_es_EC
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_EC */
+ '\0', /* thousands_sep es_EC */
+ "\x80\x80", /* grouping es_EC */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_EC *****/
@@ -2355,7 +2762,11 @@ MY_LOCALE my_locale_es_GT
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_GT */
+ '\0', /* thousands_sep es_GT */
+ "\x80\x80", /* grouping es_GT */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_GT *****/
@@ -2371,7 +2782,11 @@ MY_LOCALE my_locale_es_HN
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_HN */
+ '\0', /* thousands_sep es_HN */
+ "\x80\x80", /* grouping es_HN */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_HN *****/
@@ -2387,7 +2802,11 @@ MY_LOCALE my_locale_es_MX
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_MX */
+ '\0', /* thousands_sep es_MX */
+ "\x80\x80", /* grouping es_MX */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_MX *****/
@@ -2403,7 +2822,11 @@ MY_LOCALE my_locale_es_NI
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_NI */
+ '\0', /* thousands_sep es_NI */
+ "\x80\x80", /* grouping es_NI */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_NI *****/
@@ -2419,7 +2842,11 @@ MY_LOCALE my_locale_es_PA
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_PA */
+ '\0', /* thousands_sep es_PA */
+ "\x80\x80", /* grouping es_PA */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_PA *****/
@@ -2435,7 +2862,11 @@ MY_LOCALE my_locale_es_PE
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_PE */
+ '\0', /* thousands_sep es_PE */
+ "\x80\x80", /* grouping es_PE */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_PE *****/
@@ -2451,7 +2882,11 @@ MY_LOCALE my_locale_es_PR
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_PR */
+ '\0', /* thousands_sep es_PR */
+ "\x80\x80", /* grouping es_PR */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_PR *****/
@@ -2467,7 +2902,11 @@ MY_LOCALE my_locale_es_PY
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_PY */
+ '\0', /* thousands_sep es_PY */
+ "\x80\x80", /* grouping es_PY */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_PY *****/
@@ -2483,7 +2922,11 @@ MY_LOCALE my_locale_es_SV
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_SV */
+ '\0', /* thousands_sep es_SV */
+ "\x80\x80", /* grouping es_SV */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_SV *****/
@@ -2499,7 +2942,11 @@ MY_LOCALE my_locale_es_US
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ '.', /* decimal point es_US */
+ ',', /* thousands_sep es_US */
+ "\x03\x03", /* grouping es_US */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_US *****/
@@ -2515,7 +2962,11 @@ MY_LOCALE my_locale_es_UY
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_UY */
+ '\0', /* thousands_sep es_UY */
+ "\x80\x80", /* grouping es_UY */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_UY *****/
@@ -2531,7 +2982,11 @@ MY_LOCALE my_locale_es_VE
&my_locale_typelib_day_names_es_ES,
&my_locale_typelib_ab_day_names_es_ES,
10,
- 9
+ 9,
+ ',', /* decimal point es_VE */
+ '\0', /* thousands_sep es_VE */
+ "\x80\x80", /* grouping es_VE */
+ &global_errmsgs[es_ES]
);
/***** LOCALE END es_VE *****/
@@ -2547,7 +3002,11 @@ MY_LOCALE my_locale_fr_BE
&my_locale_typelib_day_names_fr_FR,
&my_locale_typelib_ab_day_names_fr_FR,
9,
- 8
+ 8,
+ ',', /* decimal point fr_BE */
+ '.', /* thousands_sep fr_BE */
+ "\x80\x80", /* grouping fr_BE */
+ &global_errmsgs[fr_FR]
);
/***** LOCALE END fr_BE *****/
@@ -2563,7 +3022,11 @@ MY_LOCALE my_locale_fr_CA
&my_locale_typelib_day_names_fr_FR,
&my_locale_typelib_ab_day_names_fr_FR,
9,
- 8
+ 8,
+ ',', /* decimal point fr_CA */
+ ' ', /* thousands_sep fr_CA */
+ "\x80\x80", /* grouping fr_CA */
+ &global_errmsgs[fr_FR]
);
/***** LOCALE END fr_CA *****/
@@ -2579,7 +3042,11 @@ MY_LOCALE my_locale_fr_CH
&my_locale_typelib_day_names_fr_FR,
&my_locale_typelib_ab_day_names_fr_FR,
9,
- 8
+ 8,
+ ',', /* decimal point fr_CH */
+ '\0', /* thousands_sep fr_CH */
+ "\x80\x80", /* grouping fr_CH */
+ &global_errmsgs[fr_FR]
);
/***** LOCALE END fr_CH *****/
@@ -2595,7 +3062,11 @@ MY_LOCALE my_locale_fr_LU
&my_locale_typelib_day_names_fr_FR,
&my_locale_typelib_ab_day_names_fr_FR,
9,
- 8
+ 8,
+ ',', /* decimal point fr_LU */
+ '\0', /* thousands_sep fr_LU */
+ "\x80\x80", /* grouping fr_LU */
+ &global_errmsgs[fr_FR]
);
/***** LOCALE END fr_LU *****/
@@ -2611,7 +3082,11 @@ MY_LOCALE my_locale_it_IT
&my_locale_typelib_day_names_it_CH,
&my_locale_typelib_ab_day_names_it_CH,
9,
- 9
+ 9,
+ ',', /* decimal point it_IT */
+ '\0', /* thousands_sep it_IT */
+ "\x80\x80", /* grouping it_IT */
+ &global_errmsgs[it_IT]
);
/***** LOCALE END it_IT *****/
@@ -2627,7 +3102,11 @@ MY_LOCALE my_locale_nl_BE
&my_locale_typelib_day_names_nl_NL,
&my_locale_typelib_ab_day_names_nl_NL,
9,
- 9
+ 9,
+ ',', /* decimal point nl_BE */
+ '.', /* thousands_sep nl_BE */
+ "\x80\x80", /* grouping nl_BE */
+ &global_errmsgs[nl_NL]
);
/***** LOCALE END nl_BE *****/
@@ -2643,7 +3122,11 @@ MY_LOCALE my_locale_no_NO
&my_locale_typelib_day_names_nb_NO,
&my_locale_typelib_ab_day_names_nb_NO,
9,
- 7
+ 7,
+ ',', /* decimal point no_NO */
+ '.', /* thousands_sep no_NO */
+ "\x03\x03", /* grouping no_NO */
+ &global_errmsgs[no_NO]
);
/***** LOCALE END no_NO *****/
@@ -2659,7 +3142,11 @@ MY_LOCALE my_locale_sv_FI
&my_locale_typelib_day_names_sv_SE,
&my_locale_typelib_ab_day_names_sv_SE,
9,
- 7
+ 7,
+ ',', /* decimal point sv_FI */
+ ' ', /* thousands_sep sv_FI */
+ "\x03\x03", /* grouping sv_FI */
+ &global_errmsgs[sv_SE]
);
/***** LOCALE END sv_FI *****/
@@ -2675,11 +3162,87 @@ MY_LOCALE my_locale_zh_HK
&my_locale_typelib_day_names_zh_CN,
&my_locale_typelib_ab_day_names_zh_CN,
3,
- 3
+ 3,
+ '.', /* decimal point zh_HK */
+ ',', /* thousands_sep zh_HK */
+ "\x03", /* grouping zh_HK */
+ &global_errmsgs[en_US]
);
/***** LOCALE END zh_HK *****/
+/***** LOCALE BEGIN el_GR: Greek - Greece *****/
+static const char *my_locale_month_names_el_GR[13]=
+{
+ "ΙανουάÏιος", "ΦεβÏουάÏιος", "ΜάÏτιος",
+ "ΑπÏίλιος", "Μάιος", "ΙοÏνιος",
+ "ΙοÏλιος", "ΑÏγουστος", "ΣεπτέμβÏιος",
+ "ΟκτώβÏιος", "ÎοέμβÏιος", "ΔεκέμβÏιος", NullS
+};
+
+static const char *my_locale_ab_month_names_el_GR[13]=
+{
+ "Ιαν", "Φεβ", "ΜάÏ",
+ "ΑπÏ", "Μάι", "ΙοÏν",
+ "ΙοÏλ","ΑÏγ", "Σεπ",
+ "Οκτ", "Îοέ", "Δεκ", NullS
+};
+
+static const char *my_locale_day_names_el_GR[8] =
+{
+ "ΔευτέÏα", "ΤÏίτη", "ΤετάÏτη", "Πέμπτη",
+ "ΠαÏασκευή", "Σάββατο", "ΚυÏιακή", NullS
+};
+
+static const char *my_locale_ab_day_names_el_GR[8]=
+{
+ "Δευ", "ΤÏί", "Τετ", "Πέμ",
+ "ΠαÏ", "Σάβ", "ΚυÏ", NullS
+};
+
+static TYPELIB my_locale_typelib_month_names_el_GR=
+{
+ array_elements(my_locale_month_names_el_GR) - 1,
+ "", my_locale_month_names_el_GR, NULL
+};
+
+static TYPELIB my_locale_typelib_ab_month_names_el_GR=
+{
+ array_elements(my_locale_ab_month_names_el_GR)-1,
+ "", my_locale_ab_month_names_el_GR, NULL
+};
+
+static TYPELIB my_locale_typelib_day_names_el_GR=
+{
+ array_elements(my_locale_day_names_el_GR)-1,
+ "", my_locale_day_names_el_GR, NULL
+};
+
+static TYPELIB my_locale_typelib_ab_day_names_el_GR=
+{
+ array_elements(my_locale_ab_day_names_el_GR) - 1,
+ "", my_locale_ab_day_names_el_GR, NULL
+};
+
+MY_LOCALE my_locale_el_GR
+(
+ 109,
+ "el_GR",
+ "Greek - Greece",
+ FALSE,
+ &my_locale_typelib_month_names_el_GR,
+ &my_locale_typelib_ab_month_names_el_GR,
+ &my_locale_typelib_day_names_el_GR,
+ &my_locale_typelib_ab_day_names_el_GR,
+ 11, /* max mon name length */
+ 9, /* max day name length */
+ ',', /* decimal point el_GR */
+ '.', /* thousands_sep el_GR */
+ "\x80", /* grouping el_GR */
+ &global_errmsgs[el_GR]
+);
+/***** LOCALE END el_GR *****/
+
/*
The list of all locales.
Note, locales must be ordered according to their
@@ -2736,7 +3299,7 @@ MY_LOCALE *my_locales[]=
&my_locale_sk_SK,
&my_locale_sl_SI,
&my_locale_sq_AL,
- &my_locale_sr_YU,
+ &my_locale_sr_RS,
&my_locale_ta_IN,
&my_locale_te_IN,
&my_locale_th_TH,
@@ -2797,10 +3360,18 @@ MY_LOCALE *my_locales[]=
&my_locale_no_NO,
&my_locale_sv_FI,
&my_locale_zh_HK,
+ &my_locale_el_GR,
NULL
};
+MY_LOCALE *my_locales_deprecated[]=
+{
+ &my_locale_sr_YU,
+ NULL
+};
+
+
MY_LOCALE *my_locale_by_number(uint number)
{
MY_LOCALE *locale;
@@ -2813,17 +3384,60 @@ MY_LOCALE *my_locale_by_number(uint number)
}
-MY_LOCALE *my_locale_by_name(const char *name)
+static MY_LOCALE*
+my_locale_by_name(MY_LOCALE** locales, const char *name)
{
MY_LOCALE **locale;
- for (locale= my_locales; *locale != NULL; locale++)
+ for (locale= locales; *locale != NULL; locale++)
{
if (!my_strcasecmp(&my_charset_latin1, (*locale)->name, name))
- {
- // Check that locale is on its correct position in the array
- DBUG_ASSERT((*locale) == my_locales[(*locale)->number]);
return *locale;
- }
}
return NULL;
}
+
+
+MY_LOCALE *my_locale_by_name(const char *name)
+{
+ MY_LOCALE *locale;
+
+ if ((locale= my_locale_by_name(my_locales, name)))
+ {
+ // Check that locale is on its correct position in the array
+ DBUG_ASSERT(locale == my_locales[locale->number]);
+ return locale;
+ }
+ else if ((locale= my_locale_by_name(my_locales_deprecated, name)))
+ {
+ THD *thd= current_thd;
+ /*
+ Replace the deprecated locale to the corresponding
+ 'fresh' locale with the same ID.
+ */
+ locale= my_locales[locale->number];
+ if (thd)
+ {
+ // Send a warning to the client
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX),
+ name, locale->name);
+ }
+ else
+ {
+ // Send a warning to mysqld error log
+ sql_print_warning("The syntax '%s' is deprecated and will be removed. "
+ "Please use %s instead.",
+ name, locale->name);
+ }
+ }
+ return locale;
+}
+
+
+void cleanup_errmsgs()
+{
+ for (MY_LOCALE_ERRMSGS *msgs= global_errmsgs; msgs->language; msgs++)
+ {
+ my_free(msgs->errmsgs, MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR));
+ }
+}
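The reworked my_locale_by_name() above searches the live locale table first and only then the deprecated one, remapping a deprecated entry (sr_YU) onto the current locale that carries the same number and emitting a deprecation warning. Below is a compact standalone sketch of that two-phase lookup pattern; the Locale struct, the numbers and the warning text are illustrative stand-ins, and the real code compares names case-insensitively with my_strcasecmp rather than strcmp.

// Illustrative sketch of the deprecated-name fallback, not server code.
#include <cstdio>
#include <cstring>

struct Locale { unsigned number; const char *name; };

static Locale en_US= { 0,  "en_US" };
static Locale sr_RS= { 48, "sr_RS" };                 // current locale (number invented)
static Locale sr_YU= { 48, "sr_YU" };                 // deprecated alias

static Locale *live_locales[]=       { &en_US, &sr_RS, nullptr };
static Locale *deprecated_locales[]= { &sr_YU, nullptr };

static Locale *find(Locale **list, const char *name)
{
  for (; *list; list++)
    if (std::strcmp((*list)->name, name) == 0)
      return *list;
  return nullptr;
}

static Locale *locale_by_name(const char *name)
{
  if (Locale *loc= find(live_locales, name))
    return loc;                                       // normal case
  if (Locale *old_loc= find(deprecated_locales, name))
  {
    // Remap onto the live locale carrying the same number and warn.
    for (Locale **l= live_locales; *l; l++)
      if ((*l)->number == old_loc->number)
      {
        std::fprintf(stderr, "'%s' is deprecated, use '%s' instead.\n",
                     name, (*l)->name);
        return *l;
      }
  }
  return nullptr;
}

int main()
{
  Locale *loc= locale_by_name("sr_YU");
  std::printf("%s\n", loc ? loc->name : "(unknown)");  // prints sr_RS
  return 0;
}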
diff --git a/sql/sql_map.h b/sql/sql_map.h
index a1efba0da6f..5ae260841e0 100644
--- a/sql/sql_map.h
+++ b/sql/sql_map.h
@@ -1,3 +1,6 @@
+#ifndef SQL_MAP_INCLUDED
+#define SQL_MAP_INCLUDED
+
/* Copyright (C) 2000-2001, 2005 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -60,3 +63,5 @@ public:
return file->map;
}
};
+
+#endif /* SQL_MAP_INCLUDED */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index de106918178..3244467b19c 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -21,12 +21,14 @@
#include <m_ctype.h>
#include <myisam.h>
#include <my_dir.h>
+#include "rpl_handler.h"
#include "sp_head.h"
#include "sp.h"
#include "sp_cache.h"
#include "events.h"
#include "sql_trigger.h"
+#include "sql_prepare.h"
#include "probes_mysql.h"
/**
@@ -45,7 +47,6 @@
"FUNCTION" : "PROCEDURE")
static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
-static bool check_show_create_table_access(THD *thd, TABLE_LIST *table);
const char *any_db="*any*"; // Special symbol for check_access
@@ -307,13 +308,12 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_SHOW_BINLOGS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_SLAVE_HOSTS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_BINLOG_EVENTS]= CF_STATUS_COMMAND;
- sql_command_flags[SQLCOM_SHOW_COLUMN_TYPES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_STORAGE_ENGINES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_AUTHORS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CONTRIBUTORS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PRIVILEGES]= CF_STATUS_COMMAND;
- sql_command_flags[SQLCOM_SHOW_WARNS]= CF_STATUS_COMMAND;
- sql_command_flags[SQLCOM_SHOW_ERRORS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_WARNS]= CF_STATUS_COMMAND | CF_DIAGNOSTIC_STMT;
+ sql_command_flags[SQLCOM_SHOW_ERRORS]= CF_STATUS_COMMAND | CF_DIAGNOSTIC_STMT;
sql_command_flags[SQLCOM_SHOW_ENGINE_STATUS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_ENGINE_MUTEX]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_ENGINE_LOGS]= CF_STATUS_COMMAND;
@@ -382,7 +382,7 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var,
Vio* save_vio;
ulong save_client_capabilities;
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.start_new_query();
thd->profiling.set_query_source(init_command_var->value,
init_command_var->value_length);
@@ -410,7 +410,7 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var,
thd->client_capabilities= save_client_capabilities;
thd->net.vio= save_vio;
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.finish_current_query();
#endif
}
@@ -460,7 +460,7 @@ static void handle_bootstrap_impl(THD *thd)
/* purecov: begin tested */
if (net_realloc(&(thd->net), 2 * thd->net.max_packet))
{
- net_end_statement(thd);
+ thd->protocol->end_statement();
bootstrap_error= 1;
break;
}
@@ -485,8 +485,8 @@ static void handle_bootstrap_impl(THD *thd)
thd->db_length + 1 +
QUERY_CACHE_FLAGS_SIZE);
thd->set_query(query, length);
- DBUG_PRINT("query",("%-.4096s", thd->query()));
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+ DBUG_PRINT("query",("%-.4096s",thd->query()));
+#if defined(ENABLED_PROFILING)
thd->profiling.start_new_query();
thd->profiling.set_query_source(thd->query(), length);
#endif
@@ -501,9 +501,9 @@ static void handle_bootstrap_impl(THD *thd)
close_thread_tables(thd); // Free tables
bootstrap_error= thd->is_error();
- net_end_statement(thd);
+ thd->protocol->end_statement();
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.finish_current_query();
#endif
@@ -596,7 +596,7 @@ static bool check_merge_table_access(THD *thd, char *db,
tlist->db= db; /* purecov: inspected */
}
error= check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
- table_list, UINT_MAX, FALSE);
+ table_list, FALSE, UINT_MAX, FALSE);
}
return error;
}
@@ -808,12 +808,12 @@ bool do_command(THD *thd)
Consider moving to init_connect() instead.
*/
thd->clear_error(); // Clear error message
- thd->main_da.reset_diagnostics_area();
+ thd->stmt_da->reset_diagnostics_area();
net_new_transaction(net);
packet_length= my_net_read(net);
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.start_new_query();
#endif
if (packet_length == packet_error)
@@ -826,7 +826,7 @@ bool do_command(THD *thd)
/* The error must be set. */
DBUG_ASSERT(thd->is_error());
- net_end_statement(thd);
+ thd->protocol->end_statement();
if (net->error != 3)
{
@@ -873,7 +873,7 @@ bool do_command(THD *thd)
return_value= dispatch_command(command, thd, packet+1, (uint) (packet_length-1));
out:
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.finish_current_query();
#endif
DBUG_RETURN(return_value);
@@ -1071,7 +1071,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
tbl_name= strmake(db.str, packet + 1, db_len)+1;
strmake(tbl_name, packet + db_len + 2, tbl_len);
if (mysql_table_dump(thd, &db, tbl_name) == 0)
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
break;
}
case COM_CHANGE_USER:
@@ -1230,7 +1230,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
general_log_write(thd, command, thd->query(), thd->query_length());
DBUG_PRINT("query",("%-.4096s",thd->query()));
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.set_query_source(thd->query(), thd->query_length());
#endif
@@ -1243,7 +1243,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
{
char *beginning_of_next_stmt= (char*) end_of_stmt;
- net_end_statement(thd);
+ thd->protocol->end_statement();
query_cache_end_of_result(thd);
/*
Multiple queries exist; execute them individually
@@ -1265,7 +1265,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
MYSQL_QUERY_DONE(thd->is_error());
}
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.finish_current_query();
thd->profiling.start_new_query("continuing");
thd->profiling.set_query_source(beginning_of_next_stmt, length);
@@ -1341,7 +1341,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege,
0, 0, test(table_list.schema_table)))
break;
- if (check_grant(thd, SELECT_ACL, &table_list, 2, UINT_MAX, 0))
+ if (check_grant(thd, SELECT_ACL, &table_list, TRUE, UINT_MAX, FALSE))
break;
/* init structures for VIEW processing */
table_list.select_lex= &(thd->lex->select_lex);
@@ -1366,7 +1366,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* We don't calculate statistics for this command */
general_log_print(thd, command, NullS);
net->error=0; // Don't give 'abort' message
- thd->main_da.disable_status(); // Don't send anything back
+ thd->stmt_da->disable_status(); // Don't send anything back
error=TRUE; // End server
break;
@@ -1555,7 +1555,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#ifndef EMBEDDED_LIBRARY
VOID(my_net_write(net, (uchar*) buff, length));
VOID(net_flush(net));
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
#endif
break;
}
@@ -1621,7 +1621,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* report error issued during command execution */
if (thd->killed_errno())
{
- if (! thd->main_da.is_set())
+ if (! thd->stmt_da->is_set())
thd->send_kill_message();
}
if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA)
@@ -1631,13 +1631,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
/* If commit fails, we should be able to reset the OK status. */
- thd->main_da.can_overwrite_status= TRUE;
+ thd->stmt_da->can_overwrite_status= TRUE;
ha_autocommit_or_rollback(thd, thd->is_error());
- thd->main_da.can_overwrite_status= FALSE;
+ thd->stmt_da->can_overwrite_status= FALSE;
thd->transaction.stmt.reset();
- net_end_statement(thd);
+ thd->protocol->end_statement();
query_cache_end_of_result(thd);
thd->proc_info= "closing tables";
@@ -1684,9 +1684,9 @@ void log_slow_statement(THD *thd)
/*
Do not log administrative statements unless the appropriate option is
- set; do not log into slow log if reading from backup.
+ set.
*/
- if (thd->enable_slow_log && !thd->user_time)
+ if (thd->enable_slow_log)
{
ulonglong end_utime_of_query= thd->current_utime();
thd_proc_info(thd, "logging slow query");
@@ -1807,7 +1807,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
Mark this current profiling record to be discarded. We don't
wish to have SHOW commands show up in profiling.
*/
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.discard_current_query();
#endif
break;
@@ -2075,8 +2075,14 @@ mysql_execute_command(THD *thd)
variables, but for now this is probably good enough.
Don't reset warnings when executing a stored routine.
*/
- if ((all_tables || !lex->is_single_level_stmt()) && !thd->spcont)
- mysql_reset_errors(thd, 0);
+ if ((sql_command_flags[lex->sql_command] & CF_DIAGNOSTIC_STMT) != 0)
+ thd->warning_info->set_read_only(TRUE);
+ else
+ {
+ thd->warning_info->set_read_only(FALSE);
+ if (all_tables)
+ thd->warning_info->opt_clear_warning_info(thd->query_id);
+ }
#ifdef HAVE_REPLICATION
if (unlikely(thd->slave_thread))
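With this hunk, statements flagged CF_DIAGNOSTIC_STMT (SHOW WARNINGS and SHOW ERRORS, per the flag changes earlier in the patch) put the warning area into read-only mode instead of clearing it, so they can report the warnings of the preceding statement. The following is a hedged model of that control flow; WarningInfo and the flag values are illustrative, not the server's Warning_info class.

// Illustrative model of the read-only warning-area switch, not server code.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

enum : uint32_t { CF_STATUS_COMMAND= 1u << 0, CF_DIAGNOSTIC_STMT= 1u << 1 };

struct WarningInfo
{
  std::vector<std::string> warnings;
  bool read_only;

  WarningInfo() : read_only(false) {}
  void set_read_only(bool ro) { read_only= ro; }
  void opt_clear()            { if (!read_only) warnings.clear(); }
  void push(const std::string &w) { warnings.push_back(w); }
};

// Mirrors the hunk above: diagnostic statements must not wipe the warnings
// they are about to report.
static void prepare_warning_area(WarningInfo &wi, uint32_t stmt_flags,
                                 bool has_tables)
{
  if (stmt_flags & CF_DIAGNOSTIC_STMT)
    wi.set_read_only(true);
  else
  {
    wi.set_read_only(false);
    if (has_tables)
      wi.opt_clear();
  }
}

int main()
{
  WarningInfo wi;
  wi.push("Warning: data truncated");   // produced by a prior statement
  prepare_warning_area(wi, CF_STATUS_COMMAND | CF_DIAGNOSTIC_STMT, false);
  std::printf("SHOW WARNINGS still sees %zu warning(s)\n", wi.warnings.size());
  return 0;
}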
@@ -2216,14 +2222,16 @@ mysql_execute_command(THD *thd)
#endif
case SQLCOM_SHOW_STATUS_PROC:
case SQLCOM_SHOW_STATUS_FUNC:
- if (!(res= check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE)))
+ if (!(res= check_table_access(thd, SELECT_ACL, all_tables, FALSE,
+ UINT_MAX, FALSE)))
res= execute_sqlcom_select(thd, all_tables);
break;
case SQLCOM_SHOW_STATUS:
{
system_status_var old_status_var= thd->status_var;
thd->initial_status_var= &old_status_var;
- if (!(res= check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE)))
+ if (!(res= check_table_access(thd, SELECT_ACL, all_tables, FALSE,
+ UINT_MAX, FALSE)))
res= execute_sqlcom_select(thd, all_tables);
/* Don't log SHOW STATUS commands to slow query log */
thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED |
@@ -2253,18 +2261,22 @@ mysql_execute_command(THD *thd)
case SQLCOM_SHOW_STORAGE_ENGINES:
case SQLCOM_SHOW_PROFILE:
case SQLCOM_SELECT:
+ {
thd->status_var.last_query_cost= 0.0;
+
+ /*
+ lex->exchange != NULL implies SELECT .. INTO OUTFILE and this
+ requires FILE_ACL access.
+ */
+ ulong privileges_requested= lex->exchange ? SELECT_ACL | FILE_ACL :
+ SELECT_ACL;
+
if (all_tables)
- {
res= check_table_access(thd,
- lex->exchange ? SELECT_ACL | FILE_ACL :
- SELECT_ACL,
- all_tables, UINT_MAX, FALSE);
- }
+ privileges_requested,
+ all_tables, FALSE, UINT_MAX, FALSE);
else
- res= check_access(thd,
- lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL,
- any_db, 0, 0, 0, 0);
+ res= check_access(thd, privileges_requested, any_db, 0, 0, 0, UINT_MAX);
if (res)
break;
@@ -2275,7 +2287,8 @@ mysql_execute_command(THD *thd)
res= execute_sqlcom_select(thd, all_tables);
break;
- case SQLCOM_PREPARE:
+ }
+case SQLCOM_PREPARE:
{
mysql_sql_stmt_prepare(thd);
break;
@@ -2291,8 +2304,8 @@ mysql_execute_command(THD *thd)
break;
}
case SQLCOM_DO:
- if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
- open_and_lock_tables(thd, all_tables))
+ if (check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)
+ || open_and_lock_tables(thd, all_tables))
goto error;
res= mysql_do(thd, *lex->insert_list);
@@ -2356,7 +2369,7 @@ mysql_execute_command(THD *thd)
}
case SQLCOM_SHOW_PROFILES:
{
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.discard_current_query();
res= thd->profiling.show_profiles();
if (res)
@@ -2389,6 +2402,7 @@ mysql_execute_command(THD *thd)
res = show_slave_hosts(thd);
break;
}
+ case SQLCOM_SHOW_RELAYLOG_EVENTS: /* fall through */
case SQLCOM_SHOW_BINLOG_EVENTS:
{
if (check_global_access(thd, REPL_SLAVE_ACL))
@@ -2401,8 +2415,8 @@ mysql_execute_command(THD *thd)
case SQLCOM_BACKUP_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
- check_global_access(thd, FILE_ACL))
+ if (check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)
+ || check_global_access(thd, FILE_ACL))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res = mysql_backup_table(thd, first_table);
@@ -2413,8 +2427,8 @@ mysql_execute_command(THD *thd)
case SQLCOM_RESTORE_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_table_access(thd, INSERT_ACL, all_tables, UINT_MAX, FALSE) ||
- check_global_access(thd, FILE_ACL))
+ if (check_table_access(thd, INSERT_ACL, all_tables, FALSE, UINT_MAX, FALSE)
+ || check_global_access(thd, FILE_ACL))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res = mysql_restore_table(thd, first_table);
@@ -2513,7 +2527,7 @@ mysql_execute_command(THD *thd)
test(first_table->schema_table)))
goto error; /* purecov: inspected */
/* Check that the first table has CREATE privilege */
- if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0))
+ if (check_grant(thd, CREATE_ACL, all_tables, FALSE, 1, FALSE))
goto error;
pthread_mutex_lock(&LOCK_active_mi);
@@ -2883,7 +2897,7 @@ end_with_restore_list:
(TABLE_LIST *)
create_info.merge_list.first))
goto error; /* purecov: inspected */
- if (check_grant(thd, priv_needed, all_tables, 0, UINT_MAX, 0))
+ if (check_grant(thd, priv_needed, all_tables, FALSE, UINT_MAX, FALSE))
goto error;
if (lex->name.str && !test_all_bits(priv,INSERT_ACL | CREATE_ACL))
{ // Rename of table
@@ -2892,8 +2906,8 @@ end_with_restore_list:
tmp_table.table_name= lex->name.str;
tmp_table.db=select_lex->db;
tmp_table.grant.privilege=priv;
- if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0,
- UINT_MAX, 0))
+ if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, FALSE,
+ UINT_MAX, FALSE))
goto error;
}
@@ -2947,10 +2961,11 @@ end_with_restore_list:
*/
old_list= table[0];
new_list= table->next_local[0];
- if (check_grant(thd, ALTER_ACL | DROP_ACL, &old_list, 0, 1, 0) ||
+ if (check_grant(thd, ALTER_ACL | DROP_ACL, &old_list, FALSE, 1, FALSE) ||
(!test_all_bits(table->next_local->grant.privilege,
INSERT_ACL | CREATE_ACL) &&
- check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, 1, 0)))
+ check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, FALSE, 1,
+ FALSE)))
goto error;
}
@@ -2981,11 +2996,75 @@ end_with_restore_list:
goto error;
#else
{
- /* Ignore temporary tables if this is "SHOW CREATE VIEW" */
+ /*
+ Access check:
+ SHOW CREATE TABLE requires any privilege on the table level (i.e.
+ affecting all columns in the table).
+ SHOW CREATE VIEW requires the SHOW_VIEW and SELECT ACLs on the table
+ level.
+ NOTE: SHOW_VIEW ACL is checked when the view is created.
+ */
+
if (lex->only_view)
+ {
+ if (check_table_access(thd, SELECT_ACL, first_table, FALSE, 1, FALSE))
+ {
+ my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0),
+ "SHOW", thd->security_ctx->priv_user,
+ thd->security_ctx->host_or_ip, first_table->alias);
+ goto error;
+ }
+
+ /* Ignore temporary tables if this is "SHOW CREATE VIEW" */
first_table->skip_temporary= 1;
- if (check_show_create_table_access(thd, first_table))
- goto error;
+ }
+ else
+ {
+ ulong save_priv;
+
+ /*
+ If it is an INFORMATION_SCHEMA table, SELECT_ACL privilege is the
+ only privilege allowed. For any other privilege check_access()
+ reports an error. That's how internal implementation protects
+ INFORMATION_SCHEMA from updates.
+
+ For ordinary tables any privilege from the SHOW_CREATE_TABLE_ACLS
+ set is sufficient.
+ */
+
+ ulong check_privs= test(first_table->schema_table) ?
+ SELECT_ACL : SHOW_CREATE_TABLE_ACLS;
+
+ if (check_access(thd, check_privs, first_table->db,
+ &save_priv, FALSE, FALSE,
+ test(first_table->schema_table)))
+ goto error;
+
+ /*
+ save_priv contains any privileges actually granted by check_access
+ (i.e. save_priv contains global (user- and database-level)
+ privileges).
+
+ The fact that check_access() returned FALSE does not mean that
+ access is granted. We need to check whether save_priv contains any
+ privilege from SHOW_CREATE_TABLE_ACLS. If not, we fall back to
+ checking table-level grants.
+
+ If there are no global privileges and no table-level privileges,
+ access is denied.
+ */
+
+ if (!(save_priv & (SHOW_CREATE_TABLE_ACLS)) &&
+ !has_any_table_level_privileges(thd, SHOW_CREATE_TABLE_ACLS, first_table))
+ {
+ my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0),
+ "SHOW", thd->security_ctx->priv_user,
+ thd->security_ctx->host_or_ip, first_table->alias);
+ goto error;
+ }
+ }
+
+ /* Access is granted. Execute the command. */
res= mysqld_show_create(thd, first_table);
break;
}
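The rewritten SQLCOM_SHOW_CREATE case distinguishes two paths: SHOW CREATE VIEW needs SELECT on the object (SHOW_VIEW was already checked when the view was created), while SHOW CREATE TABLE accepts any privilege from SHOW_CREATE_TABLE_ACLS, checked first at the global/db level by check_access() and then at the table level. The sketch below models just that decision with plain booleans standing in for the ACL machinery; the names and scenarios are assumptions for illustration.

// Hedged model of the SHOW CREATE access decision, not the server's ACL code.
#include <cstdio>

struct AccessFacts
{
  bool is_view;                 // SHOW CREATE VIEW vs SHOW CREATE TABLE
  bool has_select;              // SELECT granted on the object
  bool global_has_show_create;  // some SHOW_CREATE_TABLE_ACLS bit at user/db level
  bool table_has_show_create;   // some SHOW_CREATE_TABLE_ACLS bit at table level
};

static bool show_create_allowed(const AccessFacts &f)
{
  if (f.is_view)
    return f.has_select;        // SHOW_VIEW itself was checked at CREATE time
  // Base table: any matching privilege at either layer is enough.
  return f.global_has_show_create || f.table_has_show_create;
}

int main()
{
  AccessFacts table_level_only= { false, false, false, true };
  AccessFacts view_no_select=   { true,  false, true,  true };
  std::printf("table, table-level priv only: %s\n",
              show_create_allowed(table_level_only) ? "granted" : "denied");
  std::printf("view without SELECT:          %s\n",
              show_create_allowed(view_no_select) ? "granted" : "denied");
  return 0;
}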
@@ -2993,8 +3072,8 @@ end_with_restore_list:
case SQLCOM_CHECKSUM:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables,
- UINT_MAX, FALSE))
+ if (check_table_access(thd, SELECT_ACL, all_tables,
+ FALSE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
res = mysql_checksum_table(thd, first_table, &lex->check_opt);
break;
@@ -3003,7 +3082,7 @@ end_with_restore_list:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables,
- UINT_MAX, FALSE))
+ FALSE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_repair_table(thd, first_table, &lex->check_opt);
@@ -3022,8 +3101,8 @@ end_with_restore_list:
case SQLCOM_CHECK:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables,
- UINT_MAX, FALSE))
+ if (check_table_access(thd, SELECT_ACL, all_tables,
+ TRUE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res = mysql_check_table(thd, first_table, &lex->check_opt);
@@ -3035,7 +3114,7 @@ end_with_restore_list:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables,
- UINT_MAX, FALSE))
+ FALSE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_analyze_table(thd, first_table, &lex->check_opt);
@@ -3056,7 +3135,7 @@ end_with_restore_list:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables,
- UINT_MAX, FALSE))
+ FALSE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ?
@@ -3429,7 +3508,7 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if (!lex->drop_temporary)
{
- if (check_table_access(thd, DROP_ACL, all_tables, UINT_MAX, FALSE))
+ if (check_table_access(thd, DROP_ACL, all_tables, FALSE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
if (end_active_trans(thd))
goto error;
@@ -3474,9 +3553,6 @@ end_with_restore_list:
case SQLCOM_SHOW_PRIVILEGES:
res= mysqld_show_privileges(thd);
break;
- case SQLCOM_SHOW_COLUMN_TYPES:
- res= mysqld_show_column_types(thd);
- break;
case SQLCOM_SHOW_ENGINE_LOGS:
#ifdef DONT_ALLOW_SHOW_COMMANDS
my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND),
@@ -3537,8 +3613,8 @@ end_with_restore_list:
if (lex->autocommit && end_active_trans(thd))
goto error;
- if ((check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
- open_and_lock_tables(thd, all_tables)))
+ if ((check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)
+ || open_and_lock_tables(thd, all_tables)))
goto error;
if (lex->one_shot_set && not_all_support_one_shot(lex_var_list))
{
@@ -3592,7 +3668,7 @@ end_with_restore_list:
if (end_active_trans(thd))
goto error;
if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables,
- UINT_MAX, FALSE))
+ FALSE, UINT_MAX, FALSE))
goto error;
if (lex->protect_against_global_read_lock &&
!(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
@@ -3988,7 +4064,7 @@ end_with_restore_list:
else
{
if (check_grant(thd,(lex->grant | lex->grant_tot_col | GRANT_ACL),
- all_tables, 0, UINT_MAX, 0))
+ all_tables, FALSE, UINT_MAX, FALSE))
goto error;
/* Conditionally writes to binlog */
res= mysql_table_grant(thd, all_tables, lex->users_list,
@@ -4097,7 +4173,7 @@ end_with_restore_list:
#endif
case SQLCOM_HA_OPEN:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE))
+ if (check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE))
goto error;
res= mysql_ha_open(thd, first_table, 0);
break;
@@ -4383,7 +4459,8 @@ create_sp_error:
This will cache all SP and SF and open and lock all tables
required for execution.
*/
- if (check_table_access(thd, SELECT_ACL, all_tables, UINT_MAX, FALSE) ||
+ if (check_table_access(thd, SELECT_ACL, all_tables, FALSE,
+ UINT_MAX, FALSE) ||
open_and_lock_tables(thd, all_tables))
goto error;
@@ -4453,12 +4530,6 @@ create_sp_error:
So just execute the statement.
*/
res= sp->execute_procedure(thd, &lex->value_list);
- /*
- If warnings have been cleared, we have to clear total_warn_count
- too, otherwise the clients get confused.
- */
- if (thd->warn_list.is_empty())
- thd->total_warn_count= 0;
thd->variables.select_limit= select_limit;
@@ -4489,7 +4560,7 @@ create_sp_error:
else
sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, lex->spname,
&thd->sp_func_cache, FALSE);
- mysql_reset_errors(thd, 0);
+ thd->warning_info->opt_clear_warning_info(thd->query_id);
if (! sp)
{
if (lex->spname->m_db.str)
@@ -4564,7 +4635,7 @@ create_sp_error:
TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION);
sp_result= sp_routine_exists_in_table(thd, type, lex->spname);
- mysql_reset_errors(thd, 0);
+ thd->warning_info->opt_clear_warning_info(thd->query_id);
if (sp_result == SP_OK)
{
char *db= lex->spname->m_db.str;
@@ -4668,10 +4739,10 @@ create_sp_error:
}
break;
}
-#ifndef DBUG_OFF
case SQLCOM_SHOW_PROC_CODE:
case SQLCOM_SHOW_FUNC_CODE:
{
+#ifndef DBUG_OFF
sp_head *sp;
if (lex->sql_command == SQLCOM_SHOW_PROC_CODE)
@@ -4688,8 +4759,12 @@ create_sp_error:
goto error;
}
break;
- }
+#else
+ my_error(ER_FEATURE_DISABLED, MYF(0),
+ "SHOW PROCEDURE|FUNCTION CODE", "--with-debug");
+ goto error;
#endif // ifndef DBUG_OFF
+ }
case SQLCOM_SHOW_CREATE_TRIGGER:
{
if (lex->spname->m_name.length > NAME_LEN)
@@ -4717,8 +4792,8 @@ create_sp_error:
}
case SQLCOM_DROP_VIEW:
{
- if (check_table_access(thd, DROP_ACL, all_tables, UINT_MAX, FALSE) ||
- end_active_trans(thd))
+ if (check_table_access(thd, DROP_ACL, all_tables, FALSE, UINT_MAX, FALSE)
+ || end_active_trans(thd))
goto error;
/* Conditionally writes to binlog. */
res= mysql_drop_view(thd, first_table, thd->lex->drop_mode);
@@ -4928,7 +5003,7 @@ create_sp_error:
res= mysql_xa_recover(thd);
break;
case SQLCOM_ALTER_TABLESPACE:
- if (check_access(thd, ALTER_ACL, thd->db, 0, 1, 0, thd->db ? is_schema_db(thd->db) : 0))
+ if (check_global_access(thd, CREATE_TABLESPACE_ACL))
break;
if (!(res= mysql_alter_tablespace(thd, lex->alter_tablespace_info)))
my_ok(thd);
@@ -5015,6 +5090,11 @@ create_sp_error:
my_ok(thd, 1);
break;
}
+ case SQLCOM_SIGNAL:
+ case SQLCOM_RESIGNAL:
+ DBUG_ASSERT(lex->m_stmt != NULL);
+ res= lex->m_stmt->execute(thd);
+ break;
default:
#ifndef EMBEDDED_LIBRARY
DBUG_ASSERT(0); /* Impossible */
@@ -5160,7 +5240,7 @@ bool check_single_table_access(THD *thd, ulong privilege,
/* Show only 1 table for check_grant */
if (!(all_tables->belong_to_view &&
(thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) &&
- check_grant(thd, privilege, all_tables, 0, 1, no_errors))
+ check_grant(thd, privilege, all_tables, FALSE, 1, no_errors))
goto deny;
thd->security_ctx= backup_ctx;
@@ -5205,7 +5285,8 @@ bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
subselects_tables= subselects_tables->next_global;
}
if (subselects_tables &&
- (check_table_access(thd, SELECT_ACL, subselects_tables, UINT_MAX, FALSE)))
+ (check_table_access(thd, SELECT_ACL, subselects_tables, FALSE,
+ UINT_MAX, FALSE)))
return 1;
}
return 0;
@@ -5213,46 +5294,54 @@ bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables)
/**
- Get the user (global) and database privileges for all used tables.
-
- @param save_priv In this we store global and db level grants for the
- table. Note that we don't store db level grants if the
- global grants is enough to satisfy the request and the
- global grants contains a SELECT grant.
-
- @note
- The idea of EXTRA_ACL is that one will be granted access to the table if
- one has the asked privilege on any column combination of the table; For
- example to be able to check a table one needs to have SELECT privilege on
- any column of the table.
-
- @retval
- 0 ok
- @retval
- 1 If we can't get the privileges and we don't use table/column
- grants.
+ @brief Compare requested privileges with the privileges acquired from the
+ User- and Db-tables.
+ @param thd Thread handler
+ @param want_access The requested access privileges.
+ @param db A pointer to the Db name.
+ @param[out] save_priv A pointer to the granted privileges will be stored.
+ @param dont_check_global_grants True if no global grants are checked.
+ @param no_errors True if no errors should be sent to the client.
+ @param schema_db True if the db specified belongs to the meta data tables.
+
+ 'save_priv' is used to save the User-table (global) and Db-table grants for
+ the supplied db name. Note that we don't store db level grants if the global
+ grants are enough to satisfy the request AND the global grants contain a
+ SELECT grant.
+
+ A meta data table (from INFORMATION_SCHEMA) can always be accessed with
+ a SELECT_ACL.
+
+ @see check_grant
+
+ @return Status of denial of access by exclusive ACLs.
+ @retval FALSE Access is not denied by the Db- and User-table privileges
+ alone; Column- and Table-grants may still need to be checked.
+ @retval TRUE Access denied.
*/
+
bool
check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
bool dont_check_global_grants, bool no_errors, bool schema_db)
{
Security_context *sctx= thd->security_ctx;
ulong db_access;
+
/*
GRANT command:
In case of database level grant the database name may be a pattern,
in case of table|column level grant the database name can not be a pattern.
We use 'dont_check_global_grants' as a flag to determine
- if it's database level grant command
+ if it's database level grant command
(see SQLCOM_GRANT case, mysql_execute_command() function) and
set db_is_pattern according to 'dont_check_global_grants' value.
*/
- bool db_is_pattern= (test(want_access & GRANT_ACL) &&
- dont_check_global_grants);
+ bool db_is_pattern= ((want_access & GRANT_ACL) && dont_check_global_grants);
ulong dummy;
DBUG_ENTER("check_access");
DBUG_PRINT("enter",("db: %s want_access: %lu master_access: %lu",
db ? db : "", want_access, sctx->master_access));
+
if (save_priv)
*save_priv=0;
else
@@ -5270,8 +5359,12 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
if (schema_db)
{
- if ((!(sctx->master_access & FILE_ACL) && (want_access & FILE_ACL)) ||
- (want_access & ~(SELECT_ACL | EXTRA_ACL | FILE_ACL)))
+ /*
+ We don't allow any simple privileges but SELECT_ACL or CREATE_VIEW_ACL
+ on the information_schema database.
+ */
+ want_access &= ~SELECT_ACL;
+ if (want_access & DB_ACLS)
{
if (!no_errors)
{
@@ -5279,10 +5372,15 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->priv_host, db_name);
}
+ /*
+ Access denied;
+ [out] *save_privileges= 0
+ */
DBUG_RETURN(TRUE);
}
else
{
+ /* Access granted */
*save_priv= SELECT_ACL;
DBUG_RETURN(FALSE);
}
@@ -5290,20 +5388,27 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
if ((sctx->master_access & want_access) == want_access)
{
+ /* get access for current db */
+ db_access= sctx->db_access;
/*
- If we don't have a global SELECT privilege, we have to get the database
- specific access rights to be able to handle queries of type
+ 1. If we don't have a global SELECT privilege, we have to get the
+ database specific access rights to be able to handle queries of type
UPDATE t1 SET a=1 WHERE b > 0
+ 2. Change db access if it isn't current db which is being addressed
*/
- db_access= sctx->db_access;
if (!(sctx->master_access & SELECT_ACL) &&
(db && (!thd->db || db_is_pattern || strcmp(db,thd->db))))
db_access=acl_get(sctx->host, sctx->ip, sctx->priv_user, db,
db_is_pattern);
+
+ /*
+ The effective privileges are the union of the global privileges
+ and the intersection of db- and host-privileges.
+ */
*save_priv=sctx->master_access | db_access;
DBUG_RETURN(FALSE);
}
- if (((want_access & ~sctx->master_access) & ~(DB_ACLS | EXTRA_ACL)) ||
+ if (((want_access & ~sctx->master_access) & ~DB_ACLS) ||
(! db && dont_check_global_grants))
{ // We can never grant this
DBUG_PRINT("error",("No possible access"));
@@ -5318,33 +5423,66 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
}
if (db == any_db)
- DBUG_RETURN(FALSE); // Allow select on anything
+ {
+ /*
+ Access granted; Allow select on *any* db.
+ [out] *save_privileges= 0
+ */
+ DBUG_RETURN(FALSE);
+ }
if (db && (!thd->db || db_is_pattern || strcmp(db,thd->db)))
db_access= acl_get(sctx->host, sctx->ip, sctx->priv_user, db,
db_is_pattern);
else
db_access= sctx->db_access;
- DBUG_PRINT("info",("db_access: %lu", db_access));
- /* Remove SHOW attribute and access rights we already have */
- want_access &= ~(sctx->master_access | EXTRA_ACL);
DBUG_PRINT("info",("db_access: %lu want_access: %lu",
db_access, want_access));
- db_access= ((*save_priv=(db_access | sctx->master_access)) & want_access);
- if (db_access == want_access ||
+ /*
+ Save the union of User-table and the intersection between Db-table and
+ Host-table privileges.
+ */
+ db_access= (db_access | sctx->master_access);
+ *save_priv= db_access;
+
+ /*
+ We need to investigate column- and table-level access if all the requested
+ privileges belong to the bit set of (TABLE_ACLS | PROC_ACLS | db_access).
+ */
+ bool need_table_or_column_check=
+ (want_access & (TABLE_ACLS | PROC_ACLS | db_access)) == want_access;
+
+ /*
+ Grant access if the requested access is in the intersection of
+ host- and db-privileges (as retrieved from the acl cache),
+ also grant access if all the requested privileges are in the union of
+ TABLE_ACLS and PROC_ACLS; see check_grant.
+ */
+ if ( (db_access & want_access) == want_access ||
(!dont_check_global_grants &&
- !(want_access & ~(db_access | TABLE_ACLS | PROC_ACLS))))
- DBUG_RETURN(FALSE); /* Ok */
+ need_table_or_column_check))
+ {
+ /*
+ Ok; but need to check table- and column privileges.
+ [out] *save_privileges is (User-priv | (Db-priv & Host-priv))
+ */
+ DBUG_RETURN(FALSE);
+ }
+ /*
+ Access is denied;
+ [out] *save_privileges is (User-priv | (Db-priv & Host-priv))
+ */
DBUG_PRINT("error",("Access denied"));
if (!no_errors)
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->priv_host,
(db ? db : (thd->db ?
thd->db :
- "unknown"))); /* purecov: tested */
- DBUG_RETURN(TRUE); /* purecov: tested */
+ "unknown")));
+ DBUG_RETURN(TRUE);
+
}
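After this rewrite, check_access() always stores the union of the global (User-table) privileges and the db/host privileges in *save_priv, and it succeeds either when that union covers the request or when the remaining bits could still be satisfied later by table, column or routine grants in check_grant(). Below is a bit-level sketch of that decision; the ACL constants are made up for the example and the dont_check_global_grants path is left out.

// Illustrative bit logic of the check_access() decision, not server code.
#include <cstdio>

typedef unsigned long acl_t;
static const acl_t SELECT_ACL= 1, INSERT_ACL= 2, UPDATE_ACL= 4, DROP_ACL= 8;
static const acl_t TABLE_ACLS= SELECT_ACL | INSERT_ACL | UPDATE_ACL | DROP_ACL;
static const acl_t PROC_ACLS=  16;

// Returns true only on a hard denial; *save_priv always receives the union of
// global and db-level privileges, and any remaining requested bits may still
// be granted by check_grant() at the table/column/routine level.
static bool check_access_sketch(acl_t master_access, acl_t db_access,
                                acl_t want_access, acl_t *save_priv)
{
  acl_t effective= master_access | db_access;
  *save_priv= effective;
  if ((effective & want_access) == want_access)
    return false;                                  // fully covered here
  bool need_table_or_column_check=
    (want_access & (TABLE_ACLS | PROC_ACLS | effective)) == want_access;
  return !need_table_or_column_check;              // true == access denied
}

int main()
{
  acl_t save= 0;
  // Global SELECT only, request SELECT|UPDATE: not denied yet, check_grant()
  // decides using table- and column-level grants.
  bool denied= check_access_sketch(SELECT_ACL, 0, SELECT_ACL | UPDATE_ACL, &save);
  std::printf("denied=%d  save_priv=%lu\n", (int) denied, save);
  return 0;
}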
@@ -5390,14 +5528,20 @@ static bool check_show_access(THD *thd, TABLE_LIST *table)
DBUG_ASSERT(dst_table);
- if (check_access(thd, SELECT_ACL | EXTRA_ACL,
- dst_table->db,
- &dst_table->grant.privilege,
- FALSE, FALSE,
+ if (check_access(thd, SELECT_ACL, dst_table->db,
+ &dst_table->grant.privilege, FALSE, FALSE,
test(dst_table->schema_table)))
- return FALSE;
+ return TRUE; /* Access denied */
+
+ /*
+ check_grant() will grant access if there are any column privileges on
+ all of the tables, thanks to the fourth parameter (bool show_table).
+ */
+ if (check_grant(thd, SELECT_ACL, dst_table, TRUE, UINT_MAX, FALSE))
+ return TRUE; /* Access denied */
- return (check_grant(thd, SELECT_ACL, dst_table, 2, UINT_MAX, FALSE));
+ /* Access granted */
+ return FALSE;
}
default:
break;
@@ -5407,30 +5551,46 @@ static bool check_show_access(THD *thd, TABLE_LIST *table)
}
-/**
- Check the privilege for all used tables.
- @param thd Thread context
- @param want_access Privileges requested
- @param tables List of tables to be checked
- @param number Check at most this number of tables.
- @param no_errors FALSE/TRUE - report/don't report error to
- the client (using my_error() call).
+/**
+ @brief Check if the requested privileges exists in either User-, Host- or
+ Db-tables.
+ @param thd Thread context
+ @param want_access Privileges requested
+ @param tables List of tables to be compared against
+ @param no_errors Don't report error to the client (using my_error() call).
+ @param any_combination_of_privileges_will_do TRUE if any privilege on any
+ column combination is enough.
+ @param number Only the first 'number' tables in the linked list are
+ relevant.
+
+ The supplied table list contains cached privileges. This function calls the
+ helper functions check_access and check_grant to verify the first three steps
+ in the privileges check queue:
+ 1. Global privileges
+ 2. OR (db privileges AND host privileges)
+ 3. OR table privileges
+ 4. OR column privileges (not checked by this function!)
+ 5. OR routine privileges (not checked by this function!)
+
+ @see check_access
+ @see check_grant
+
+ @note This function assumes that the table list used and
+ thd->lex->query_tables_own_last value correspond to each other
+ (the latter should be either 0 or point to next_global member
+ of one of elements of this table list).
- @note
- Table privileges are cached in the table list for GRANT checking.
- This functions assumes that table list used and
- thd->lex->query_tables_own_last value correspond to each other
- (the latter should be either 0 or point to next_global member
- of one of elements of this table list).
-
- @retval FALSE OK
- @retval TRUE Access denied
+ @return
+ @retval FALSE OK
+ @retval TRUE Access denied; but column or routine privileges might still
+ need to be checked.
*/
bool
-check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
- uint number, bool no_errors)
+check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
+ bool any_combination_of_privileges_will_do,
+ uint number, bool no_errors)
{
TABLE_LIST *org_tables= tables;
TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table();
@@ -5441,22 +5601,31 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
the given table list refers to the list for prelocking (contains tables
of other queries). For simple queries first_not_own_table is 0.
*/
- for (; i < number && tables != first_not_own_table;
+ for (; i < number && tables != first_not_own_table && tables;
tables= tables->next_global, i++)
{
+ ulong want_access= requirements;
if (tables->security_ctx)
sctx= tables->security_ctx;
else
sctx= backup_ctx;
- if (tables->schema_table &&
- (want_access & ~(SELECT_ACL | EXTRA_ACL | FILE_ACL)))
+ /*
+ Always allow SELECT on schema tables. This is done by removing the
+ required SELECT_ACL privilege in the want_access parameter.
+ Disallow any other DDL or DML operation on any schema table.
+ */
+ if (tables->schema_table)
{
- if (!no_errors)
- my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
- sctx->priv_user, sctx->priv_host,
- INFORMATION_SCHEMA_NAME.str);
- return TRUE;
+ want_access &= ~SELECT_ACL;
+ if (want_access & DB_ACLS)
+ {
+ if (!no_errors)
+ my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
+ sctx->priv_user, sctx->priv_host,
+ INFORMATION_SCHEMA_NAME.str);
+ goto deny;
+ }
}
/*
Register access for view underlying table.
@@ -5468,33 +5637,34 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
{
if (check_show_access(thd, tables))
goto deny;
-
continue;
}
+ DBUG_PRINT("info", ("derived: %d view: %d", tables->derived != 0,
+ tables->view != 0));
if (tables->is_anonymous_derived_table() ||
- (tables->table && (int)tables->table->s->tmp_table))
+ (tables->table && tables->table->s &&
+ (int)tables->table->s->tmp_table))
continue;
thd->security_ctx= sctx;
- if ((sctx->master_access & want_access) ==
- (want_access & ~EXTRA_ACL) &&
- thd->db)
+ if ((sctx->master_access & want_access) == want_access &&
+ thd->db)
tables->grant.privilege= want_access;
else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0)
{
if (check_access(thd, want_access, tables->get_db_name(),
- &tables->grant.privilege, 0, no_errors,
+ &tables->grant.privilege, 0, no_errors,
test(tables->schema_table)))
goto deny; // Access denied
}
else if (check_access(thd, want_access, tables->get_db_name(),
- &tables->grant.privilege, 0, no_errors,
- test(tables->schema_table)))
+ &tables->grant.privilege, 0, no_errors, 0))
goto deny;
}
thd->security_ctx= backup_ctx;
- return check_grant(thd,want_access & ~EXTRA_ACL,org_tables,
- test(want_access & EXTRA_ACL), number, no_errors);
+ return check_grant(thd,requirements,org_tables,
+ any_combination_of_privileges_will_do,
+ number, no_errors);
deny:
thd->security_ctx= backup_ctx;
return TRUE;
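The new check_table_access() walks the table list once per statement: for INFORMATION_SCHEMA tables it strips SELECT_ACL from the request (SELECT is always allowed there) and denies if any DB-level privilege remains requested, while ordinary tables fall through to check_access() and finally check_grant(). A toy version of that per-table loop follows; TableRef and the ACL constants are stand-ins, and the two helper checks are collapsed into a single granted-bits comparison.

// Toy per-table loop modeled on the new check_table_access(), not server code.
#include <cstdio>
#include <vector>

typedef unsigned long acl_t;
static const acl_t SELECT_ACL= 1, INSERT_ACL= 2, DROP_ACL= 4;
static const acl_t DB_ACLS= INSERT_ACL | DROP_ACL;      // made-up subset

struct TableRef { bool is_schema_table; acl_t granted; };

static bool table_access_denied(const std::vector<TableRef> &tables,
                                acl_t requirements)
{
  for (const TableRef &t : tables)
  {
    acl_t want= requirements;
    if (t.is_schema_table)
    {
      want&= ~SELECT_ACL;                // SELECT on I_S is always allowed
      if (want & DB_ACLS)
        return true;                     // any DML/DDL on I_S is denied
    }
    if ((t.granted & want) != want)      // stands in for check_access/check_grant
      return true;
  }
  return false;
}

int main()
{
  std::vector<TableRef> tables= { { true, 0 }, { false, SELECT_ACL } };
  std::printf("SELECT from I_S and a user table: %s\n",
              table_access_denied(tables, SELECT_ACL) ? "denied" : "ok");
  std::printf("INSERT involving I_S:             %s\n",
              table_access_denied(tables, INSERT_ACL) ? "denied" : "ok");
  return 0;
}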
@@ -5582,7 +5752,7 @@ bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table)
if (!check_access(thd, access, table->db,
&table->grant.privilege, 0, 1,
test(table->schema_table)) &&
- !check_grant(thd, access, table, 0, 1, 1))
+ !check_grant(thd, access, table, FALSE, 1, TRUE))
DBUG_RETURN(0);
}
}
@@ -5762,8 +5932,8 @@ void mysql_reset_thd_for_next_command(THD *thd)
thd->user_var_events_alloc= thd->mem_root;
}
thd->clear_error();
- thd->main_da.reset_diagnostics_area();
- thd->total_warn_count=0; // Warnings for this query
+ thd->stmt_da->reset_diagnostics_area();
+ thd->warning_info->reset_for_next_command();
thd->rand_used= 0;
thd->sent_row_count= thd->examined_row_count= 0;
@@ -6049,7 +6219,7 @@ void mysql_parse(THD *thd, const char *inBuf, uint length,
DBUG_PRINT("info",("Command aborted. Fatal_error: %d",
thd->is_fatal_error));
- query_cache_abort(&thd->net);
+ query_cache_abort(&thd->query_cache_tls);
}
if (thd->lex->sphead)
{
@@ -6133,8 +6303,8 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
if (type_modifier & PRI_KEY_FLAG)
{
Key *key;
- lex->col_list.push_back(new Key_part_spec(field_name->str, 0));
- key= new Key(Key::PRIMARY, NullS,
+ lex->col_list.push_back(new Key_part_spec(*field_name, 0));
+ key= new Key(Key::PRIMARY, null_lex_str,
&default_key_create_info,
0, lex->col_list);
lex->alter_info.key_list.push_back(key);
@@ -6143,8 +6313,8 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
if (type_modifier & (UNIQUE_FLAG | UNIQUE_KEY_FLAG))
{
Key *key;
- lex->col_list.push_back(new Key_part_spec(field_name->str, 0));
- key= new Key(Key::UNIQUE, NullS,
+ lex->col_list.push_back(new Key_part_spec(*field_name, 0));
+ key= new Key(Key::UNIQUE, null_lex_str,
&default_key_create_info, 0,
lex->col_list);
lex->alter_info.key_list.push_back(key);
@@ -6344,7 +6514,19 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
if (!ptr->derived && !my_strcasecmp(system_charset_info, ptr->db,
INFORMATION_SCHEMA_NAME.str))
{
- ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->table_name);
+ ST_SCHEMA_TABLE *schema_table;
+ if (ptr->updating &&
+        /* Special cases that are processed by the commands themselves */
+ lex->sql_command != SQLCOM_CHECK &&
+ lex->sql_command != SQLCOM_CHECKSUM)
+ {
+ my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
+ thd->security_ctx->priv_user,
+ thd->security_ctx->priv_host,
+ INFORMATION_SCHEMA_NAME.str);
+ DBUG_RETURN(0);
+ }
+ schema_table= find_schema_table(thd, ptr->table_name);
if (!schema_table ||
(schema_table->hidden &&
((sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND) == 0 ||
@@ -7277,11 +7459,11 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
else if ((check_access(thd, UPDATE_ACL, table->db,
&table->grant.privilege, 0, 1,
test(table->schema_table)) ||
- check_grant(thd, UPDATE_ACL, table, 0, 1, 1)) &&
+ check_grant(thd, UPDATE_ACL, table, FALSE, 1, TRUE)) &&
(check_access(thd, SELECT_ACL, table->db,
&table->grant.privilege, 0, 0,
test(table->schema_table)) ||
- check_grant(thd, SELECT_ACL, table, 0, 1, 0)))
+ check_grant(thd, SELECT_ACL, table, FALSE, 1, FALSE)))
DBUG_RETURN(TRUE);
table->table_in_first_from_clause= 1;
@@ -7299,7 +7481,7 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
if (check_access(thd, SELECT_ACL, table->db,
&table->grant.privilege, 0, 0,
test(table->schema_table)) ||
- check_grant(thd, SELECT_ACL, table, 0, 1, 0))
+ check_grant(thd, SELECT_ACL, table, FALSE, 1, FALSE))
DBUG_RETURN(TRUE);
}
}
@@ -7339,7 +7521,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
/* sql_yacc guarantees that tables and aux_tables are not zero */
DBUG_ASSERT(aux_tables != 0);
- if (check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE))
+ if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE))
DBUG_RETURN(TRUE);
/*
@@ -7348,7 +7530,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
call check_table_access() safely.
*/
thd->lex->query_tables_own_last= 0;
- if (check_table_access(thd, DELETE_ACL, aux_tables, UINT_MAX, FALSE))
+ if (check_table_access(thd, DELETE_ACL, aux_tables, FALSE, UINT_MAX, FALSE))
{
thd->lex->query_tables_own_last= save_query_tables_own_last;
DBUG_RETURN(TRUE);
@@ -7502,25 +7684,6 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables)
/**
- @brief Check privileges for SHOW CREATE TABLE statement.
-
- @param thd Thread context
- @param table Target table
-
- @retval TRUE Failure
- @retval FALSE Success
-*/
-
-static bool check_show_create_table_access(THD *thd, TABLE_LIST *table)
-{
- return check_access(thd, SELECT_ACL | EXTRA_ACL, table->db,
- &table->grant.privilege, 0, 0,
- test(table->schema_table)) ||
- check_grant(thd, SELECT_ACL, table, 2, UINT_MAX, 0);
-}
-
-
-/**
CREATE TABLE query pre-check.
@param thd Thread handler
@@ -7559,7 +7722,7 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
lex->create_info.merge_list.first))
goto err;
if (want_priv != CREATE_TMP_ACL &&
- check_grant(thd, want_priv, create_table, 0, 1, 0))
+ check_grant(thd, want_priv, create_table, FALSE, 1, FALSE))
goto err;
if (select_lex->item_list.elements)
@@ -7587,12 +7750,13 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
}
}
#endif
- if (tables && check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE))
+ if (tables && check_table_access(thd, SELECT_ACL, tables, FALSE,
+ UINT_MAX, FALSE))
goto err;
}
else if (lex->create_info.options & HA_LEX_CREATE_TABLE_LIKE)
{
- if (check_show_create_table_access(thd, tables))
+ if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE))
goto err;
}
error= FALSE;
@@ -7790,7 +7954,10 @@ bool check_string_char_length(LEX_STRING *str, const char *err_msg,
return FALSE;
if (!no_error)
- my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_char_length);
+ {
+ ErrConvString err(str->str, str->length, cs);
+ my_error(ER_WRONG_STRING_LENGTH, MYF(0), err.ptr(), err_msg, max_char_length);
+ }
return TRUE;
}
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 0decd3e1a91..c0961f84feb 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1,4 +1,4 @@
-/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -18,16 +18,29 @@
to partitioning introduced in MySQL version 5.1. It contains functionality
used by all handlers that support partitioning, such as
the partitioning handler itself and the NDB handler.
+ (Much of the code in this file has been split into partition_info.cc and
+ the header files partition_info.h + partition_element.h + sql_partition.h)
- The first version was written by Mikael Ronstrom.
+ The first version was written by Mikael Ronstrom 2004-2006.
+  Various parts of the optimizer code were written by Sergey Petrunia.
+  The code has been maintained by Mattias Jonsson.
+  The second version was written by Mikael Ronstrom in 2006-2007, with final
+  fixes for partition pruning done in 2008-2009 with assistance from Sergey
+  Petrunia and Mattias Jonsson.
- This version supports RANGE partitioning, LIST partitioning, HASH
+ The first version supports RANGE partitioning, LIST partitioning, HASH
partitioning and composite partitioning (hereafter called subpartitioning)
where each RANGE/LIST partitioning is HASH partitioned. The hash function
can either be supplied by the user or by only a list of fields (also
called KEY partitioning), where the MySQL server will use an internal
hash function.
There are quite a few defaults that can be used as well.
+
+  The second version introduces a new variant of RANGE and LIST partitioning
+  which is often referred to as column lists in the code. This enables a user
+  to specify a set of columns and their concatenated values as the partition
+  value. By comparing the concatenated values, the proper partition can be
+  chosen.
*/
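The column-list comparison described above amounts to a lexicographic comparison of a tuple of column values against stored partition bounds. Below is a minimal standalone sketch of that idea in C++; the names and types are illustrative only and are not the server's actual cmp_rec_and_tuple interface.

// Illustrative sketch of tuple comparison for RANGE COLUMNS partitioning.
// All names are hypothetical; the server works on Field objects, not vectors.
#include <cstdio>
#include <vector>

// Compare two column-value tuples field by field: <0, 0 or >0.
static int cmp_tuples(const std::vector<long> &row,
                      const std::vector<long> &bound)
{
  for (size_t i= 0; i < row.size(); i++)
  {
    if (row[i] < bound[i]) return -1;
    if (row[i] > bound[i]) return 1;
  }
  return 0;
}

int main()
{
  // Conceptually: PARTITION BY RANGE COLUMNS(a, b)
  //   VALUES LESS THAN (10, 5), VALUES LESS THAN (10, 20)
  std::vector<std::vector<long> > bounds= {{10, 5}, {10, 20}};
  std::vector<long> row= {10, 7};
  size_t part= 0;
  while (part < bounds.size() && cmp_tuples(row, bounds[part]) >= 0)
    part++;                        // first bound the row compares below
  if (part == bounds.size())
    std::printf("no partition found\n");
  else
    std::printf("row goes to partition %zu\n", part);   // prints partition 1
  return 0;
}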
/* Some general useful functions */
@@ -40,6 +53,10 @@
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
+
+#define ERROR_INJECT_CRASH(code) \
+ DBUG_EVALUATE_IF(code, (abort(), 0), 0)
+
/*
Partition related functions declarations and some static constants;
*/
@@ -50,9 +67,11 @@ const LEX_STRING partition_keywords[]=
{ C_STRING_WITH_LEN("LIST") },
{ C_STRING_WITH_LEN("KEY") },
{ C_STRING_WITH_LEN("MAXVALUE") },
- { C_STRING_WITH_LEN("LINEAR ") }
+ { C_STRING_WITH_LEN("LINEAR ") },
+ { C_STRING_WITH_LEN(" COLUMNS") }
};
static const char *part_str= "PARTITION";
+static const char *subpart_str= "SUBPARTITION";
static const char *sub_str= "SUB";
static const char *by_str= "BY";
static const char *space_str= " ";
@@ -61,26 +80,23 @@ static const char *end_paren_str= ")";
static const char *begin_paren_str= "(";
static const char *comma_str= ",";
-static int get_part_id_charset_func_all(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_id_charset_func_part(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_id_charset_func_subpart(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_part_part_id_charset_func(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-static int get_subpart_id_charset_func(partition_info *part_info,
- uint32 *part_id);
+int get_partition_id_list_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
int get_partition_id_list(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
+int get_partition_id_range_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
int get_partition_id_range(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
+static int get_part_id_charset_func_part(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
+static int get_part_id_charset_func_subpart(partition_info *part_info,
+ uint32 *part_id);
int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
@@ -93,30 +109,9 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info,
int get_partition_id_linear_key_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value);
-int get_partition_id_range_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_range_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
-int get_partition_id_list_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value);
+int get_partition_id_with_sub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value);
int get_partition_id_hash_sub(partition_info *part_info,
uint32 *part_id);
int get_partition_id_key_sub(partition_info *part_info,
@@ -134,17 +129,64 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter);
uint32 get_next_partition_id_list(PARTITION_ITERATOR* part_iter);
int get_part_iter_for_interval_via_mapping(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array,
uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
uint flags,
PARTITION_ITERATOR *part_iter);
+int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
+ bool is_subpart,
+ uint32 *store_length_array,
+ uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
+ uint flags,
+ PARTITION_ITERATOR *part_iter);
int get_part_iter_for_interval_via_walking(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array,
uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
uint flags,
PARTITION_ITERATOR *part_iter);
+static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec);
+static int cmp_rec_and_tuple_prune(part_column_list_val *val,
+ uint32 n_vals_in_rec,
+ bool tail_is_min);
#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
+ Convert constants in VALUES definition to the character set the
+ corresponding field uses.
+
+ SYNOPSIS
+ convert_charset_partition_constant()
+ item Item to convert
+ cs Character set to convert to
+
+ RETURN VALUE
+ NULL Error
+ item New converted item
+*/
+
+Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
+{
+ THD *thd= current_thd;
+ Name_resolution_context *context= &thd->lex->current_select->context;
+ TABLE_LIST *save_list= context->table_list;
+ const char *save_where= thd->where;
+
+ item= item->safe_charset_converter(cs);
+ context->table_list= NULL;
+ thd->where= "convert character set partition constant";
+ if (!item || item->fix_fields(thd, (Item**)NULL))
+ item= NULL;
+ thd->where= save_where;
+ context->table_list= save_list;
+ return item;
+}
+
+
+/*
A support function to check if a name is in a list of strings
SYNOPSIS
@@ -161,7 +203,7 @@ bool is_name_in_list(char *name,
List<char> list_names)
{
List_iterator<char> names_it(list_names);
- uint no_names= list_names.elements;
+ uint num_names= list_names.elements;
uint i= 0;
do
@@ -169,7 +211,7 @@ bool is_name_in_list(char *name,
char *list_name= names_it++;
if (!(my_strcasecmp(system_charset_info, name, list_name)))
return TRUE;
- } while (++i < no_names);
+ } while (++i < num_names);
return FALSE;
}
@@ -198,24 +240,24 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
if (!is_create_table_ind)
{
- if (part_info->use_default_no_partitions)
+ if (part_info->use_default_num_partitions)
{
- if (table->file->get_no_parts(normalized_path, &part_info->no_parts))
+ if (table->file->get_no_parts(normalized_path, &part_info->num_parts))
{
DBUG_RETURN(TRUE);
}
}
else if (part_info->is_sub_partitioned() &&
- part_info->use_default_no_subpartitions)
+ part_info->use_default_num_subpartitions)
{
- uint no_parts;
- if (table->file->get_no_parts(normalized_path, &no_parts))
+ uint num_parts;
+ if (table->file->get_no_parts(normalized_path, &num_parts))
{
DBUG_RETURN(TRUE);
}
- DBUG_ASSERT(part_info->no_parts > 0);
- DBUG_ASSERT((no_parts % part_info->no_parts) == 0);
- part_info->no_subparts= no_parts / part_info->no_parts;
+ DBUG_ASSERT(part_info->num_parts > 0);
+ DBUG_ASSERT((num_parts % part_info->num_parts) == 0);
+ part_info->num_subparts= num_parts / part_info->num_parts;
}
}
part_info->set_up_defaults_for_partitioning(table->file,
@@ -249,8 +291,8 @@ bool check_reorganise_list(partition_info *new_part_info,
List<char> list_part_names)
{
uint new_count, old_count;
- uint no_new_parts= new_part_info->partitions.elements;
- uint no_old_parts= old_part_info->partitions.elements;
+ uint num_new_parts= new_part_info->partitions.elements;
+ uint num_old_parts= old_part_info->partitions.elements;
List_iterator<partition_element> new_parts_it(new_part_info->partitions);
bool same_part_info= (new_part_info == old_part_info);
DBUG_ENTER("check_reorganise_list");
@@ -273,8 +315,8 @@ bool check_reorganise_list(partition_info *new_part_info,
if (!is_name_in_list(old_name, list_part_names))
DBUG_RETURN(TRUE);
}
- } while (old_count < no_old_parts);
- } while (new_count < no_new_parts);
+ } while (old_count < num_old_parts);
+ } while (new_count < num_new_parts);
DBUG_RETURN(FALSE);
}
@@ -449,9 +491,10 @@ static bool set_up_field_array(TABLE *table,
bool is_sub_part)
{
Field **ptr, *field, **field_array;
- uint no_fields= 0;
+ uint num_fields= 0;
uint size_field_array;
uint i= 0;
+ uint inx;
partition_info *part_info= table->part_info;
int result= FALSE;
DBUG_ENTER("set_up_field_array");
@@ -460,9 +503,19 @@ static bool set_up_field_array(TABLE *table,
while ((field= *(ptr++)))
{
if (field->flags & GET_FIXED_FIELDS_FLAG)
- no_fields++;
+ num_fields++;
+ }
+ if (num_fields > MAX_REF_PARTS)
+ {
+ char *ptr;
+ if (is_sub_part)
+ ptr= (char*)"subpartition function";
+ else
+ ptr= (char*)"partition function";
+ my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), ptr);
+ DBUG_RETURN(TRUE);
}
- if (no_fields == 0)
+ if (num_fields == 0)
{
/*
We are using hidden key as partitioning field
@@ -470,8 +523,8 @@ static bool set_up_field_array(TABLE *table,
DBUG_ASSERT(!is_sub_part);
DBUG_RETURN(result);
}
- size_field_array= (no_fields+1)*sizeof(Field*);
- field_array= (Field**)sql_alloc(size_field_array);
+ size_field_array= (num_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_calloc(size_field_array);
if (unlikely(!field_array))
{
mem_alloc_error(size_field_array);
@@ -486,7 +539,32 @@ static bool set_up_field_array(TABLE *table,
field->flags|= FIELD_IN_PART_FUNC_FLAG;
if (likely(!result))
{
- field_array[i++]= field;
+ if (!is_sub_part && part_info->column_list)
+ {
+ List_iterator<char> it(part_info->part_field_list);
+ char *field_name;
+
+ DBUG_ASSERT(num_fields == part_info->part_field_list.elements);
+ inx= 0;
+ do
+ {
+ field_name= it++;
+ if (!my_strcasecmp(system_charset_info,
+ field_name,
+ field->field_name))
+ break;
+ } while (++inx < num_fields);
+ if (inx == num_fields)
+ {
+ mem_alloc_error(1);
+ result= TRUE;
+ continue;
+ }
+ }
+ else
+ inx= i;
+ field_array[inx]= field;
+ i++;
/*
We check that the fields are proper. It is required for each
@@ -504,16 +582,16 @@ static bool set_up_field_array(TABLE *table,
}
}
}
- field_array[no_fields]= 0;
+ field_array[num_fields]= 0;
if (!is_sub_part)
{
part_info->part_field_array= field_array;
- part_info->no_part_fields= no_fields;
+ part_info->num_part_fields= num_fields;
}
else
{
part_info->subpart_field_array= field_array;
- part_info->no_subpart_fields= no_fields;
+ part_info->num_subpart_fields= num_fields;
}
DBUG_RETURN(result);
}
@@ -552,36 +630,36 @@ static bool create_full_part_field_array(THD *thd, TABLE *table,
if (!part_info->is_sub_partitioned())
{
part_info->full_part_field_array= part_info->part_field_array;
- part_info->no_full_part_fields= part_info->no_part_fields;
+ part_info->num_full_part_fields= part_info->num_part_fields;
}
else
{
Field *field, **field_array;
- uint no_part_fields=0, size_field_array;
+ uint num_part_fields=0, size_field_array;
ptr= table->field;
while ((field= *(ptr++)))
{
if (field->flags & FIELD_IN_PART_FUNC_FLAG)
- no_part_fields++;
+ num_part_fields++;
}
- size_field_array= (no_part_fields+1)*sizeof(Field*);
- field_array= (Field**)sql_alloc(size_field_array);
+ size_field_array= (num_part_fields+1)*sizeof(Field*);
+ field_array= (Field**)sql_calloc(size_field_array);
if (unlikely(!field_array))
{
mem_alloc_error(size_field_array);
result= TRUE;
goto end;
}
- no_part_fields= 0;
+ num_part_fields= 0;
ptr= table->field;
while ((field= *(ptr++)))
{
if (field->flags & FIELD_IN_PART_FUNC_FLAG)
- field_array[no_part_fields++]= field;
+ field_array[num_part_fields++]= field;
}
- field_array[no_part_fields]=0;
+ field_array[num_part_fields]=0;
part_info->full_part_field_array= field_array;
- part_info->no_full_part_fields= no_part_fields;
+ part_info->num_full_part_fields= num_part_fields;
}
/*
@@ -777,16 +855,16 @@ static bool handle_list_of_fields(List_iterator<char> it,
goto end;
}
}
- if (is_list_empty)
+ if (is_list_empty && part_info->part_type == HASH_PARTITION)
{
uint primary_key= table->s->primary_key;
if (primary_key != MAX_KEY)
{
- uint no_key_parts= table->key_info[primary_key].key_parts, i;
+ uint num_key_parts= table->key_info[primary_key].key_parts, i;
/*
In the case of an empty list we use primary key as partition key.
*/
- for (i= 0; i < no_key_parts; i++)
+ for (i= 0; i < num_key_parts; i++)
{
Field *field= table->key_info[primary_key].key_part[i].field;
field->flags|= GET_FIXED_FIELDS_FLAG;
@@ -850,7 +928,7 @@ int check_signed_flag(partition_info *part_info)
error= ER_PARTITION_CONST_DOMAIN_ERROR;
break;
}
- } while (++i < part_info->no_parts);
+ } while (++i < part_info->num_parts);
}
return error;
}
@@ -869,7 +947,6 @@ int check_signed_flag(partition_info *part_info)
table The table object
part_info Reference to partitioning data structure
is_sub_part Is the table subpartitioned as well
- is_field_to_be_setup Flag if we are to set-up field arrays
RETURN VALUE
TRUE An error occurred, something was wrong with the
@@ -892,8 +969,8 @@ int check_signed_flag(partition_info *part_info)
on the field object.
*/
-bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
- bool is_sub_part, bool is_field_to_be_setup)
+static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
+ bool is_sub_part)
{
partition_info *part_info= table->part_info;
uint dir_length, home_dir_length;
@@ -910,13 +987,6 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
nesting_map saved_allow_sum_func;
DBUG_ENTER("fix_fields_part_func");
- if (part_info->fixed)
- {
- if (!(is_sub_part || (error= check_signed_flag(part_info))))
- result= FALSE;
- goto end;
- }
-
/*
Set-up the TABLE_LIST object to be a list with a single table
Set the object to zero to create NULL pointers and set alias
@@ -998,8 +1068,7 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
if (unlikely(error))
{
DBUG_PRINT("info", ("Field in partition function not part of table"));
- if (is_field_to_be_setup)
- clear_field_flag(table);
+ clear_field_flag(table);
goto end;
}
thd->where= save_where;
@@ -1011,11 +1080,7 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
}
if ((!is_sub_part) && (error= check_signed_flag(part_info)))
goto end;
- result= FALSE;
- if (is_field_to_be_setup)
- result= set_up_field_array(table, is_sub_part);
- if (!is_sub_part)
- part_info->fixed= TRUE;
+ result= set_up_field_array(table, is_sub_part);
end:
table->get_fields_in_item_tree= FALSE;
table->map= 0; //Restore old value
@@ -1194,9 +1259,9 @@ void check_range_capable_PF(TABLE *table)
static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
{
uint32 *bitmap_buf;
- uint bitmap_bits= part_info->no_subparts?
- (part_info->no_subparts* part_info->no_parts):
- part_info->no_parts;
+ uint bitmap_bits= part_info->num_subparts?
+ (part_info->num_subparts* part_info->num_parts):
+ part_info->num_parts;
uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
DBUG_ENTER("set_up_partition_bitmap");
@@ -1296,64 +1361,47 @@ static void set_up_partition_func_pointers(partition_info *part_info)
if (part_info->is_sub_partitioned())
{
+ part_info->get_partition_id= get_partition_id_with_sub;
if (part_info->part_type == RANGE_PARTITION)
{
- part_info->get_part_partition_id= get_partition_id_range;
+ if (part_info->column_list)
+ part_info->get_part_partition_id= get_partition_id_range_col;
+ else
+ part_info->get_part_partition_id= get_partition_id_range;
if (part_info->list_of_subpart_fields)
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_range_sub_linear_key;
part_info->get_subpartition_id= get_partition_id_linear_key_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_range_sub_key;
part_info->get_subpartition_id= get_partition_id_key_sub;
- }
}
else
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_range_sub_linear_hash;
part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_range_sub_hash;
part_info->get_subpartition_id= get_partition_id_hash_sub;
- }
}
}
else /* LIST Partitioning */
{
- part_info->get_part_partition_id= get_partition_id_list;
+ if (part_info->column_list)
+ part_info->get_part_partition_id= get_partition_id_list_col;
+ else
+ part_info->get_part_partition_id= get_partition_id_list;
if (part_info->list_of_subpart_fields)
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_list_sub_linear_key;
part_info->get_subpartition_id= get_partition_id_linear_key_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_list_sub_key;
part_info->get_subpartition_id= get_partition_id_key_sub;
- }
}
else
{
if (part_info->linear_hash_ind)
- {
- part_info->get_partition_id= get_partition_id_list_sub_linear_hash;
part_info->get_subpartition_id= get_partition_id_linear_hash_sub;
- }
else
- {
- part_info->get_partition_id= get_partition_id_list_sub_hash;
part_info->get_subpartition_id= get_partition_id_hash_sub;
- }
}
}
}
@@ -1362,9 +1410,19 @@ static void set_up_partition_func_pointers(partition_info *part_info)
part_info->get_part_partition_id= NULL;
part_info->get_subpartition_id= NULL;
if (part_info->part_type == RANGE_PARTITION)
- part_info->get_partition_id= get_partition_id_range;
+ {
+ if (part_info->column_list)
+ part_info->get_partition_id= get_partition_id_range_col;
+ else
+ part_info->get_partition_id= get_partition_id_range;
+ }
else if (part_info->part_type == LIST_PARTITION)
- part_info->get_partition_id= get_partition_id_list;
+ {
+ if (part_info->column_list)
+ part_info->get_partition_id= get_partition_id_list_col;
+ else
+ part_info->get_partition_id= get_partition_id_list;
+ }
else /* HASH partitioning */
{
if (part_info->list_of_part_fields)
@@ -1383,32 +1441,43 @@ static void set_up_partition_func_pointers(partition_info *part_info)
}
}
}
- if (part_info->full_part_charset_field_array)
- {
- DBUG_ASSERT(part_info->get_partition_id);
- part_info->get_partition_id_charset= part_info->get_partition_id;
- if (part_info->part_charset_field_array &&
- part_info->subpart_charset_field_array)
- part_info->get_partition_id= get_part_id_charset_func_all;
- else if (part_info->part_charset_field_array)
- part_info->get_partition_id= get_part_id_charset_func_part;
- else
- part_info->get_partition_id= get_part_id_charset_func_subpart;
- }
- if (part_info->part_charset_field_array &&
- part_info->is_sub_partitioned())
+ /*
+    We need special functions to handle character sets since they require the
+    field pointers to be copied and restored afterwards. For subpartitioned
+    tables we do the copy and restore individually for the part and subpart
+    fields. For non-subpartitioned tables we use the same functions as for the
+    part fields of subpartitioned tables.
+    Thus for subpartitioned tables get_partition_id is always
+    get_partition_id_with_sub, even when character sets exist.
+ */
+ if (part_info->part_charset_field_array)
{
- DBUG_ASSERT(part_info->get_part_partition_id);
- part_info->get_part_partition_id_charset=
+ if (part_info->is_sub_partitioned())
+ {
+ DBUG_ASSERT(part_info->get_part_partition_id);
+ if (!part_info->column_list)
+ {
+ part_info->get_part_partition_id_charset=
part_info->get_part_partition_id;
- part_info->get_part_partition_id= get_part_part_id_charset_func;
+ part_info->get_part_partition_id= get_part_id_charset_func_part;
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(part_info->get_partition_id);
+ if (!part_info->column_list)
+ {
+ part_info->get_part_partition_id_charset= part_info->get_partition_id;
+ part_info->get_part_partition_id= get_part_id_charset_func_part;
+ }
+ }
}
if (part_info->subpart_charset_field_array)
{
DBUG_ASSERT(part_info->get_subpartition_id);
part_info->get_subpartition_id_charset=
part_info->get_subpartition_id;
- part_info->get_subpartition_id= get_subpart_id_charset_func;
+ part_info->get_subpartition_id= get_part_id_charset_func_subpart;
}
DBUG_VOID_RETURN;
}
@@ -1416,22 +1485,22 @@ static void set_up_partition_func_pointers(partition_info *part_info)
/*
For linear hashing we need a mask which is on the form 2**n - 1 where
- 2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
+ 2**n >= num_parts. Thus if num_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
SYNOPSIS
set_linear_hash_mask()
part_info Reference to partitioning data structure
- no_parts Number of parts in linear hash partitioning
+ num_parts Number of parts in linear hash partitioning
RETURN VALUE
NONE
*/
-void set_linear_hash_mask(partition_info *part_info, uint no_parts)
+void set_linear_hash_mask(partition_info *part_info, uint num_parts)
{
uint mask;
- for (mask= 1; mask < no_parts; mask<<=1)
+ for (mask= 1; mask < num_parts; mask<<=1)
;
part_info->linear_hash_mask= mask - 1;
}
@@ -1445,7 +1514,7 @@ void set_linear_hash_mask(partition_info *part_info, uint no_parts)
get_part_id_from_linear_hash()
hash_value Hash value calculated by HASH function or KEY function
mask Mask calculated previously by set_linear_hash_mask
- no_parts Number of partitions in HASH partitioned part
+ num_parts Number of partitions in HASH partitioned part
RETURN VALUE
part_id The calculated partition identity (starting at 0)
@@ -1458,11 +1527,11 @@ void set_linear_hash_mask(partition_info *part_info, uint no_parts)
*/
static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
- uint no_parts)
+ uint num_parts)
{
uint32 part_id= (uint32)(hash_value & mask);
- if (part_id >= no_parts)
+ if (part_id >= num_parts)
{
uint new_mask= ((mask + 1) >> 1) - 1;
part_id= (uint32)(hash_value & new_mask);
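Taken together, the mask computation and the fold-back step above give the complete linear-hash mapping. The following is a self-contained sketch of that calculation with hypothetical names; the server versions operate on partition_info and Item trees instead.

// Illustrative linear-hash mapping: mask with 2**n - 1, fold back if the
// result lands beyond the last existing partition.
#include <cstdio>

static unsigned linear_hash_part(unsigned long long hash_value, unsigned num_parts)
{
  unsigned mask= 1;
  while (mask < num_parts)          // smallest power of two >= num_parts
    mask<<= 1;
  mask-= 1;                         // e.g. num_parts = 6 gives mask = 7
  unsigned part_id= (unsigned)(hash_value & mask);
  if (part_id >= num_parts)
  {
    unsigned new_mask= ((mask + 1) >> 1) - 1;   // here: 3
    part_id= (unsigned)(hash_value & new_mask);
  }
  return part_id;
}

int main()
{
  for (unsigned long long v= 0; v < 8; v++)
    std::printf("hash %llu -> partition %u of 6\n", v, linear_hash_part(v, 6));
  return 0;
}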
@@ -1606,7 +1675,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
function is correct.
*/
if (part_info->linear_hash_ind)
- set_linear_hash_mask(part_info, part_info->no_subparts);
+ set_linear_hash_mask(part_info, part_info->num_subparts);
if (part_info->list_of_subpart_fields)
{
List_iterator<char> it(part_info->subpart_field_list);
@@ -1616,12 +1685,12 @@ bool fix_partition_func(THD *thd, TABLE *table,
else
{
if (unlikely(fix_fields_part_func(thd, part_info->subpart_expr,
- table, TRUE, TRUE)))
+ table, TRUE)))
goto end;
if (unlikely(part_info->subpart_expr->result_type() != INT_RESULT))
{
- my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0),
- "SUBPARTITION");
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0),
+ subpart_str);
goto end;
}
}
@@ -1634,7 +1703,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
if (part_info->part_type == HASH_PARTITION)
{
if (part_info->linear_hash_ind)
- set_linear_hash_mask(part_info, part_info->no_parts);
+ set_linear_hash_mask(part_info, part_info->num_parts);
if (part_info->list_of_part_fields)
{
List_iterator<char> it(part_info->part_field_list);
@@ -1644,32 +1713,43 @@ bool fix_partition_func(THD *thd, TABLE *table,
else
{
if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
- table, FALSE, TRUE)))
+ table, FALSE)))
goto end;
if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
{
- my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0), part_str);
goto end;
}
part_info->part_result_type= INT_RESULT;
}
+ part_info->fixed= TRUE;
}
else
{
const char *error_str;
- if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
- table, FALSE, TRUE)))
- goto end;
+ if (part_info->column_list)
+ {
+ List_iterator<char> it(part_info->part_field_list);
+ if (unlikely(handle_list_of_fields(it, table, part_info, FALSE)))
+ goto end;
+ }
+ else
+ {
+ if (unlikely(fix_fields_part_func(thd, part_info->part_expr,
+ table, FALSE)))
+ goto end;
+ }
+ part_info->fixed= TRUE;
if (part_info->part_type == RANGE_PARTITION)
{
error_str= partition_keywords[PKW_RANGE].str;
- if (unlikely(part_info->check_range_constants()))
+ if (unlikely(part_info->check_range_constants(thd)))
goto end;
}
else if (part_info->part_type == LIST_PARTITION)
{
error_str= partition_keywords[PKW_LIST].str;
- if (unlikely(part_info->check_list_constants()))
+ if (unlikely(part_info->check_list_constants(thd)))
goto end;
}
else
@@ -1678,12 +1758,13 @@ bool fix_partition_func(THD *thd, TABLE *table,
my_error(ER_INCONSISTENT_PARTITION_INFO_ERROR, MYF(0));
goto end;
}
- if (unlikely(part_info->no_parts < 1))
+ if (unlikely(part_info->num_parts < 1))
{
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_str);
goto end;
}
- if (unlikely(part_info->part_expr->result_type() != INT_RESULT))
+ if (unlikely(!part_info->column_list &&
+ part_info->part_expr->result_type() != INT_RESULT))
{
my_error(ER_PARTITION_FUNC_NOT_ALLOWED_ERROR, MYF(0), part_str);
goto end;
@@ -1691,7 +1772,8 @@ bool fix_partition_func(THD *thd, TABLE *table,
}
if (((part_info->part_type != HASH_PARTITION ||
part_info->list_of_part_fields == FALSE) &&
- check_part_func_fields(part_info->part_field_array, TRUE)) ||
+ (!part_info->column_list &&
+ check_part_func_fields(part_info->part_field_array, TRUE))) ||
(part_info->list_of_subpart_fields == FALSE &&
part_info->is_sub_partitioned() &&
check_part_func_fields(part_info->subpart_field_array, TRUE)))
@@ -1714,6 +1796,11 @@ bool fix_partition_func(THD *thd, TABLE *table,
my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
goto end;
}
+ if (unlikely(part_info->check_partition_field_length()))
+ {
+ my_error(ER_PARTITION_FIELDS_TOO_LONG, MYF(0));
+ goto end;
+ }
check_range_capable_PF(table);
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
@@ -1736,9 +1823,9 @@ end:
static int add_write(File fptr, const char *buf, uint len)
{
- uint len_written= my_write(fptr, (const uchar*)buf, len, MYF(0));
+ uint ret_code= my_write(fptr, (const uchar*)buf, len, MYF(MY_FNABP));
- if (likely(len == len_written))
+ if (likely(ret_code == 0))
return 0;
else
return 1;
@@ -1787,14 +1874,8 @@ static int add_begin_parenthesis(File fptr)
static int add_part_key_word(File fptr, const char *key_string)
{
int err= add_string(fptr, key_string);
-
err+= add_space(fptr);
- return err + add_begin_parenthesis(fptr);
-}
-
-static int add_hash(File fptr)
-{
- return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
+ return err;
}
static int add_partition(File fptr)
@@ -1825,16 +1906,16 @@ static int add_subpartition_by(File fptr)
return err + add_partition_by(fptr);
}
-static int add_key_partition(File fptr, List<char> field_list)
+static int add_part_field_list(File fptr, List<char> field_list)
{
- uint i, no_fields;
- int err;
+ uint i, num_fields;
+ int err= 0;
List_iterator<char> part_it(field_list);
- err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
- no_fields= field_list.elements;
+ num_fields= field_list.elements;
i= 0;
- while (i < no_fields)
+ err+= add_begin_parenthesis(fptr);
+ while (i < num_fields)
{
const char *field_str= part_it++;
String field_string("", 0, system_charset_info);
@@ -1845,10 +1926,11 @@ static int add_key_partition(File fptr, List<char> field_list)
strlen(field_str));
thd->options= save_options;
err+= add_string_object(fptr, &field_string);
- if (i != (no_fields-1))
+ if (i != (num_fields-1))
err+= add_comma(fptr);
i++;
}
+ err+= add_end_parenthesis(fptr);
return err;
}
@@ -1958,37 +2040,269 @@ static int add_partition_options(File fptr, partition_element *p_elem)
return err + add_engine(fptr,p_elem->engine_type);
}
-static int add_partition_values(File fptr, partition_info *part_info, partition_element *p_elem)
+
+/*
+  Check a partition field's SQL type: return its result type and whether a
+  character set check is needed.
+
+ SYNOPSIS
+ check_part_field()
+ sql_type Type provided by user
+ field_name Name of field, used for error handling
+ result_type Out value: Result type of field
+ need_cs_check Out value: Do we need character set check
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Ok
+*/
+
+static int check_part_field(enum_field_types sql_type,
+ const char *field_name,
+ Item_result *result_type,
+ bool *need_cs_check)
+{
+ if (sql_type >= MYSQL_TYPE_TINY_BLOB &&
+ sql_type <= MYSQL_TYPE_BLOB)
+ {
+ my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
+ return TRUE;
+ }
+ switch (sql_type)
+ {
+ case MYSQL_TYPE_NEWDECIMAL:
+ case MYSQL_TYPE_DECIMAL:
+ case MYSQL_TYPE_TINY:
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_LONG:
+ case MYSQL_TYPE_LONGLONG:
+ case MYSQL_TYPE_INT24:
+ *result_type= INT_RESULT;
+ *need_cs_check= FALSE;
+ return FALSE;
+ case MYSQL_TYPE_NEWDATE:
+ case MYSQL_TYPE_DATE:
+ case MYSQL_TYPE_TIME:
+ case MYSQL_TYPE_DATETIME:
+ *result_type= STRING_RESULT;
+ *need_cs_check= TRUE;
+ return FALSE;
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_STRING:
+ case MYSQL_TYPE_VAR_STRING:
+ *result_type= STRING_RESULT;
+ *need_cs_check= TRUE;
+ return FALSE;
+ case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_NULL:
+ case MYSQL_TYPE_FLOAT:
+ case MYSQL_TYPE_DOUBLE:
+ case MYSQL_TYPE_BIT:
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ case MYSQL_TYPE_GEOMETRY:
+ goto error;
+ default:
+ goto error;
+ }
+error:
+ my_error(ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD, MYF(0),
+ field_name);
+ return TRUE;
+}
+
+
+/*
+ Find the given field's Create_field object using name of field
+
+ SYNOPSIS
+ get_sql_field()
+ field_name Field name
+ alter_info Info from ALTER TABLE/CREATE TABLE
+
+ RETURN VALUE
+ sql_field Object filled in by parser about field
+ NULL No field found
+*/
+
+static Create_field* get_sql_field(char *field_name,
+ Alter_info *alter_info)
+{
+ List_iterator<Create_field> it(alter_info->create_list);
+ Create_field *sql_field;
+ DBUG_ENTER("get_sql_field");
+
+ while ((sql_field= it++))
+ {
+ if (!(my_strcasecmp(system_charset_info,
+ sql_field->field_name,
+ field_name)))
+ {
+ DBUG_RETURN(sql_field);
+ }
+ }
+ DBUG_RETURN(NULL);
+}
+
+
+static int add_column_list_values(File fptr, partition_info *part_info,
+ part_elem_value *list_value,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
+{
+ int err= 0;
+ uint i;
+ List_iterator<char> it(part_info->part_field_list);
+ uint num_elements= part_info->part_field_list.elements;
+ bool use_parenthesis= (part_info->part_type == LIST_PARTITION &&
+ part_info->num_columns > 1U);
+
+ if (use_parenthesis)
+ err+= add_begin_parenthesis(fptr);
+ for (i= 0; i < num_elements; i++)
+ {
+ part_column_list_val *col_val= &list_value->col_val_array[i];
+ char *field_name= it++;
+ if (col_val->max_value)
+ err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ else if (col_val->null_value)
+ err+= add_string(fptr, "NULL");
+ else
+ {
+ char buffer[MAX_KEY_LENGTH];
+ String str(buffer, sizeof(buffer), &my_charset_bin);
+ Item *item_expr= col_val->item_expression;
+ if (item_expr->null_value)
+ err+= add_string(fptr, "NULL");
+ else
+ {
+ String *res;
+ CHARSET_INFO *field_cs;
+ bool need_cs_check= FALSE;
+ Item_result result_type= STRING_RESULT;
+
+ /*
+ This function is called at a very early stage, even before
+ we have prepared the sql_field objects. Thus we have to
+ find the proper sql_field object and get the character set
+ from that object.
+ */
+ if (create_info)
+ {
+ Create_field *sql_field;
+
+ if (!(sql_field= get_sql_field(field_name,
+ alter_info)))
+ {
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+ return 1;
+ }
+ if (check_part_field(sql_field->sql_type,
+ sql_field->field_name,
+ &result_type,
+ &need_cs_check))
+ return 1;
+ if (need_cs_check)
+ field_cs= get_sql_field_charset(sql_field, create_info);
+ else
+ field_cs= NULL;
+ }
+ else
+ {
+ Field *field= part_info->part_field_array[i];
+ result_type= field->result_type();
+ if (check_part_field(field->real_type(),
+ field->field_name,
+ &result_type,
+ &need_cs_check))
+ return 1;
+ DBUG_ASSERT(result_type == field->result_type());
+ if (need_cs_check)
+ field_cs= field->charset();
+ else
+ field_cs= NULL;
+ }
+ if (result_type != item_expr->result_type())
+ {
+ my_error(ER_WRONG_TYPE_COLUMN_VALUE_ERROR, MYF(0));
+ return 1;
+ }
+ if (field_cs && field_cs != item_expr->collation.collation)
+ {
+ if (!(item_expr= convert_charset_partition_constant(item_expr,
+ field_cs)))
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ return 1;
+ }
+ }
+ {
+ String val_conv;
+ val_conv.set_charset(system_charset_info);
+ res= item_expr->val_str(&str);
+ if (get_cs_converted_part_value_from_string(current_thd,
+ item_expr, res,
+ &val_conv, field_cs,
+ (bool)(alter_info != NULL)))
+ return 1;
+ err+= add_string_object(fptr, &val_conv);
+ }
+ }
+ }
+ if (i != (num_elements - 1))
+ err+= add_string(fptr, comma_str);
+ }
+ if (use_parenthesis)
+ err+= add_end_parenthesis(fptr);
+ return err;
+}
+
+static int add_partition_values(File fptr, partition_info *part_info,
+ partition_element *p_elem,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
{
int err= 0;
if (part_info->part_type == RANGE_PARTITION)
{
err+= add_string(fptr, " VALUES LESS THAN ");
- if (!p_elem->max_value)
+ if (part_info->column_list)
{
+ List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
+ part_elem_value *list_value= list_val_it++;
err+= add_begin_parenthesis(fptr);
- if (p_elem->signed_flag)
- err+= add_int(fptr, p_elem->range_value);
- else
- err+= add_uint(fptr, p_elem->range_value);
+ err+= add_column_list_values(fptr, part_info, list_value,
+ create_info, alter_info);
err+= add_end_parenthesis(fptr);
}
else
- err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ {
+ if (!p_elem->max_value)
+ {
+ err+= add_begin_parenthesis(fptr);
+ if (p_elem->signed_flag)
+ err+= add_int(fptr, p_elem->range_value);
+ else
+ err+= add_uint(fptr, p_elem->range_value);
+ err+= add_end_parenthesis(fptr);
+ }
+ else
+ err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
+ }
}
else if (part_info->part_type == LIST_PARTITION)
{
uint i;
List_iterator<part_elem_value> list_val_it(p_elem->list_val_list);
err+= add_string(fptr, " VALUES IN ");
- uint no_items= p_elem->list_val_list.elements;
+ uint num_items= p_elem->list_val_list.elements;
err+= add_begin_parenthesis(fptr);
if (p_elem->has_null_value)
{
err+= add_string(fptr, "NULL");
- if (no_items == 0)
+ if (num_items == 0)
{
err+= add_end_parenthesis(fptr);
goto end;
@@ -2000,13 +2314,19 @@ static int add_partition_values(File fptr, partition_info *part_info, partition_
{
part_elem_value *list_value= list_val_it++;
- if (!list_value->unsigned_flag)
- err+= add_int(fptr, list_value->value);
+ if (part_info->column_list)
+ err+= add_column_list_values(fptr, part_info, list_value,
+ create_info, alter_info);
else
- err+= add_uint(fptr, list_value->value);
- if (i != (no_items-1))
+ {
+ if (!list_value->unsigned_flag)
+ err+= add_int(fptr, list_value->value);
+ else
+ err+= add_uint(fptr, list_value->value);
+ }
+ if (i != (num_items-1))
err+= add_comma(fptr);
- } while (++i < no_items);
+ } while (++i < num_items);
err+= add_end_parenthesis(fptr);
}
end:
@@ -2025,6 +2345,8 @@ end:
use_sql_alloc Allocate buffer from sql_alloc if true
otherwise use my_malloc
show_partition_options Should we display partition options
+ create_info Info generated by parser
+ alter_info Info generated by parser
RETURN VALUES
NULL error
@@ -2053,9 +2375,11 @@ end:
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length,
bool use_sql_alloc,
- bool show_partition_options)
+ bool show_partition_options,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info)
{
- uint i,j, tot_no_parts, no_subparts;
+ uint i,j, tot_num_parts, num_subparts;
partition_element *part_elem;
ulonglong buffer_length;
char path[FN_REFLEN];
@@ -2086,9 +2410,12 @@ char *generate_partition_syntax(partition_info *part_info,
if (part_info->linear_hash_ind)
err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_part_fields)
- err+= add_key_partition(fptr, part_info->part_field_list);
+ {
+ err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ err+= add_part_field_list(fptr, part_info->part_field_list);
+ }
else
- err+= add_hash(fptr);
+ err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
break;
default:
DBUG_ASSERT(0);
@@ -2098,15 +2425,23 @@ char *generate_partition_syntax(partition_info *part_info,
DBUG_RETURN(NULL);
}
if (part_info->part_expr)
+ {
+ err+= add_begin_parenthesis(fptr);
err+= add_string_len(fptr, part_info->part_func_string,
part_info->part_func_len);
- err+= add_end_parenthesis(fptr);
- if ((!part_info->use_default_no_partitions) &&
+ err+= add_end_parenthesis(fptr);
+ }
+ else if (part_info->column_list)
+ {
+ err+= add_string(fptr, partition_keywords[PKW_COLUMNS].str);
+ err+= add_part_field_list(fptr, part_info->part_field_list);
+ }
+ if ((!part_info->use_default_num_partitions) &&
part_info->use_default_partitions)
{
err+= add_string(fptr, "\n");
err+= add_string(fptr, "PARTITIONS ");
- err+= add_int(fptr, part_info->no_parts);
+ err+= add_int(fptr, part_info->num_parts);
}
if (part_info->is_sub_partitioned())
{
@@ -2116,23 +2451,29 @@ char *generate_partition_syntax(partition_info *part_info,
if (part_info->linear_hash_ind)
err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_subpart_fields)
- err+= add_key_partition(fptr, part_info->subpart_field_list);
+ {
+ add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ add_part_field_list(fptr, part_info->subpart_field_list);
+ }
else
- err+= add_hash(fptr);
+ err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
if (part_info->subpart_expr)
+ {
+ err+= add_begin_parenthesis(fptr);
err+= add_string_len(fptr, part_info->subpart_func_string,
part_info->subpart_func_len);
- err+= add_end_parenthesis(fptr);
- if ((!part_info->use_default_no_subpartitions) &&
+ err+= add_end_parenthesis(fptr);
+ }
+ if ((!part_info->use_default_num_subpartitions) &&
part_info->use_default_subpartitions)
{
err+= add_string(fptr, "\n");
err+= add_string(fptr, "SUBPARTITIONS ");
- err+= add_int(fptr, part_info->no_subparts);
+ err+= add_int(fptr, part_info->num_subparts);
}
}
- tot_no_parts= part_info->partitions.elements;
- no_subparts= part_info->no_subparts;
+ tot_num_parts= part_info->partitions.elements;
+ num_subparts= part_info->num_subparts;
if (!part_info->use_default_partitions)
{
@@ -2155,7 +2496,8 @@ char *generate_partition_syntax(partition_info *part_info,
first= FALSE;
err+= add_partition(fptr);
err+= add_name_string(fptr, part_elem->partition_name);
- err+= add_partition_values(fptr, part_info, part_elem);
+ err+= add_partition_values(fptr, part_info, part_elem,
+ create_info, alter_info);
if (!part_info->is_sub_partitioned() ||
part_info->use_default_subpartitions)
{
@@ -2176,7 +2518,7 @@ char *generate_partition_syntax(partition_info *part_info,
err+= add_name_string(fptr, part_elem->partition_name);
if (show_partition_options)
err+= add_partition_options(fptr, part_elem);
- if (j != (no_subparts-1))
+ if (j != (num_subparts-1))
{
err+= add_comma(fptr);
err+= add_string(fptr, "\n");
@@ -2185,12 +2527,12 @@ char *generate_partition_syntax(partition_info *part_info,
}
else
err+= add_end_parenthesis(fptr);
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
}
- if (i == (tot_no_parts-1))
+ if (i == (tot_num_parts-1))
err+= add_end_parenthesis(fptr);
- } while (++i < tot_no_parts);
+ } while (++i < tot_num_parts);
}
if (err)
goto close_file;
@@ -2337,14 +2679,14 @@ static uint32 calculate_key_value(Field **field_array)
get_part_id_for_sub()
loc_part_id Local partition id
sub_part_id Subpartition id
- no_subparts Number of subparts
+ num_subparts Number of subparts
*/
inline
static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
- uint no_subparts)
+ uint num_subparts)
{
- return (uint32)((loc_part_id * no_subparts) + sub_part_id);
+ return (uint32)((loc_part_id * num_subparts) + sub_part_id);
}
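The formula above is plain row-major numbering of (partition, subpartition) pairs; a tiny illustrative sketch, assuming nothing beyond the arithmetic itself:

// With 4 subpartitions per partition, (partition 2, subpartition 1)
// gets global id 2 * 4 + 1 = 9.
#include <cstdio>

int main()
{
  const unsigned num_subparts= 4;
  for (unsigned p= 0; p < 3; p++)
    for (unsigned s= 0; s < num_subparts; s++)
      std::printf("part %u, subpart %u -> global id %u\n",
                  p, s, p * num_subparts + s);
  return 0;
}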
@@ -2353,7 +2695,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
SYNOPSIS
get_part_id_hash()
- no_parts Number of hash partitions
+ num_parts Number of hash partitions
part_expr Item tree of hash function
out:part_id The returned partition id
out:func_value Value of hash function
@@ -2363,7 +2705,7 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
FALSE Success
*/
-static int get_part_id_hash(uint no_parts,
+static int get_part_id_hash(uint num_parts,
Item *part_expr,
uint32 *part_id,
longlong *func_value)
@@ -2374,7 +2716,7 @@ static int get_part_id_hash(uint no_parts,
if (part_val_int(part_expr, func_value))
DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
- int_hash_id= *func_value % no_parts;
+ int_hash_id= *func_value % num_parts;
*part_id= int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id;
DBUG_RETURN(FALSE);
@@ -2388,7 +2730,7 @@ static int get_part_id_hash(uint no_parts,
get_part_id_linear_hash()
part_info A reference to the partition_info struct where all the
desired information is given
- no_parts Number of hash partitions
+ num_parts Number of hash partitions
part_expr Item tree of hash function
out:part_id The returned partition id
out:func_value Value of hash function
@@ -2399,7 +2741,7 @@ static int get_part_id_hash(uint no_parts,
*/
static int get_part_id_linear_hash(partition_info *part_info,
- uint no_parts,
+ uint num_parts,
Item *part_expr,
uint32 *part_id,
longlong *func_value)
@@ -2411,7 +2753,7 @@ static int get_part_id_linear_hash(partition_info *part_info,
*part_id= get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
- no_parts);
+ num_parts);
DBUG_RETURN(FALSE);
}
@@ -2422,7 +2764,7 @@ static int get_part_id_linear_hash(partition_info *part_info,
SYNOPSIS
get_part_id_key()
field_array Array of fields for PARTTION KEY
- no_parts Number of KEY partitions
+ num_parts Number of KEY partitions
RETURN VALUE
Calculated partition id
@@ -2430,12 +2772,12 @@ static int get_part_id_linear_hash(partition_info *part_info,
inline
static uint32 get_part_id_key(Field **field_array,
- uint no_parts,
+ uint num_parts,
longlong *func_value)
{
DBUG_ENTER("get_part_id_key");
*func_value= calculate_key_value(field_array);
- DBUG_RETURN((uint32) (*func_value % no_parts));
+ DBUG_RETURN((uint32) (*func_value % num_parts));
}
@@ -2447,7 +2789,7 @@ static uint32 get_part_id_key(Field **field_array,
part_info A reference to the partition_info struct where all the
desired information is given
field_array Array of fields for PARTTION KEY
- no_parts Number of KEY partitions
+ num_parts Number of KEY partitions
RETURN VALUE
Calculated partition id
@@ -2456,15 +2798,15 @@ static uint32 get_part_id_key(Field **field_array,
inline
static uint32 get_part_id_linear_key(partition_info *part_info,
Field **field_array,
- uint no_parts,
+ uint num_parts,
longlong *func_value)
{
- DBUG_ENTER("get_partition_id_linear_key");
+ DBUG_ENTER("get_part_id_linear_key");
*func_value= calculate_key_value(field_array);
DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
- no_parts));
+ num_parts));
}
/*
@@ -2498,7 +2840,8 @@ static void copy_to_part_field_buffers(Field **ptr,
if (!field->maybe_null() || !field->is_null())
{
CHARSET_INFO *cs= ((Field_str*)field)->charset();
- uint len= field->pack_length();
+ uint max_len= field->pack_length();
+ uint data_len= field->data_length();
uchar *field_buf= *field_bufs;
/*
We only use the field buffer for VARCHAR and CHAR strings
@@ -2510,17 +2853,17 @@ static void copy_to_part_field_buffers(Field **ptr,
if (field->type() == MYSQL_TYPE_VARCHAR)
{
uint len_bytes= ((Field_varstring*)field)->length_bytes;
- my_strnxfrm(cs, field_buf + len_bytes, (len - len_bytes),
- field->ptr + len_bytes, field->field_length);
+ my_strnxfrm(cs, field_buf + len_bytes, max_len,
+ field->ptr + len_bytes, data_len);
if (len_bytes == 1)
- *field_buf= (uchar) field->field_length;
+ *field_buf= (uchar) data_len;
else
- int2store(field_buf, field->field_length);
+ int2store(field_buf, data_len);
}
else
{
- my_strnxfrm(cs, field_buf, len,
- field->ptr, field->field_length);
+ my_strnxfrm(cs, field_buf, max_len,
+ field->ptr, max_len);
}
field->ptr= field_buf;
}
@@ -2550,6 +2893,44 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
return;
}
+/*
+ This function is used to calculate the partition id where all partition
+ fields have been prepared to point to a record where the partition field
+ values are bound.
+
+ SYNOPSIS
+ get_partition_id()
+ part_info A reference to the partition_info struct where all the
+ desired information is given
+ out:part_id The partition id is returned through this pointer
+ out:func_value Value of partition function (longlong)
+
+ RETURN VALUE
+ part_id Partition id of partition that would contain
+ row with given values of PF-fields
+ HA_ERR_NO_PARTITION_FOUND The fields of the partition function didn't
+ fit into any partition and thus the values of
+ the PF-fields are not allowed.
+
+ DESCRIPTION
+ A routine used from write_row, update_row and delete_row from any
+ handler supporting partitioning. It is also a support routine for
+ get_partition_set used to find the set of partitions needed to scan
+    get_partition_set, used to find the set of partitions needed for a
+    certain index scan or full table scan.
+    There are actually 9 different variants of this function, all called
+    through a function pointer.
+
+ get_partition_id_list
+ get_partition_id_list_col
+ get_partition_id_range
+ get_partition_id_range_col
+ get_partition_id_hash_nosub
+ get_partition_id_key_nosub
+ get_partition_id_linear_hash_nosub
+ get_partition_id_linear_key_nosub
+ get_partition_id_with_sub
+*/
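The variant list above is resolved once per table into a single function pointer. A hedged, self-contained sketch of that dispatch pattern follows; the struct and function names are hypothetical and far simpler than the real partition_info.

// Illustrative function-pointer dispatch: the slot is filled once according
// to the partitioning type, callers never need to know which variant runs.
#include <cstdio>

struct part_info_sketch
{
  unsigned num_parts;
  int (*get_partition_id)(const part_info_sketch *pi, long long value,
                          unsigned *part_id);
};

static int get_id_hash(const part_info_sketch *pi, long long value,
                       unsigned *part_id)
{
  *part_id= (unsigned)(value % pi->num_parts);
  return 0;
}

int main()
{
  part_info_sketch pi= {4, get_id_hash};      // chosen once, e.g. HASH
  unsigned id;
  pi.get_partition_id(&pi, 10, &id);
  std::printf("value 10 -> partition %u\n", id);   // prints partition 2
  return 0;
}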
/*
This function is used to calculate the main partition to use in the case of
@@ -2571,67 +2952,26 @@ static void restore_part_field_pointers(Field **ptr, uchar **restore_ptr)
DESCRIPTION
- It is actually 6 different variants of this function which are called
+    There are actually 8 different variants of this function, which are called
through a function pointer.
get_partition_id_list
+ get_partition_id_list_col
get_partition_id_range
+ get_partition_id_range_col
get_partition_id_hash_nosub
get_partition_id_key_nosub
get_partition_id_linear_hash_nosub
get_partition_id_linear_key_nosub
*/
-static int get_part_id_charset_func_subpart(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- int res;
- copy_to_part_field_buffers(part_info->subpart_charset_field_array,
- part_info->subpart_field_buffers,
- part_info->restore_subpart_field_ptrs);
- res= part_info->get_partition_id_charset(part_info, part_id, func_value);
- restore_part_field_pointers(part_info->subpart_charset_field_array,
- part_info->restore_subpart_field_ptrs);
- return res;
-}
-
-
static int get_part_id_charset_func_part(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
int res;
- copy_to_part_field_buffers(part_info->part_charset_field_array,
- part_info->part_field_buffers,
- part_info->restore_part_field_ptrs);
- res= part_info->get_partition_id_charset(part_info, part_id, func_value);
- restore_part_field_pointers(part_info->part_charset_field_array,
- part_info->restore_part_field_ptrs);
- return res;
-}
-
-
-static int get_part_id_charset_func_all(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- int res;
- copy_to_part_field_buffers(part_info->full_part_field_array,
- part_info->full_part_field_buffers,
- part_info->restore_full_part_field_ptrs);
- res= part_info->get_partition_id_charset(part_info, part_id, func_value);
- restore_part_field_pointers(part_info->full_part_field_array,
- part_info->restore_full_part_field_ptrs);
- return res;
-}
-
+ DBUG_ENTER("get_part_id_charset_func_part");
-static int get_part_part_id_charset_func(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- int res;
copy_to_part_field_buffers(part_info->part_charset_field_array,
part_info->part_field_buffers,
part_info->restore_part_field_ptrs);
@@ -2639,21 +2979,58 @@ static int get_part_part_id_charset_func(partition_info *part_info,
part_id, func_value);
restore_part_field_pointers(part_info->part_charset_field_array,
part_info->restore_part_field_ptrs);
- return res;
+ DBUG_RETURN(res);
}
-static int get_subpart_id_charset_func(partition_info *part_info,
- uint32 *part_id)
+static int get_part_id_charset_func_subpart(partition_info *part_info,
+ uint32 *part_id)
{
int res;
+ DBUG_ENTER("get_part_id_charset_func_subpart");
+
copy_to_part_field_buffers(part_info->subpart_charset_field_array,
part_info->subpart_field_buffers,
part_info->restore_subpart_field_ptrs);
res= part_info->get_subpartition_id_charset(part_info, part_id);
restore_part_field_pointers(part_info->subpart_charset_field_array,
part_info->restore_subpart_field_ptrs);
- return res;
+ DBUG_RETURN(res);
+}
+
+int get_partition_id_list_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ part_column_list_val *list_col_array= part_info->list_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ int list_index, cmp;
+ int min_list_index= 0;
+ int max_list_index= part_info->num_list_values - 1;
+ DBUG_ENTER("get_partition_id_list_col");
+
+ while (max_list_index >= min_list_index)
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ cmp= cmp_rec_and_tuple(list_col_array + list_index*num_columns,
+ num_columns);
+ if (cmp > 0)
+ min_list_index= list_index + 1;
+ else if (cmp < 0)
+ {
+ if (!list_index)
+ goto notfound;
+ max_list_index= list_index - 1;
+ }
+ else
+ {
+ *part_id= (uint32)list_col_array[list_index].partition_id;
+ DBUG_RETURN(0);
+ }
+ }
+notfound:
+ *part_id= 0;
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
}
@@ -2664,7 +3041,7 @@ int get_partition_id_list(partition_info *part_info,
LIST_PART_ENTRY *list_array= part_info->list_array;
int list_index;
int min_list_index= 0;
- int max_list_index= part_info->no_list_values - 1;
+ int max_list_index= part_info->num_list_values - 1;
longlong part_func_value;
int error= part_val_int(part_info->part_expr, &part_func_value);
longlong list_value;
@@ -2734,7 +3111,7 @@ notfound:
index idx.
The function returns first number idx, such that
list_array[idx].list_value is NOT contained within the passed interval.
- If all array elements are contained, part_info->no_list_values is
+ If all array elements are contained, part_info->num_list_values is
returned.
NOTE
@@ -2748,6 +3125,44 @@ notfound:
The edge of corresponding sub-array of part_info->list_array
*/
+uint32 get_partition_id_cols_list_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint,
+ uint32 nparts)
+{
+ part_column_list_val *list_col_array= part_info->list_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ int list_index, cmp;
+ uint min_list_index= 0;
+ uint max_list_index= part_info->num_list_values - 1;
+ bool tailf= !(left_endpoint ^ include_endpoint);
+ DBUG_ENTER("get_partition_id_cols_list_for_endpoint");
+
+ do
+ {
+ list_index= (max_list_index + min_list_index) >> 1;
+ cmp= cmp_rec_and_tuple_prune(list_col_array + list_index*num_columns,
+ nparts, tailf);
+ if (cmp > 0)
+ min_list_index= list_index + 1;
+ else if (cmp < 0)
+ {
+ if (!list_index)
+ goto notfound;
+ max_list_index= list_index - 1;
+ }
+ else
+ {
+ DBUG_RETURN(list_index + test(!tailf));
+ }
+ } while (max_list_index >= min_list_index);
+ if (cmp > 0)
+ list_index++;
+notfound:
+ DBUG_RETURN(list_index);
+}
+
+
uint32 get_list_array_idx_for_endpoint_charset(partition_info *part_info,
bool left_endpoint,
bool include_endpoint)
@@ -2769,7 +3184,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
{
LIST_PART_ENTRY *list_array= part_info->list_array;
uint list_index;
- uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+ uint min_list_index= 0, max_list_index= part_info->num_list_values - 1;
longlong list_value;
/* Get the partitioning function value for the endpoint */
longlong part_func_value=
@@ -2799,7 +3214,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
if (unsigned_flag)
part_func_value-= 0x8000000000000000ULL;
- DBUG_ASSERT(part_info->no_list_values);
+ DBUG_ASSERT(part_info->num_list_values);
do
{
list_index= (max_list_index + min_list_index) >> 1;
@@ -2824,12 +3239,49 @@ notfound:
}
+int get_partition_id_range_col(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
+{
+ part_column_list_val *range_col_array= part_info->range_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ uint max_partition= part_info->num_parts - 1;
+ uint min_part_id= 0;
+ uint max_part_id= max_partition;
+ uint loc_part_id;
+ DBUG_ENTER("get_partition_id_range_col");
+
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns,
+ num_columns) >= 0)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (loc_part_id != max_partition)
+ if (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns,
+ num_columns) >= 0)
+ loc_part_id++;
+ *part_id= (uint32)loc_part_id;
+ if (loc_part_id == max_partition &&
+ (cmp_rec_and_tuple(range_col_array + loc_part_id*num_columns,
+ num_columns) >= 0))
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+
+ DBUG_PRINT("exit",("partition: %d", *part_id));
+ DBUG_RETURN(0);
+}
+
+
int get_partition_id_range(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
longlong *range_array= part_info->range_int_array;
- uint max_partition= part_info->no_parts - 1;
+ uint max_partition= part_info->num_parts - 1;
uint min_part_id= 0;
uint max_part_id= max_partition;
uint loc_part_id;
@@ -2906,7 +3358,7 @@ int get_partition_id_range(partition_info *part_info,
represented by range_int_array[idx] has EMPTY intersection with the
passed interval.
If the interval represented by the last array element has non-empty
- intersection with the passed interval, part_info->no_parts is
+ intersection with the passed interval, part_info->num_parts is
returned.
RETURN
@@ -2934,7 +3386,7 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
bool include_endpoint)
{
longlong *range_array= part_info->range_int_array;
- uint max_partition= part_info->no_parts - 1;
+ uint max_partition= part_info->num_parts - 1;
uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
/* Get the partitioning function value for the endpoint */
longlong part_func_value=
@@ -3017,7 +3469,7 @@ int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- return get_part_id_hash(part_info->no_parts, part_info->part_expr,
+ return get_part_id_hash(part_info->num_parts, part_info->part_expr,
part_id, func_value);
}
@@ -3026,7 +3478,7 @@ int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- return get_part_id_linear_hash(part_info, part_info->no_parts,
+ return get_part_id_linear_hash(part_info, part_info->num_parts,
part_info->part_expr, part_id, func_value);
}
@@ -3036,232 +3488,44 @@ int get_partition_id_key_nosub(partition_info *part_info,
longlong *func_value)
{
*part_id= get_part_id_key(part_info->part_field_array,
- part_info->no_parts, func_value);
+ part_info->num_parts, func_value);
return 0;
}
int get_partition_id_linear_key_nosub(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
+ uint32 *part_id,
+ longlong *func_value)
{
*part_id= get_part_id_linear_key(part_info,
part_info->part_field_array,
- part_info->no_parts, func_value);
+ part_info->num_parts, func_value);
return 0;
}
-int get_partition_id_range_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_hash");
- LINT_INIT(loc_part_id);
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
- &sub_part_id, &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_range_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_linear_hash");
- LINT_INIT(loc_part_id);
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
- part_info->subpart_expr,
- &sub_part_id,
- &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_range_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_key");
- LINT_INIT(loc_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_key(part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_range_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_range_sub_linear_key");
- LINT_INIT(loc_part_id);
-
- if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_linear_key(part_info,
- part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_list_sub_hash");
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_hash(no_subparts, part_info->subpart_expr,
- &sub_part_id, &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_linear_hash(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_list_sub_linear_hash");
- LINT_INIT(sub_part_id);
-
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
- {
- DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- if (unlikely((error= get_part_id_linear_hash(part_info, no_subparts,
- part_info->subpart_expr,
- &sub_part_id,
- &local_func_value))))
- {
- DBUG_RETURN(error);
- }
-
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
+int get_partition_id_with_sub(partition_info *part_info,
+ uint32 *part_id,
+ longlong *func_value)
{
uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
+ uint num_subparts;
int error;
- DBUG_ENTER("get_partition_id_range_sub_key");
+ DBUG_ENTER("get_partition_id_with_sub");
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
+ if (unlikely((error= part_info->get_part_partition_id(part_info,
+ &loc_part_id,
+ func_value))))
{
DBUG_RETURN(error);
}
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_key(part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(0);
-}
-
-
-int get_partition_id_list_sub_linear_key(partition_info *part_info,
- uint32 *part_id,
- longlong *func_value)
-{
- uint32 loc_part_id, sub_part_id;
- uint no_subparts;
- longlong local_func_value;
- int error;
- DBUG_ENTER("get_partition_id_list_sub_linear_key");
-
- if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
- func_value))))
+ num_subparts= part_info->num_subparts;
+ if (unlikely((error= part_info->get_subpartition_id(part_info,
+ &sub_part_id))))
{
DBUG_RETURN(error);
- }
- no_subparts= part_info->no_subparts;
- sub_part_id= get_part_id_linear_key(part_info,
- part_info->subpart_field_array,
- no_subparts, &local_func_value);
- *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
+ }
+ *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, num_subparts);
DBUG_RETURN(0);
}
@@ -3294,7 +3558,7 @@ int get_partition_id_hash_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
- return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr,
+ return get_part_id_hash(part_info->num_subparts, part_info->subpart_expr,
part_id, &func_value);
}
@@ -3303,7 +3567,7 @@ int get_partition_id_linear_hash_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
- return get_part_id_linear_hash(part_info, part_info->no_subparts,
+ return get_part_id_linear_hash(part_info, part_info->num_subparts,
part_info->subpart_expr, part_id,
&func_value);
}
@@ -3314,7 +3578,7 @@ int get_partition_id_key_sub(partition_info *part_info,
{
longlong func_value;
*part_id= get_part_id_key(part_info->subpart_field_array,
- part_info->no_subparts, &func_value);
+ part_info->num_subparts, &func_value);
return FALSE;
}
@@ -3325,7 +3589,7 @@ int get_partition_id_linear_key_sub(partition_info *part_info,
longlong func_value;
*part_id= get_part_id_linear_key(part_info,
part_info->subpart_field_array,
- part_info->no_subparts, &func_value);
+ part_info->num_subparts, &func_value);
return FALSE;
}
@@ -3624,16 +3888,16 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
const key_range *key_spec, part_id_range *part_spec)
{
partition_info *part_info= table->part_info;
- uint no_parts= part_info->get_tot_partitions();
+ uint num_parts= part_info->get_tot_partitions();
uint i, part_id;
- uint sub_part= no_parts;
- uint32 part_part= no_parts;
+ uint sub_part= num_parts;
+ uint32 part_part= num_parts;
KEY *key_info= NULL;
bool found_part_field= FALSE;
DBUG_ENTER("get_partition_set");
part_spec->start_part= 0;
- part_spec->end_part= no_parts - 1;
+ part_spec->end_part= num_parts - 1;
if ((index < MAX_KEY) &&
key_spec->flag == (uint)HA_READ_KEY_EXACT &&
part_info->some_fields_in_PF.is_set(index))
@@ -3670,7 +3934,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
{
if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
{
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
DBUG_VOID_RETURN;
}
}
@@ -3684,7 +3948,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
allowed values. Thus it is certain that the result of this
scan will be empty.
*/
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
DBUG_VOID_RETURN;
}
}
@@ -3722,7 +3986,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
{
if (get_sub_part_id_from_key(table, buf, key_info, key_spec, &sub_part))
{
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
@@ -3731,7 +3995,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
{
if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
{
- part_spec->start_part= no_parts;
+ part_spec->start_part= num_parts;
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
@@ -3752,29 +4016,29 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
nothing or we have discovered a range of partitions with possible holes
in it. We need a bitvector to further the work here.
*/
- if (!(part_part == no_parts && sub_part == no_parts))
+ if (!(part_part == num_parts && sub_part == num_parts))
{
/*
We can only arrive here if we are using subpartitioning.
*/
- if (part_part != no_parts)
+ if (part_part != num_parts)
{
/*
We know the top partition and need to scan all underlying
subpartitions. This is a range without holes.
*/
- DBUG_ASSERT(sub_part == no_parts);
- part_spec->start_part= part_part * part_info->no_subparts;
- part_spec->end_part= part_spec->start_part+part_info->no_subparts - 1;
+ DBUG_ASSERT(sub_part == num_parts);
+ part_spec->start_part= part_part * part_info->num_subparts;
+ part_spec->end_part= part_spec->start_part+part_info->num_subparts - 1;
}
else
{
- DBUG_ASSERT(sub_part != no_parts);
+ DBUG_ASSERT(sub_part != num_parts);
part_spec->start_part= sub_part;
part_spec->end_part=sub_part+
- (part_info->no_subparts*(part_info->no_parts-1));
- for (i= 0, part_id= sub_part; i < part_info->no_parts;
- i++, part_id+= part_info->no_subparts)
+ (part_info->num_subparts*(part_info->num_parts-1));
+ for (i= 0, part_id= sub_part; i < part_info->num_parts;
+ i++, part_id+= part_info->num_subparts)
; //Set bit part_id in bit array
}
}
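/*
  Illustrative sketch (not from the patch above; the function name and the
  numbers are assumed): how the code above maps a known top partition or a
  known subpartition onto the flat partition id range, for a table with
  4 partitions and 3 subpartitions.
*/
static void example_partition_set_bounds(void)
{
  uint num_parts= 4, num_subparts= 3;          /* 12 physical partitions */
  /* Top partition known (part_part= 2), all its subpartitions scanned:
     the contiguous range 6..8, as in start_part= part_part * num_subparts. */
  uint start_part= 2 * num_subparts;                     /* 6 */
  uint end_part= start_part + num_subparts - 1;          /* 8 */
  /* Subpartition known (sub_part= 1), one id per top partition:
     ids 1, 4, 7 and 10, a strided set, which is why the loop above must
     set individual bits rather than rely on a plain start..end range. */
  uint last_part= 1 + num_subparts * (num_parts - 1);    /* 10 */
  (void) end_part;
  (void) last_part;
}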
@@ -3893,10 +4157,12 @@ bool mysql_unpack_partition(THD *thd,
mem_alloc_error(sizeof(partition_info));
goto end;
}
- lex.part_info->part_state= part_state;
- lex.part_info->part_state_len= part_state_len;
+ part_info= lex.part_info;
+ part_info->part_state= part_state;
+ part_info->part_state_len= part_state_len;
DBUG_PRINT("info", ("Parse: %s", part_buf));
- if (parse_sql(thd, & parser_state, NULL))
+ if (parse_sql(thd, & parser_state, NULL) ||
+ part_info->fix_parser_data(thd))
{
thd->free_items();
goto end;
@@ -3917,7 +4183,6 @@ bool mysql_unpack_partition(THD *thd,
*/
DBUG_PRINT("info", ("Successful parse"));
- part_info= lex.part_info;
DBUG_PRINT("info", ("default engine = %s, default_db_type = %s",
ha_resolve_storage_engine_name(part_info->default_engine_type),
ha_resolve_storage_engine_name(default_db_type)));
@@ -4038,9 +4303,9 @@ set_engine_all_partitions(partition_info *part_info,
partition_element *sub_elem= sub_it++;
sub_elem->engine_type= engine_type;
- } while (++j < part_info->no_subparts);
+ } while (++j < part_info->num_subparts);
}
- } while (++i < part_info->no_parts);
+ } while (++i < part_info->num_parts);
}
/*
SYNOPSIS
@@ -4185,7 +4450,7 @@ uint set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
enum partition_state part_state)
{
uint part_count= 0;
- uint no_parts_found= 0;
+ uint num_parts_found= 0;
List_iterator<partition_element> part_it(tab_part_info->partitions);
do
@@ -4198,15 +4463,17 @@ uint set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
/*
Mark the partition.
I.e mark the partition as a partition to be "changed" by
- analyzing/optimizing/rebuilding/checking/repairing
+ analyzing/optimizing/rebuilding/checking/repairing/...
*/
- no_parts_found++;
+ num_parts_found++;
part_elem->part_state= part_state;
DBUG_PRINT("info", ("Setting part_state to %u for partition %s",
part_state, part_elem->partition_name));
}
- } while (++part_count < tab_part_info->no_parts);
- return no_parts_found;
+ else
+ part_elem->part_state= PART_NORMAL;
+ } while (++part_count < tab_part_info->num_parts);
+ return num_parts_found;
}
@@ -4273,6 +4540,11 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
partition_info *tab_part_info= table->part_info;
partition_info *alt_part_info= thd->work_part_info;
uint flags= 0;
+ bool is_last_partition_reorged;
+ part_elem_value *tab_max_elem_val= NULL;
+ part_elem_value *alt_max_elem_val= NULL;
+ longlong tab_max_range= 0, alt_max_range= 0;
+
if (!tab_part_info)
{
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
@@ -4282,13 +4554,13 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
{
uint new_part_no, curr_part_no;
if (tab_part_info->part_type != HASH_PARTITION ||
- tab_part_info->use_default_no_partitions)
+ tab_part_info->use_default_num_partitions)
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
new_part_no= table->file->get_default_no_partitions(create_info);
- curr_part_no= tab_part_info->no_parts;
+ curr_part_no= tab_part_info->num_parts;
if (new_part_no == curr_part_no)
{
/*
@@ -4306,7 +4578,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
setting the flag for no default number of partitions
*/
alter_info->flags|= ALTER_ADD_PARTITION;
- thd->work_part_info->no_parts= new_part_no - curr_part_no;
+ thd->work_part_info->num_parts= new_part_no - curr_part_no;
}
else
{
@@ -4315,7 +4587,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
without setting the flag for no default number of partitions
*/
alter_info->flags|= ALTER_COALESCE_PARTITION;
- alter_info->no_parts= curr_part_no - new_part_no;
+ alter_info->num_parts= curr_part_no - new_part_no;
}
}
if (!(flags= table->file->alter_table_flags(alter_info->flags)))
@@ -4327,34 +4599,73 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0);
DBUG_PRINT("info", ("*fast_alter_partition: %d flags: 0x%x",
*fast_alter_partition, flags));
- if (((alter_info->flags & ALTER_ADD_PARTITION) ||
- (alter_info->flags & ALTER_REORGANIZE_PARTITION)) &&
- (thd->work_part_info->part_type != tab_part_info->part_type) &&
- (thd->work_part_info->part_type != NOT_A_PARTITION))
+ if ((alter_info->flags & ALTER_ADD_PARTITION) ||
+ (alter_info->flags & ALTER_REORGANIZE_PARTITION))
{
- if (thd->work_part_info->part_type == RANGE_PARTITION)
- {
- my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
- "RANGE", "LESS THAN");
- }
- else if (thd->work_part_info->part_type == LIST_PARTITION)
+ if (thd->work_part_info->part_type != tab_part_info->part_type)
{
- DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION);
- my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
- "LIST", "IN");
+ if (thd->work_part_info->part_type == NOT_A_PARTITION)
+ {
+ if (tab_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "RANGE");
+ DBUG_RETURN(TRUE);
+ }
+ else if (tab_part_info->part_type == LIST_PARTITION)
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), "LIST");
+ DBUG_RETURN(TRUE);
+ }
+ /*
+ Hash partitions can be altered without the parser finding out that
+ the table is HASH partitioned. So no error here.
+ */
+ }
+ else
+ {
+ if (thd->work_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ }
+ else if (thd->work_part_info->part_type == LIST_PARTITION)
+ {
+ DBUG_ASSERT(thd->work_part_info->part_type == LIST_PARTITION);
+ my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ }
+ else if (tab_part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "RANGE", "LESS THAN");
+ }
+ else
+ {
+ DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
+ "LIST", "IN");
+ }
+ DBUG_RETURN(TRUE);
+ }
}
- else if (tab_part_info->part_type == RANGE_PARTITION)
+ if ((tab_part_info->column_list &&
+ alt_part_info->num_columns != tab_part_info->num_columns) ||
+ (!tab_part_info->column_list &&
+ (tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION) &&
+ alt_part_info->num_columns != 1U) ||
+ (!tab_part_info->column_list &&
+ tab_part_info->part_type == HASH_PARTITION &&
+ alt_part_info->num_columns != 0))
{
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "RANGE", "LESS THAN");
+ my_error(ER_PARTITION_COLUMN_LIST_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
}
- else
+ alt_part_info->column_list= tab_part_info->column_list;
+ if (alt_part_info->fix_parser_data(thd))
{
- DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "LIST", "IN");
+ DBUG_RETURN(TRUE);
}
- DBUG_RETURN(TRUE);
}
if (alter_info->flags & ALTER_ADD_PARTITION)
{
@@ -4364,9 +4675,9 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
partitioning scheme as currently set-up.
Partitions are always added at the end in ADD PARTITION.
*/
- uint no_new_partitions= alt_part_info->no_parts;
- uint no_orig_partitions= tab_part_info->no_parts;
- uint check_total_partitions= no_new_partitions + no_orig_partitions;
+ uint num_new_partitions= alt_part_info->num_parts;
+ uint num_orig_partitions= tab_part_info->num_parts;
+ uint check_total_partitions= num_new_partitions + num_orig_partitions;
uint new_total_partitions= check_total_partitions;
/*
We allow quite a lot of values to be supplied by defaults, however we
@@ -4383,22 +4694,22 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
my_error(ER_PARTITION_MAXVALUE_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
- if (no_new_partitions == 0)
+ if (num_new_partitions == 0)
{
my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
DBUG_RETURN(TRUE);
}
if (tab_part_info->is_sub_partitioned())
{
- if (alt_part_info->no_subparts == 0)
- alt_part_info->no_subparts= tab_part_info->no_subparts;
- else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
+ if (alt_part_info->num_subparts == 0)
+ alt_part_info->num_subparts= tab_part_info->num_subparts;
+ else if (alt_part_info->num_subparts != tab_part_info->num_subparts)
{
my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
check_total_partitions= new_total_partitions*
- alt_part_info->no_subparts;
+ alt_part_info->num_subparts;
}
if (check_total_partitions > MAX_PARTITIONS)
{
@@ -4408,8 +4719,8 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
if (alt_part_info->set_up_defaults_for_partitioning(table->file,
- ULL(0),
- tab_part_info->no_parts))
+ ULL(0),
+ tab_part_info->num_parts))
{
DBUG_RETURN(TRUE);
}
@@ -4484,7 +4795,7 @@ that are reorganised.
uint lower_2n= upper_2n >> 1;
bool all_parts= TRUE;
if (tab_part_info->linear_hash_ind &&
- no_new_partitions < upper_2n)
+ num_new_partitions < upper_2n)
{
/*
An analysis of which parts needs reorganisation shows that it is
@@ -4493,7 +4804,7 @@ that are reorganised.
onwards it starts again from partition 0 and goes on until
it reaches p(upper_2n - 1). If the last new partition reaches
beyond upper_2n - 1 then the first interval will end with
- p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n).
+ p(lower_2n - 1) and start with p(num_orig_partitions - lower_2n).
If lower_2n partitions are added then p0 to p(lower_2n - 1) will
be reorganised which means that the two interval becomes one
interval at this point. Thus only when adding less than
@@ -4521,7 +4832,7 @@ that are reorganised.
to TRUE. In this case we don't get into this if-part at all.
*/
all_parts= FALSE;
- if (no_new_partitions >= lower_2n)
+ if (num_new_partitions >= lower_2n)
{
/*
In this case there is only one interval since the two intervals
@@ -4537,8 +4848,8 @@ that are reorganised.
Also in this case there is only one interval since we are not
going over a 2**n boundary
*/
- start_part= no_orig_partitions - lower_2n;
- end_part= start_part + (no_new_partitions - 1);
+ start_part= num_orig_partitions - lower_2n;
+ end_part= start_part + (num_new_partitions - 1);
}
else
{
@@ -4547,7 +4858,7 @@ that are reorganised.
new parts that would ensure that the intervals become
overlapping.
*/
- start_part= no_orig_partitions - lower_2n;
+ start_part= num_orig_partitions - lower_2n;
end_part= upper_2n - 1;
start_sec_part= 0;
end_sec_part= new_total_partitions - (upper_2n + 1);
@@ -4564,7 +4875,7 @@ that are reorganised.
{
p_elem->part_state= PART_CHANGED;
}
- } while (++part_no < no_orig_partitions);
+ } while (++part_no < num_orig_partitions);
}
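/*
  Illustrative sketch (not from the patch above; the helper name and the
  numbers are assumed): the LINEAR HASH ADD PARTITION interval for the
  simple case that does not cross a 2**n boundary. With 6 original
  partitions (lower_2n= 4) and 2 new partitions the changed interval is
  p2..p3, i.e. the partitions whose rows may move into the new ones.
*/
static void example_linear_hash_add_interval(uint num_orig_partitions,
                                             uint num_new_partitions,
                                             uint lower_2n,
                                             uint *start_part,
                                             uint *end_part)
{
  /* Mirrors the "not going over a 2**n boundary" branch above. */
  *start_part= num_orig_partitions - lower_2n;           /* 6 - 4 = 2 */
  *end_part= *start_part + (num_new_partitions - 1);     /* 2 + 1 = 3 */
}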
/*
Need to concatenate the lists here to make it possible to check the
@@ -4587,8 +4898,8 @@ that are reorganised.
mem_alloc_error(1);
DBUG_RETURN(TRUE);
}
- } while (++part_count < no_new_partitions);
- tab_part_info->no_parts+= no_new_partitions;
+ } while (++part_count < num_new_partitions);
+ tab_part_info->num_parts+= num_new_partitions;
}
/*
If we specify partitions explicitly we don't use defaults anymore.
@@ -4603,7 +4914,7 @@ that are reorganised.
DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
tab_part_info->use_default_partitions= FALSE;
}
- tab_part_info->use_default_no_partitions= FALSE;
+ tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
@@ -4617,8 +4928,8 @@ that are reorganised.
command to drop the partition failed in the middle.
*/
uint part_count= 0;
- uint no_parts_dropped= alter_info->partition_names.elements;
- uint no_parts_found= 0;
+ uint num_parts_dropped= alter_info->partition_names.elements;
+ uint num_parts_found= 0;
List_iterator<partition_element> part_it(tab_part_info->partitions);
tab_part_info->is_auto_partitioned= FALSE;
@@ -4628,7 +4939,7 @@ that are reorganised.
my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
DBUG_RETURN(TRUE);
}
- if (no_parts_dropped >= tab_part_info->no_parts)
+ if (num_parts_dropped >= tab_part_info->num_parts)
{
my_error(ER_DROP_LAST_PARTITION, MYF(0));
DBUG_RETURN(TRUE);
@@ -4642,11 +4953,11 @@ that are reorganised.
/*
Set state to indicate that the partition is to be dropped.
*/
- no_parts_found++;
+ num_parts_found++;
part_elem->part_state= PART_TO_BE_DROPPED;
}
- } while (++part_count < tab_part_info->no_parts);
- if (no_parts_found != no_parts_dropped)
+ } while (++part_count < tab_part_info->num_parts);
+ if (num_parts_found != num_parts_dropped)
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
DBUG_RETURN(TRUE);
@@ -4656,14 +4967,14 @@ that are reorganised.
my_error(ER_ROW_IS_REFERENCED, MYF(0));
DBUG_RETURN(TRUE);
}
- tab_part_info->no_parts-= no_parts_dropped;
+ tab_part_info->num_parts-= num_parts_dropped;
}
else if (alter_info->flags & ALTER_REBUILD_PARTITION)
{
- uint no_parts_found;
- uint no_parts_opt= alter_info->partition_names.elements;
- no_parts_found= set_part_state(alter_info, tab_part_info, PART_CHANGED);
- if (no_parts_found != no_parts_opt &&
+ uint num_parts_found;
+ uint num_parts_opt= alter_info->partition_names.elements;
+ num_parts_found= set_part_state(alter_info, tab_part_info, PART_CHANGED);
+ if (num_parts_found != num_parts_opt &&
(!(alter_info->flags & ALTER_ALL_PARTITION)))
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REBUILD");
@@ -4677,20 +4988,20 @@ that are reorganised.
}
else if (alter_info->flags & ALTER_COALESCE_PARTITION)
{
- uint no_parts_coalesced= alter_info->no_parts;
- uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
+ uint num_parts_coalesced= alter_info->num_parts;
+ uint num_parts_remain= tab_part_info->num_parts - num_parts_coalesced;
List_iterator<partition_element> part_it(tab_part_info->partitions);
if (tab_part_info->part_type != HASH_PARTITION)
{
my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
DBUG_RETURN(TRUE);
}
- if (no_parts_coalesced == 0)
+ if (num_parts_coalesced == 0)
{
my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
DBUG_RETURN(TRUE);
}
- if (no_parts_coalesced >= tab_part_info->no_parts)
+ if (num_parts_coalesced >= tab_part_info->num_parts)
{
my_error(ER_DROP_LAST_PARTITION, MYF(0));
DBUG_RETURN(TRUE);
@@ -4738,21 +5049,21 @@ state of p1.
uint upper_2n= tab_part_info->linear_hash_mask + 1;
uint lower_2n= upper_2n >> 1;
all_parts= FALSE;
- if (no_parts_coalesced >= lower_2n)
+ if (num_parts_coalesced >= lower_2n)
{
all_parts= TRUE;
}
- else if (no_parts_remain >= lower_2n)
+ else if (num_parts_remain >= lower_2n)
{
- end_part= tab_part_info->no_parts - (lower_2n + 1);
- start_part= no_parts_remain - lower_2n;
+ end_part= tab_part_info->num_parts - (lower_2n + 1);
+ start_part= num_parts_remain - lower_2n;
}
else
{
start_part= 0;
- end_part= tab_part_info->no_parts - (lower_2n + 1);
+ end_part= tab_part_info->num_parts - (lower_2n + 1);
end_sec_part= (lower_2n >> 1) - 1;
- start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1));
+ start_sec_part= end_sec_part - (lower_2n - (num_parts_remain + 1));
}
}
do
@@ -4763,19 +5074,19 @@ state of p1.
(part_count >= start_part && part_count <= end_part) ||
(part_count >= start_sec_part && part_count <= end_sec_part)))
p_elem->part_state= PART_CHANGED;
- if (++part_count > no_parts_remain)
+ if (++part_count > num_parts_remain)
{
if (*fast_alter_partition)
p_elem->part_state= PART_REORGED_DROPPED;
else
part_it.remove();
}
- } while (part_count < tab_part_info->no_parts);
- tab_part_info->no_parts= no_parts_remain;
+ } while (part_count < tab_part_info->num_parts);
+ tab_part_info->num_parts= num_parts_remain;
}
if (!(alter_info->flags & ALTER_TABLE_REORG))
{
- tab_part_info->use_default_no_partitions= FALSE;
+ tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
@@ -4792,33 +5103,32 @@ state of p1.
range as those changed from.
This command can be used on RANGE and LIST partitions.
*/
- uint no_parts_reorged= alter_info->partition_names.elements;
- uint no_parts_new= thd->work_part_info->partitions.elements;
- partition_info *alt_part_info= thd->work_part_info;
+ uint num_parts_reorged= alter_info->partition_names.elements;
+ uint num_parts_new= thd->work_part_info->partitions.elements;
uint check_total_partitions;
tab_part_info->is_auto_partitioned= FALSE;
- if (no_parts_reorged > tab_part_info->no_parts)
+ if (num_parts_reorged > tab_part_info->num_parts)
{
my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
DBUG_RETURN(TRUE);
}
if (!(tab_part_info->part_type == RANGE_PARTITION ||
tab_part_info->part_type == LIST_PARTITION) &&
- (no_parts_new != no_parts_reorged))
+ (num_parts_new != num_parts_reorged))
{
my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
DBUG_RETURN(TRUE);
}
if (tab_part_info->is_sub_partitioned() &&
- alt_part_info->no_subparts &&
- alt_part_info->no_subparts != tab_part_info->no_subparts)
+ alt_part_info->num_subparts &&
+ alt_part_info->num_subparts != tab_part_info->num_subparts)
{
my_error(ER_PARTITION_WRONG_NO_SUBPART_ERROR, MYF(0));
DBUG_RETURN(TRUE);
}
- check_total_partitions= tab_part_info->no_parts + no_parts_new;
- check_total_partitions-= no_parts_reorged;
+ check_total_partitions= tab_part_info->num_parts + num_parts_new;
+ check_total_partitions-= num_parts_reorged;
if (check_total_partitions > MAX_PARTITIONS)
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -4826,7 +5136,7 @@ state of p1.
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
- alt_part_info->no_subparts= tab_part_info->no_subparts;
+ alt_part_info->num_subparts= tab_part_info->num_subparts;
DBUG_ASSERT(!alt_part_info->use_default_partitions);
if (alt_part_info->set_up_defaults_for_partitioning(table->file,
ULL(0),
@@ -4874,9 +5184,7 @@ the generated partition syntax in a correct manner.
uint part_count= 0;
bool found_first= FALSE;
bool found_last= FALSE;
- bool is_last_partition_reorged;
uint drop_count= 0;
- longlong tab_max_range= 0, alt_max_range= 0;
do
{
partition_element *part_elem= tab_it++;
@@ -4886,7 +5194,13 @@ the generated partition syntax in a correct manner.
{
is_last_partition_reorged= TRUE;
drop_count++;
- tab_max_range= part_elem->range_value;
+ if (tab_part_info->column_list)
+ {
+ List_iterator<part_elem_value> p(part_elem->list_val_list);
+ tab_max_elem_val= p++;
+ }
+ else
+ tab_max_range= part_elem->range_value;
if (*fast_alter_partition &&
tab_part_info->temp_partitions.push_back(part_elem))
{
@@ -4898,20 +5212,28 @@ the generated partition syntax in a correct manner.
if (!found_first)
{
uint alt_part_count= 0;
- found_first= TRUE;
+ partition_element *alt_part_elem;
List_iterator<partition_element>
alt_it(alt_part_info->partitions);
+ found_first= TRUE;
do
{
- partition_element *alt_part_elem= alt_it++;
- alt_max_range= alt_part_elem->range_value;
+ alt_part_elem= alt_it++;
+ if (tab_part_info->column_list)
+ {
+ List_iterator<part_elem_value> p(alt_part_elem->list_val_list);
+ alt_max_elem_val= p++;
+ }
+ else
+ alt_max_range= alt_part_elem->range_value;
+
if (*fast_alter_partition)
alt_part_elem->part_state= PART_TO_BE_ADDED;
if (alt_part_count == 0)
tab_it.replace(alt_part_elem);
else
tab_it.after(alt_part_elem);
- } while (++alt_part_count < no_parts_new);
+ } while (++alt_part_count < num_parts_new);
}
else if (found_last)
{
@@ -4926,32 +5248,13 @@ the generated partition syntax in a correct manner.
if (found_first)
found_last= TRUE;
}
- } while (++part_count < tab_part_info->no_parts);
- if (drop_count != no_parts_reorged)
+ } while (++part_count < tab_part_info->num_parts);
+ if (drop_count != num_parts_reorged)
{
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
DBUG_RETURN(TRUE);
}
- if (tab_part_info->part_type == RANGE_PARTITION &&
- ((is_last_partition_reorged &&
- alt_max_range < tab_max_range) ||
- (!is_last_partition_reorged &&
- alt_max_range != tab_max_range)))
- {
- /*
- For range partitioning the total resulting range before and
- after the change must be the same except in one case. This is
- when the last partition is reorganised, in this case it is
- acceptable to increase the total range.
- The reason is that it is not allowed to have "holes" in the
- middle of the ranges and thus we should not allow to reorganise
- to create "holes". Also we should not allow using REORGANIZE
- to drop data.
- */
- my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
- DBUG_RETURN(TRUE);
- }
- tab_part_info->no_parts= check_total_partitions;
+ tab_part_info->num_parts= check_total_partitions;
}
}
else
@@ -4967,13 +5270,45 @@ the generated partition syntax in a correct manner.
!alt_part_info->use_default_subpartitions)
{
tab_part_info->use_default_subpartitions= FALSE;
- tab_part_info->use_default_no_subpartitions= FALSE;
+ tab_part_info->use_default_num_subpartitions= FALSE;
}
if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
- table->file, ULL(0), FALSE))
+ table->file, ULL(0), TRUE))
{
DBUG_RETURN(TRUE);
}
+ /*
+ The check below needs to be performed after check_partition_info
+ since this function "fixes" the item trees of the new partitions
+ to reorganize into
+ */
+ if (alter_info->flags == ALTER_REORGANIZE_PARTITION &&
+ tab_part_info->part_type == RANGE_PARTITION &&
+ ((is_last_partition_reorged &&
+ (tab_part_info->column_list ?
+ (tab_part_info->compare_column_values(
+ alt_max_elem_val->col_val_array,
+ tab_max_elem_val->col_val_array) < 0) :
+ alt_max_range < tab_max_range)) ||
+ (!is_last_partition_reorged &&
+ (tab_part_info->column_list ?
+ (tab_part_info->compare_column_values(
+ alt_max_elem_val->col_val_array,
+ tab_max_elem_val->col_val_array) != 0) :
+ alt_max_range != tab_max_range))))
+ {
+ /*
+ For range partitioning the total resulting range before and
+ after the change must be the same except in one case. This is
+ when the last partition is reorganised, in this case it is
+ acceptable to increase the total range.
+ The reason is that it is not allowed to have "holes" in the
+ middle of the ranges and thus we should not allow a reorganisation
+ that creates such "holes".
+ */
+ my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
}
}
else
@@ -5087,6 +5422,10 @@ the generated partition syntax in a correct manner.
{
DBUG_PRINT("info", ("partition changed"));
*partition_changed= TRUE;
+ if (thd->work_part_info->fix_parser_data(thd))
+ {
+ DBUG_RETURN(TRUE);
+ }
}
/*
Set up partition default_engine_type either from the create_info
@@ -5247,8 +5586,8 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
part_it.remove();
remove_count++;
}
- } while (++i < part_info->no_parts);
- part_info->no_parts-= remove_count;
+ } while (++i < part_info->num_parts);
+ part_info->num_parts-= remove_count;
DBUG_RETURN(FALSE);
}
@@ -5370,7 +5709,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
char normal_path[FN_REFLEN];
List_iterator<partition_element> part_it(part_info->partitions);
uint temp_partitions= part_info->temp_partitions.elements;
- uint no_elements= part_info->partitions.elements;
+ uint num_elements= part_info->partitions.elements;
uint i= 0;
DBUG_ENTER("write_log_changed_partitions");
@@ -5383,7 +5722,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
if (part_info->is_sub_partitioned())
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
- uint no_subparts= part_info->no_subparts;
+ uint num_subparts= part_info->num_subparts;
uint j= 0;
do
{
@@ -5412,7 +5751,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
*next_entry= log_entry->entry_pos;
sub_elem->log_entry= log_entry;
insert_part_info_log_entry_list(part_info, log_entry);
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -5440,7 +5779,7 @@ static bool write_log_changed_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
insert_part_info_log_entry_list(part_info, log_entry);
}
}
- } while (++i < no_elements);
+ } while (++i < num_elements);
DBUG_RETURN(FALSE);
}
@@ -5466,14 +5805,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
char tmp_path[FN_LEN];
List_iterator<partition_element> part_it(part_info->partitions);
List_iterator<partition_element> temp_it(part_info->temp_partitions);
- uint no_temp_partitions= part_info->temp_partitions.elements;
- uint no_elements= part_info->partitions.elements;
+ uint num_temp_partitions= part_info->temp_partitions.elements;
+ uint num_elements= part_info->partitions.elements;
DBUG_ENTER("write_log_dropped_partitions");
ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
if (temp_list)
- no_elements= no_temp_partitions;
- while (no_elements--)
+ num_elements= num_temp_partitions;
+ while (num_elements--)
{
partition_element *part_elem;
if (temp_list)
@@ -5487,14 +5826,14 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
uint name_variant;
if (part_elem->part_state == PART_CHANGED ||
(part_elem->part_state == PART_TO_BE_ADDED &&
- no_temp_partitions))
+ num_temp_partitions))
name_variant= TEMP_PART_NAME;
else
name_variant= NORMAL_PART_NAME;
if (part_info->is_sub_partitioned())
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
- uint no_subparts= part_info->no_subparts;
+ uint num_subparts= part_info->num_subparts;
uint j= 0;
do
{
@@ -5514,7 +5853,7 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
*next_entry= log_entry->entry_pos;
sub_elem->log_entry= log_entry;
insert_part_info_log_entry_list(part_info, log_entry);
- } while (++j < no_subparts);
+ } while (++j < num_subparts);
}
else
{
@@ -6612,16 +6951,19 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
IMPLEMENTATION
There are three available interval analyzer functions:
(1) get_part_iter_for_interval_via_mapping
- (2) get_part_iter_for_interval_via_walking
+ (2) get_part_iter_for_interval_cols_via_map
+ (3) get_part_iter_for_interval_via_walking
They all have limited applicability:
(1) is applicable for "PARTITION BY <RANGE|LIST>(func(t.field))", where
func is a monotonic function.
-
- (2) is applicable for
+
+ (2) is applicable for "PARTITION BY <RANGE|LIST> COLUMNS (field_list)"
+
+ (3) is applicable for
"[SUB]PARTITION BY <any-partitioning-type>(any_func(t.integer_field))"
- If both are applicable, (1) is preferred over (2).
+ If both (1) and (3) are applicable, (1) is preferred over (3).
This function sets part_info::get_part_iter_for_interval according to
these criteria, and also sets some auxiliary fields that the function
@@ -6641,10 +6983,19 @@ static void set_up_range_analysis_info(partition_info *part_info)
switch (part_info->part_type) {
case RANGE_PARTITION:
case LIST_PARTITION:
- if (part_info->part_expr->get_monotonicity_info() != NON_MONOTONIC)
+ if (!part_info->column_list)
+ {
+ if (part_info->part_expr->get_monotonicity_info() != NON_MONOTONIC)
+ {
+ part_info->get_part_iter_for_interval=
+ get_part_iter_for_interval_via_mapping;
+ goto setup_subparts;
+ }
+ }
+ else
{
part_info->get_part_iter_for_interval=
- get_part_iter_for_interval_via_mapping;
+ get_part_iter_for_interval_cols_via_map;
goto setup_subparts;
}
default:
@@ -6655,7 +7006,7 @@ static void set_up_range_analysis_info(partition_info *part_info)
Check if get_part_iter_for_interval_via_walking() can be used for
partitioning
*/
- if (part_info->no_part_fields == 1)
+ if (part_info->num_part_fields == 1)
{
Field *field= part_info->part_field_array[0];
switch (field->type()) {
@@ -6677,7 +7028,7 @@ setup_subparts:
Check if get_part_iter_for_interval_via_walking() can be used for
subpartitioning
*/
- if (part_info->no_subpart_fields == 1)
+ if (part_info->num_subpart_fields == 1)
{
Field *field= part_info->subpart_field_array[0];
switch (field->type()) {
@@ -6695,9 +7046,118 @@ setup_subparts:
}
+/*
+ This function takes memory containing fields packed in opt-range format
+ and stores them in record format. To avoid having to worry about how
+ the lengths of the fields are calculated in opt-range format, we pass
+ an array with the length used for each field in store_length_array.
+
+ SYNOPSIS
+ store_tuple_to_record()
+ pfield Field array
+ store_length_array Array of field lengths
+ value Memory where fields are stored
+ value_end End of memory
+
+ RETURN VALUE
+ nparts Number of fields assigned
+*/
+uint32 store_tuple_to_record(Field **pfield,
+ uint32 *store_length_array,
+ uchar *value,
+ uchar *value_end)
+{
+ /* This function is inspired by store_key_image_rec. */
+ uint32 nparts= 0;
+ uchar *loc_value;
+ while (value < value_end)
+ {
+ loc_value= value;
+ if ((*pfield)->real_maybe_null())
+ {
+ if (*loc_value)
+ (*pfield)->set_null();
+ else
+ (*pfield)->set_notnull();
+ loc_value++;
+ }
+ uint len= (*pfield)->pack_length();
+ (*pfield)->set_key_image(loc_value, len);
+ value+= *store_length_array;
+ store_length_array++;
+ nparts++;
+ pfield++;
+ }
+ return nparts;
+}
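/*
  Illustrative sketch (not from the patch; the function name and the use of
  MAX_REF_PARTS as the array bound are assumed): one plausible way a caller
  could combine the helpers added here, building store_length_array with
  get_partition_field_store_length() (added near the end of this file) and
  then unpacking an opt-range key image into the record.
*/
static uint32 example_unpack_min_key(partition_info *part_info,
                                     uchar *min_value, uint min_len)
{
  uint32 store_length_array[MAX_REF_PARTS];
  uint32 *length_ptr= store_length_array;
  Field **field= part_info->part_field_array;

  /* One stored length per partitioning field; NULL indicator and VARCHAR
     length bytes are included by get_partition_field_store_length(). */
  for (; *field; field++)
    *(length_ptr++)= get_partition_field_store_length(*field);

  /* Unpack the packed key image into the table record buffer. */
  return store_tuple_to_record(part_info->part_field_array,
                               store_length_array,
                               min_value,
                               min_value + min_len);
}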
+
+/*
+ RANGE(columns) partitioning: compare value bound and probe tuple.
+
+ The value bound is always a full tuple (but may include the MAXVALUE
+ special value).
+
+ The probe tuple may be a prefix of the partitioning tuple. The tail_is_min
+ parameter specifies how the missing suffix components are treated: as the
+ minimum possible values (tail_is_min= TRUE) or as MAXVALUE
+ (tail_is_min= FALSE).
+*/
+
+static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec)
+{
+ partition_info *part_info= val->part_info;
+ Field **field= part_info->part_field_array;
+ Field **fields_end= field + nvals_in_rec;
+ int res;
+
+ for (; field != fields_end; field++, val++)
+ {
+ if (val->max_value)
+ return -1;
+ if ((*field)->is_null())
+ {
+ if (val->null_value)
+ continue;
+ return -1;
+ }
+ if (val->null_value)
+ return +1;
+ res= (*field)->cmp((const uchar*)val->column_value);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+
+static int cmp_rec_and_tuple_prune(part_column_list_val *val,
+ uint32 n_vals_in_rec,
+ bool tail_is_min)
+{
+ int cmp;
+ Field **field;
+ partition_info *part_info;
+ if ((cmp= cmp_rec_and_tuple(val, n_vals_in_rec)))
+ return cmp;
+ part_info= val->part_info;
+ field= part_info->part_field_array + n_vals_in_rec;
+ for (; *field; field++, val++)
+ {
+ if (tail_is_min)
+ return -1;
+ if (!tail_is_min && !val->max_value)
+ return +1;
+ }
+ return 0;
+}
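/*
  Illustrative sketch (not from the patch; names are assumed, NULL handling
  left out): the comparison logic above restated with plain ints. For the
  bound (10, 20) and the probe prefix (10), tail_is_min makes the probe act
  as (10, -inf) and sort before the bound (-1); otherwise it acts as
  (10, +inf) and sorts after (+1) unless every remaining bound column is
  MAXVALUE.
*/
static int example_cmp_prefix_probe(const int *probe, uint probe_len,
                                    const int *bound,
                                    const bool *bound_is_max,
                                    uint bound_len, bool tail_is_min)
{
  uint i;
  for (i= 0; i < probe_len; i++)
  {
    if (bound_is_max[i])
      return -1;                      /* any value sorts before MAXVALUE */
    if (probe[i] != bound[i])
      return probe[i] < bound[i] ? -1 : +1;
  }
  for (; i < bound_len; i++)
  {
    if (tail_is_min)
      return -1;                      /* probe tail treated as -infinity */
    if (!bound_is_max[i])
      return +1;                      /* probe tail above any finite bound */
  }
  return 0;
}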
+
+
typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
bool include_endpoint);
+typedef uint32 (*get_col_endpoint_func)(partition_info*, bool left_endpoint,
+ bool include_endpoint,
+ uint32 num_parts);
+
/*
Partitioning Interval Analysis: Initialize the iterator for "mapping" case
@@ -6733,18 +7193,145 @@ typedef uint32 (*get_endpoint_func)(partition_info*, bool left_endpoint,
-1 - All partitions would match (iterator not initialized)
*/
+uint32 get_partition_id_cols_range_for_endpoint(partition_info *part_info,
+ bool left_endpoint,
+ bool include_endpoint,
+ uint32 nparts)
+{
+ uint max_partition= part_info->num_parts - 1;
+ uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+ part_column_list_val *range_col_array= part_info->range_col_array;
+ uint num_columns= part_info->part_field_list.elements;
+ bool tailf= !(left_endpoint ^ include_endpoint);
+ DBUG_ENTER("get_partition_id_cols_range_for_endpoint");
+
+ /* Get the partitioning function value for the endpoint */
+ while (max_part_id > min_part_id)
+ {
+ loc_part_id= (max_part_id + min_part_id + 1) >> 1;
+ if (cmp_rec_and_tuple_prune(range_col_array + loc_part_id*num_columns,
+ nparts, tailf) >= 0)
+ min_part_id= loc_part_id + 1;
+ else
+ max_part_id= loc_part_id - 1;
+ }
+ loc_part_id= max_part_id;
+ if (loc_part_id < max_partition &&
+ cmp_rec_and_tuple_prune(range_col_array + (loc_part_id+1)*num_columns,
+ nparts, tailf) >= 0
+ )
+ {
+ loc_part_id++;
+ }
+ if (left_endpoint)
+ {
+ if (cmp_rec_and_tuple_prune(range_col_array + loc_part_id*num_columns,
+ nparts, tailf) >= 0)
+ loc_part_id++;
+ }
+ else
+ {
+ if (loc_part_id < max_partition)
+ {
+ int res= cmp_rec_and_tuple_prune(range_col_array +
+ loc_part_id * num_columns,
+ nparts, tailf);
+ if (!res)
+ loc_part_id += test(include_endpoint);
+ else if (res > 0)
+ loc_part_id++;
+ }
+ loc_part_id++;
+ }
+ DBUG_RETURN(loc_part_id);
+}
+
+
+int get_part_iter_for_interval_cols_via_map(partition_info *part_info,
+ bool is_subpart,
+ uint32 *store_length_array,
+ uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len,
+ uint flags,
+ PARTITION_ITERATOR *part_iter)
+{
+ uint32 nparts;
+ get_col_endpoint_func get_col_endpoint;
+ DBUG_ENTER("get_part_iter_for_interval_cols_via_map");
+
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ get_col_endpoint= get_partition_id_cols_range_for_endpoint;
+ part_iter->get_next= get_next_partition_id_range;
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ get_col_endpoint= get_partition_id_cols_list_for_endpoint;
+ part_iter->get_next= get_next_partition_id_list;
+ part_iter->part_info= part_info;
+ DBUG_ASSERT(part_info->num_list_values);
+ }
+ else
+ assert(0);
+
+ if (flags & NO_MIN_RANGE)
+ part_iter->part_nums.start= part_iter->part_nums.cur= 0;
+ else
+ {
+ // Copy from min_value to record
+ nparts= store_tuple_to_record(part_info->part_field_array,
+ store_length_array,
+ min_value,
+ min_value + min_len);
+ part_iter->part_nums.start= part_iter->part_nums.cur=
+ get_col_endpoint(part_info, TRUE, !(flags & NEAR_MIN),
+ nparts);
+ }
+ if (flags & NO_MAX_RANGE)
+ {
+ if (part_info->part_type == RANGE_PARTITION)
+ part_iter->part_nums.end= part_info->num_parts;
+ else /* LIST_PARTITION */
+ {
+ DBUG_ASSERT(part_info->part_type == LIST_PARTITION);
+ part_iter->part_nums.end= part_info->num_list_values;
+ }
+ }
+ else
+ {
+ // Copy from max_value to record
+ nparts= store_tuple_to_record(part_info->part_field_array,
+ store_length_array,
+ max_value,
+ max_value + max_len);
+ part_iter->part_nums.end= get_col_endpoint(part_info, FALSE,
+ !(flags & NEAR_MAX),
+ nparts);
+ }
+ if (part_iter->part_nums.start == part_iter->part_nums.end)
+ DBUG_RETURN(0);
+ DBUG_RETURN(1);
+}
+
+
int get_part_iter_for_interval_via_mapping(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array, /* ignored */
uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len, /* ignored */
uint flags,
PARTITION_ITERATOR *part_iter)
{
- DBUG_ASSERT(!is_subpart);
Field *field= part_info->part_field_array[0];
uint32 max_endpoint_val;
get_endpoint_func get_endpoint;
bool can_match_multiple_values; /* is not '=' */
uint field_len= field->pack_length_in_rec();
+ DBUG_ENTER("get_part_iter_for_interval_via_mapping");
+ DBUG_ASSERT(!is_subpart);
+ (void) store_length_array;
+ (void)min_len;
+ (void)max_len;
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
if (part_info->part_type == RANGE_PARTITION)
@@ -6753,7 +7340,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
get_endpoint= get_partition_id_range_for_endpoint_charset;
else
get_endpoint= get_partition_id_range_for_endpoint;
- max_endpoint_val= part_info->no_parts;
+ max_endpoint_val= part_info->num_parts;
part_iter->get_next= get_next_partition_id_range;
}
else if (part_info->part_type == LIST_PARTITION)
@@ -6763,7 +7350,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
get_endpoint= get_list_array_idx_for_endpoint_charset;
else
get_endpoint= get_list_array_idx_for_endpoint;
- max_endpoint_val= part_info->no_list_values;
+ max_endpoint_val= part_info->num_list_values;
part_iter->get_next= get_next_partition_id_list;
part_iter->part_info= part_info;
if (max_endpoint_val == 0)
@@ -6776,7 +7363,7 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
part_iter->part_nums.start= part_iter->part_nums.end= 0;
part_iter->part_nums.cur= 0;
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
- return -1;
+ DBUG_RETURN(-1);
}
}
else
@@ -6808,11 +7395,11 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
{
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
part_iter->part_nums.start= part_iter->part_nums.cur= 0;
- if (*max_value && !(flags & NO_MAX_RANGE))
+ if (!(flags & NO_MAX_RANGE) && *max_value)
{
/* The right bound is X <= NULL, i.e. it is a "X IS NULL" interval */
part_iter->part_nums.end= 0;
- return 1;
+ DBUG_RETURN(1);
}
}
else
@@ -6836,11 +7423,11 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
part_iter->part_nums.cur= part_iter->part_nums.start= 0;
part_iter->part_nums.end= 0;
part_iter->ret_null_part= part_iter->ret_null_part_orig= TRUE;
- return 1;
+ DBUG_RETURN(1);
}
part_iter->part_nums.cur= part_iter->part_nums.start;
if (part_iter->part_nums.start == max_endpoint_val)
- return 0; /* No partitions */
+ DBUG_RETURN(0); /* No partitions */
}
}
@@ -6854,14 +7441,14 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
part_iter->part_nums.end= get_endpoint(part_info, 0, include_endp);
if (part_iter->part_nums.start >= part_iter->part_nums.end &&
!part_iter->ret_null_part)
- return 0; /* No partitions */
+ DBUG_RETURN(0); /* No partitions */
}
- return 1; /* Ok, iterator initialized */
+ DBUG_RETURN(1); /* Ok, iterator initialized */
}
/* See get_part_iter_for_interval_via_walking for definition of what this is */
-#define MAX_RANGE_TO_WALK 10
+#define MAX_RANGE_TO_WALK 32
/*
@@ -6897,16 +7484,6 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
Intervals with +inf/-inf, and [NULL, c1] interval can be processed but
that is more tricky and I don't have time to do it right now.
- Additionally we have these requirements:
- * number of values in the interval must be less then number of
- [sub]partitions, and
- * Number of values in the interval must be less then MAX_RANGE_TO_WALK.
-
- The rationale behind these requirements is that if they are not met
- we're likely to hit most of the partitions and traversing the interval
- will only add overhead. So it's better return "all partitions used" in
- that case.
-
RETURN
0 - No matching partitions, iterator not initialized
1 - Some partitions would match, iterator initialized for traversing them
@@ -6914,25 +7491,32 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
*/
int get_part_iter_for_interval_via_walking(partition_info *part_info,
- bool is_subpart,
- uchar *min_value, uchar *max_value,
- uint flags,
- PARTITION_ITERATOR *part_iter)
+ bool is_subpart,
+ uint32 *store_length_array, /* ignored */
+ uchar *min_value, uchar *max_value,
+ uint min_len, uint max_len, /* ignored */
+ uint flags,
+ PARTITION_ITERATOR *part_iter)
{
Field *field;
uint total_parts;
partition_iter_func get_next_func;
+ DBUG_ENTER("get_part_iter_for_interval_via_walking");
+ (void)store_length_array;
+ (void)min_len;
+ (void)max_len;
+
part_iter->ret_null_part= part_iter->ret_null_part_orig= FALSE;
if (is_subpart)
{
field= part_info->subpart_field_array[0];
- total_parts= part_info->no_subparts;
+ total_parts= part_info->num_subparts;
get_next_func= get_next_subpartition_via_walking;
}
else
{
field= part_info->part_field_array[0];
- total_parts= part_info->no_parts;
+ total_parts= part_info->num_parts;
get_next_func= get_next_partition_via_walking;
}
@@ -6952,7 +7536,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
if (!part_info->get_subpartition_id(part_info, &part_id))
{
init_single_partition_iterator(part_id, part_iter);
- return 1; /* Ok, iterator initialized */
+ DBUG_RETURN(1); /* Ok, iterator initialized */
}
}
else
@@ -6965,10 +7549,10 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
if (!res)
{
init_single_partition_iterator(part_id, part_iter);
- return 1; /* Ok, iterator initialized */
+ DBUG_RETURN(1); /* Ok, iterator initialized */
}
}
- return 0; /* No partitions match */
+ DBUG_RETURN(0); /* No partitions match */
}
if ((field->real_maybe_null() &&
@@ -6976,7 +7560,7 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
(!(flags & NO_MAX_RANGE) && *max_value))) || // X <? NULL
(flags & (NO_MIN_RANGE | NO_MAX_RANGE))) // -inf at any bound
{
- return -1; /* Can't handle this interval, have to use all partitions */
+ DBUG_RETURN(-1); /* Can't handle this interval, have to use all partitions */
}
/* Get integers for left and right interval bound */
@@ -6995,20 +7579,36 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
an empty interval by "wrapping around" a + 4G-1 + 1 = a.
*/
if ((ulonglong)b - (ulonglong)a == ~0ULL)
- return -1;
+ DBUG_RETURN(-1);
a += test(flags & NEAR_MIN);
b += test(!(flags & NEAR_MAX));
ulonglong n_values= b - a;
-
- if (n_values > total_parts || n_values > MAX_RANGE_TO_WALK)
- return -1;
+
+ /*
+ Will it pay off to enumerate all values in the [a..b] range and evaluate
+ the partitioning function for every value? It depends on
+ 1. whether we'll be able to infer that some partitions are not used
+ 2. if time savings from not scanning these partitions will be greater
+ than time spent in enumeration.
+ We will assume that the cost of accessing one extra partition is greater
+ than the cost of evaluating the partitioning function O(#partitions).
+ This means we should jump at any chance to eliminate a partition, which
+ gives us this logic:
+
+ Do the enumeration if
+ - the number of values to enumerate is comparable to the number of
+ partitions, or
+ - there are not many values to enumerate.
+ */
+ if ((n_values > 2*total_parts) && n_values > MAX_RANGE_TO_WALK)
+ DBUG_RETURN(-1);
part_iter->field_vals.start= part_iter->field_vals.cur= a;
part_iter->field_vals.end= b;
part_iter->part_info= part_info;
part_iter->get_next= get_next_func;
- return 1;
+ DBUG_RETURN(1);
}
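/*
  Illustrative sketch (not from the patch; the predicate name and example
  numbers are assumed): the new walk-or-give-up decision on its own. With
  4 partitions, 10 candidate values are walked (10 <= MAX_RANGE_TO_WALK)
  while 100 are not (100 > 8 and 100 > 32); with 64 partitions, 100 values
  are walked again (100 <= 2 * 64).
*/
static bool example_should_walk_interval(ulonglong n_values, uint total_parts)
{
  /* Walk the [a..b] values when their count is comparable to the number of
     partitions or simply small; otherwise report "all partitions used". */
  return !(n_values > 2 * (ulonglong) total_parts &&
           n_values > MAX_RANGE_TO_WALK);
}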
@@ -7056,8 +7656,9 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
DESCRIPTION
This implementation of PARTITION_ITERATOR::get_next() is special for
- LIST partitioning: it enumerates partition ids in
- part_info->list_array[i] where i runs over [min_idx, max_idx] interval.
+ LIST partitioning: it enumerates partition ids in
+ part_info->list_array[i] (list_col_array[i] for COLUMNS LIST
+ partitioning) where i runs over [min_idx, max_idx] interval.
The function conforms to partition_iter_func type.
RETURN
@@ -7079,8 +7680,13 @@ uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
return NOT_A_PARTITION_ID;
}
else
- return part_iter->part_info->list_array[part_iter->
- part_nums.cur++].partition_id;
+ {
+ partition_info *part_info= part_iter->part_info;
+ uint32 num_part= part_iter->part_nums.cur++;
+ return part_info->column_list ?
+ part_info->list_col_array[num_part].partition_id :
+ part_info->list_array[num_part].partition_id;
+ }
}
@@ -7221,5 +7827,17 @@ void create_subpartition_name(char *out, const char *in1,
strxmov(out, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#REN#", NullS);
}
+
+uint get_partition_field_store_length(Field *field)
+{
+ uint store_length;
+
+ store_length= field->key_length();
+ if (field->real_maybe_null())
+ store_length+= HA_KEY_NULL_LENGTH;
+ if (field->real_type() == MYSQL_TYPE_VARCHAR)
+ store_length+= HA_KEY_BLOB_LENGTH;
+ return store_length;
+}
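For illustration only (not part of the patch): a self-contained model of the store-length rule above. The +1/+2 adjustments mirror HA_KEY_NULL_LENGTH and HA_KEY_BLOB_LENGTH, assumed here to be one byte for the NULL flag and two bytes for the VARCHAR length prefix.

#include <iostream>

/*
  Model of get_partition_field_store_length(): the packed key image of a
  field grows by a NULL indicator byte for nullable fields and by a length
  prefix for VARCHAR fields.
*/
static unsigned store_length(unsigned key_length, bool maybe_null, bool is_varchar)
{
  unsigned len= key_length;
  if (maybe_null)
    len+= 1;            /* NULL indicator byte */
  if (is_varchar)
    len+= 2;            /* length prefix */
  return len;
}

int main()
{
  std::cout << store_length(4, false, false) << "\n";  /* INT NOT NULL     -> 4  */
  std::cout << store_length(4, true,  false) << "\n";  /* nullable INT     -> 5  */
  std::cout << store_length(30, true, true)  << "\n";  /* nullable VARCHAR -> 33 */
  return 0;
}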
#endif
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index 282e24f1853..6e1bf8b5728 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -1,4 +1,7 @@
-/* Copyright (C) 2006 MySQL AB
+#ifndef SQL_PARTITION_INCLUDED
+#define SQL_PARTITION_INCLUDED
+
+/* Copyright 2005-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -65,15 +68,19 @@ int get_part_for_delete(const uchar *buf, const uchar *rec0,
void prune_partition_set(const TABLE *table, part_id_range *part_spec);
bool check_partition_info(partition_info *part_info,handlerton **eng_type,
TABLE *table, handler *file, HA_CREATE_INFO *info);
-void set_linear_hash_mask(partition_info *part_info, uint no_parts);
+void set_linear_hash_mask(partition_info *part_info, uint num_parts);
bool fix_partition_func(THD *thd, TABLE *table, bool create_table_ind);
-char *generate_partition_syntax(partition_info *part_info,
- uint *buf_length, bool use_sql_alloc,
- bool show_partition_options);
bool partition_key_modified(TABLE *table, const MY_BITMAP *fields);
void get_partition_set(const TABLE *table, uchar *buf, const uint index,
const key_range *key_spec,
part_id_range *part_spec);
+uint get_partition_field_store_length(Field *field);
+int get_cs_converted_part_value_from_string(THD *thd,
+ Item *item,
+ String *input_str,
+ String *output_str,
+ CHARSET_INFO *cs,
+ bool use_hex);
void get_full_part_id_from_key(const TABLE *table, uchar *buf,
KEY *key_info,
const key_range *key_spec,
@@ -96,6 +103,7 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
bool check_part_func_fields(Field **ptr, bool ok_with_charsets);
bool field_is_partition_charset(Field *field);
+Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs);
/*
A "Get next" function for partition iterator.
@@ -172,13 +180,16 @@ typedef struct st_partition_iter
SYNOPSIS
get_partitions_in_range_iter()
- part_info Partitioning info
- is_subpart
- min_val Left edge, field value in opt_range_key format.
- max_val Right edge, field value in opt_range_key format.
- flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
- NO_MAX_RANGE.
- part_iter Iterator structure to be initialized
+ part_info Partitioning info
+ is_subpart
+ store_length_array Length of fields packed in opt_range_key format
+ min_val Left edge, field value in opt_range_key format
+ max_val Right edge, field value in opt_range_key format
+ min_len Length of minimum value
+ max_len Length of maximum value
+ flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
+ NO_MAX_RANGE
+ part_iter Iterator structure to be initialized
DESCRIPTION
Functions with this signature are used to perform "Partitioning Interval
@@ -191,8 +202,9 @@ typedef struct st_partition_iter
The set of partitions is returned by initializing an iterator in *part_iter
NOTES
- There are currently two functions of this type:
+ There are currently three functions of this type:
- get_part_iter_for_interval_via_walking
+ - get_part_iter_for_interval_cols_via_map
- get_part_iter_for_interval_via_mapping
RETURN
@@ -203,9 +215,12 @@ typedef struct st_partition_iter
typedef int (*get_partitions_in_range_iter)(partition_info *part_info,
bool is_subpart,
+ uint32 *store_length_array,
uchar *min_val, uchar *max_val,
+ uint min_len, uint max_len,
uint flags,
PARTITION_ITERATOR *part_iter);
#include "partition_info.h"
+#endif /* SQL_PARTITION_INCLUDED */
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index bafc601d142..936c9ae8866 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005 MySQL AB
+/* Copyright (C) 2005 MySQL AB, 2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,14 +19,6 @@
#define REPORT_TO_LOG 1
#define REPORT_TO_USER 2
-#ifdef DBUG_OFF
-#define plugin_ref_to_int(A) A
-#define plugin_int_to_ref(A) A
-#else
-#define plugin_ref_to_int(A) (A ? A[0] : NULL)
-#define plugin_int_to_ref(A) &(A)
-#endif
-
extern struct st_mysql_plugin *mysqld_builtins[];
/**
@@ -54,7 +46,8 @@ const LEX_STRING plugin_type_names[MYSQL_MAX_PLUGIN_TYPE_NUM]=
{ C_STRING_WITH_LEN("STORAGE ENGINE") },
{ C_STRING_WITH_LEN("FTPARSER") },
{ C_STRING_WITH_LEN("DAEMON") },
- { C_STRING_WITH_LEN("INFORMATION SCHEMA") }
+ { C_STRING_WITH_LEN("INFORMATION SCHEMA") },
+ { C_STRING_WITH_LEN("REPLICATION") },
};
extern int initialize_schema_table(st_plugin_int *plugin);
@@ -93,7 +86,8 @@ static int min_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
MYSQL_HANDLERTON_INTERFACE_VERSION,
MYSQL_FTPARSER_INTERFACE_VERSION,
MYSQL_DAEMON_INTERFACE_VERSION,
- MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION,
+ MYSQL_REPLICATION_INTERFACE_VERSION,
};
static int cur_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
{
@@ -101,10 +95,13 @@ static int cur_plugin_info_interface_version[MYSQL_MAX_PLUGIN_TYPE_NUM]=
MYSQL_HANDLERTON_INTERFACE_VERSION,
MYSQL_FTPARSER_INTERFACE_VERSION,
MYSQL_DAEMON_INTERFACE_VERSION,
- MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION
+ MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION,
+ MYSQL_REPLICATION_INTERFACE_VERSION,
};
-static bool initialized= 0;
+/* support for Services */
+
+#include "sql_plugin_services.h"
/*
A mutex LOCK_plugin must be acquired before accessing the
@@ -118,6 +115,8 @@ static HASH plugin_hash[MYSQL_MAX_PLUGIN_TYPE_NUM];
static bool reap_needed= false;
static int plugin_array_version=0;
+static bool initialized= 0;
+
/*
write-lock on LOCK_system_variables_hash is required before modifying
the following variables/structures
@@ -230,6 +229,22 @@ extern bool throw_bounds_warning(THD *thd, bool fixed, bool unsignd,
extern bool check_if_table_exists(THD *thd, TABLE_LIST *table, bool *exists);
#endif /* EMBEDDED_LIBRARY */
+static void report_error(int where_to, uint error, ...)
+{
+ va_list args;
+ if (where_to & REPORT_TO_USER)
+ {
+ va_start(args, error);
+ my_printv_error(error, ER(error), MYF(0), args);
+ va_end(args);
+ }
+ if (where_to & REPORT_TO_LOG)
+ {
+ va_start(args, error);
+ error_log_print(ERROR_LEVEL, ER_DEFAULT(error), args);
+ va_end(args);
+ }
+}
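A minimal standalone model of the report_error() helper just added (editor's sketch, not the server code): one varargs call replaces the repeated REPORT_TO_USER / REPORT_TO_LOG blocks at every call site. vfprintf() stands in for my_printv_error() and error_log_print().

#include <cstdarg>
#include <cstdio>

#define REPORT_TO_LOG  1
#define REPORT_TO_USER 2

static void report_error_demo(int where_to, const char *fmt, ...)
{
  va_list args;
  if (where_to & REPORT_TO_USER)
  {
    va_start(args, fmt);
    vfprintf(stdout, fmt, args);    /* stands in for my_printv_error() */
    va_end(args);
  }
  if (where_to & REPORT_TO_LOG)
  {
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);    /* stands in for error_log_print() */
    va_end(args);
  }
}

int main()
{
  report_error_demo(REPORT_TO_USER | REPORT_TO_LOG,
                    "Can't open shared library '%s' (errno: %d %s)\n",
                    "ha_example.so", 2, "No such file");
  return 0;
}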
/****************************************************************************
Value type thunks, allows the C world to play in the C++ world
@@ -350,7 +365,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
{
#ifdef HAVE_DLOPEN
char dlpath[FN_REFLEN];
- uint plugin_dir_len, dummy_errors, dlpathlen;
+ uint plugin_dir_len, dummy_errors, dlpathlen, i;
struct st_plugin_dl *tmp, plugin_dl;
void *sym;
DBUG_ENTER("plugin_dl_add");
@@ -365,10 +380,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
system_charset_info, 1) ||
plugin_dir_len + dl->length + 1 >= FN_REFLEN)
{
- if (report & REPORT_TO_USER)
- my_error(ER_UDF_NO_PATHS, MYF(0));
- if (report & REPORT_TO_LOG)
- sql_print_error("%s", ER(ER_UDF_NO_PATHS));
+ report_error(report, ER_UDF_NO_PATHS);
DBUG_RETURN(0);
}
/* If this dll is already loaded just increase ref_count. */
@@ -393,20 +405,14 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (*errmsg == ':') errmsg++;
if (*errmsg == ' ') errmsg++;
}
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_OPEN_LIBRARY, MYF(0), dlpath, errno, errmsg);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_OPEN_LIBRARY), dlpath, errno, errmsg);
+ report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, errno, errmsg);
DBUG_RETURN(0);
}
/* Determine interface version */
if (!(sym= dlsym(plugin_dl.handle, plugin_interface_version_sym)))
{
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), plugin_interface_version_sym);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), plugin_interface_version_sym);
+ report_error(report, ER_CANT_FIND_DL_ENTRY, plugin_interface_version_sym);
DBUG_RETURN(0);
}
plugin_dl.version= *(int *)sym;
@@ -415,28 +421,42 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
(plugin_dl.version >> 8) > (MYSQL_PLUGIN_INTERFACE_VERSION >> 8))
{
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_OPEN_LIBRARY, MYF(0), dlpath, 0,
- "plugin interface version mismatch");
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_OPEN_LIBRARY), dlpath, 0,
- "plugin interface version mismatch");
+    report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, 0,
+                 "plugin interface version mismatch");
DBUG_RETURN(0);
}
+
+ /* link the services in */
+ for (i= 0; i < array_elements(list_of_services); i++)
+ {
+ if ((sym= dlsym(plugin_dl.handle, list_of_services[i].name)))
+ {
+ uint ver= (uint)(intptr)*(void**)sym;
+ if (ver > list_of_services[i].version ||
+ (ver >> 8) < (list_of_services[i].version >> 8))
+ {
+ char buf[MYSQL_ERRMSG_SIZE];
+ my_snprintf(buf, sizeof(buf),
+ "service '%s' interface version mismatch",
+ list_of_services[i].name);
+ report_error(report, ER_CANT_OPEN_LIBRARY, dlpath, 0, buf);
+ DBUG_RETURN(0);
+ }
+ *(void**)sym= list_of_services[i].service;
+ }
+ }
+
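A self-contained model of the service-version check in the loop above (illustrative, not part of the patch), assuming the usual (major << 8) | minor encoding: reject a plugin that needs a newer interface than the server provides, or that was built against an older major version.

#include <cstdio>

static bool service_version_ok(unsigned required, unsigned provided)
{
  return !(required > provided || (required >> 8) < (provided >> 8));
}

int main()
{
  unsigned provided= 0x0101;                                 /* server offers 1.1 */
  std::printf("%d\n", service_version_ok(0x0100, provided)); /* 1: 1.0, same major */
  std::printf("%d\n", service_version_ok(0x0102, provided)); /* 0: 1.2 is too new  */
  std::printf("%d\n", service_version_ok(0x0001, provided)); /* 0: older major     */
  return 0;
}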
/* Find plugin declarations */
if (!(sym= dlsym(plugin_dl.handle, plugin_declarations_sym)))
{
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), plugin_declarations_sym);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), plugin_declarations_sym);
+    report_error(report, ER_CANT_FIND_DL_ENTRY, plugin_declarations_sym);
DBUG_RETURN(0);
}
if (plugin_dl.version != MYSQL_PLUGIN_INTERFACE_VERSION)
{
- int i;
uint sizeof_st_plugin;
struct st_mysql_plugin *old, *cur;
char *ptr= (char *)sym;
@@ -446,11 +466,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
else
{
#ifdef ERROR_ON_NO_SIZEOF_PLUGIN_SYMBOL
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), sizeof_st_plugin_sym);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), sizeof_st_plugin_sym);
+ report_error(report, ER_CANT_FIND_DL_ENTRY, sizeof_st_plugin_sym);
DBUG_RETURN(0);
#else
/*
@@ -472,10 +488,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (!cur)
{
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_OUTOFMEMORY, MYF(0), plugin_dl.dl.length);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_OUTOFMEMORY), plugin_dl.dl.length);
+ report_error(report, ER_OUTOFMEMORY, plugin_dl.dl.length);
DBUG_RETURN(0);
}
/*
@@ -497,10 +510,7 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (! (plugin_dl.dl.str= (char*) my_malloc(plugin_dl.dl.length, MYF(0))))
{
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_OUTOFMEMORY, MYF(0), plugin_dl.dl.length);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_OUTOFMEMORY), plugin_dl.dl.length);
+ report_error(report, ER_OUTOFMEMORY, plugin_dl.dl.length);
DBUG_RETURN(0);
}
plugin_dl.dl.length= copy_and_convert(plugin_dl.dl.str, plugin_dl.dl.length,
@@ -511,19 +521,13 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report)
if (! (tmp= plugin_dl_insert_or_reuse(&plugin_dl)))
{
free_plugin_mem(&plugin_dl);
- if (report & REPORT_TO_USER)
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(struct st_plugin_dl));
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_OUTOFMEMORY), sizeof(struct st_plugin_dl));
+ report_error(report, ER_OUTOFMEMORY, sizeof(struct st_plugin_dl));
DBUG_RETURN(0);
}
DBUG_RETURN(tmp);
#else
DBUG_ENTER("plugin_dl_add");
- if (report & REPORT_TO_USER)
- my_error(ER_FEATURE_DISABLED, MYF(0), "plugin", "HAVE_DLOPEN");
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_FEATURE_DISABLED), "plugin", "HAVE_DLOPEN");
+ report_error(report, ER_FEATURE_DISABLED, "plugin", "HAVE_DLOPEN");
DBUG_RETURN(0);
#endif
}
@@ -574,14 +578,15 @@ static struct st_plugin_int *plugin_find_internal(const LEX_STRING *name, int ty
for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
{
struct st_plugin_int *plugin= (st_plugin_int *)
- hash_search(&plugin_hash[i], (const uchar *)name->str, name->length);
+ my_hash_search(&plugin_hash[i], (const uchar *)name->str, name->length);
if (plugin)
DBUG_RETURN(plugin);
}
}
else
DBUG_RETURN((st_plugin_int *)
- hash_search(&plugin_hash[type], (const uchar *)name->str, name->length));
+ my_hash_search(&plugin_hash[type], (const uchar *)name->str,
+ name->length));
DBUG_RETURN(0);
}
@@ -639,7 +644,7 @@ static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref rc CALLER_INFO_PROTO)
/*
For debugging, we do an additional malloc which allows the
memory manager and/or valgrind to track locked references and
- double unlocks to aid resolving reference counting.problems.
+ double unlocks to aid resolving reference counting problems.
*/
if (!(plugin= (plugin_ref) my_malloc_ci(sizeof(pi), MYF(MY_WME))))
DBUG_RETURN(NULL);
@@ -722,10 +727,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
DBUG_ENTER("plugin_add");
if (plugin_find_internal(name, MYSQL_ANY_PLUGIN))
{
- if (report & REPORT_TO_USER)
- my_error(ER_UDF_EXISTS, MYF(0), name->str);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_UDF_EXISTS), name->str);
+ report_error(report, ER_UDF_EXISTS, name->str);
DBUG_RETURN(TRUE);
}
/* Clear the whole struct to catch future extensions. */
@@ -752,10 +754,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
strxnmov(buf, sizeof(buf) - 1, "API version for ",
plugin_type_names[plugin->type].str,
" plugin is too different", NullS);
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_OPEN_LIBRARY, MYF(0), dl->str, 0, buf);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_OPEN_LIBRARY), dl->str, 0, buf);
+ report_error(report, ER_CANT_OPEN_LIBRARY, dl->str, 0, buf);
goto err;
}
tmp.plugin= plugin;
@@ -784,10 +783,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
DBUG_RETURN(FALSE);
}
}
- if (report & REPORT_TO_USER)
- my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), name->str);
- if (report & REPORT_TO_LOG)
- sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), name->str);
+ report_error(report, ER_CANT_FIND_DL_ENTRY, name->str);
err:
plugin_dl_del(dl);
DBUG_RETURN(TRUE);
@@ -858,7 +854,7 @@ static void plugin_del(struct st_plugin_int *plugin)
safe_mutex_assert_owner(&LOCK_plugin);
/* Free allocated strings before deleting the plugin. */
plugin_vars_free_values(plugin->system_vars);
- hash_delete(&plugin_hash[plugin->plugin->type], (uchar*)plugin);
+ my_hash_delete(&plugin_hash[plugin->plugin->type], (uchar*)plugin);
if (plugin->plugin_dl)
plugin_dl_del(&plugin->plugin_dl->dl);
plugin->state= PLUGIN_IS_FREED;
@@ -1133,8 +1129,8 @@ int plugin_init(int *argc, char **argv, int flags)
init_alloc_root(&plugin_mem_root, 4096, 4096);
init_alloc_root(&tmp_root, 4096, 4096);
- if (hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0,
- get_bookmark_hash_key, NULL, HASH_UNIQUE))
+ if (my_hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0,
+ get_bookmark_hash_key, NULL, HASH_UNIQUE))
goto err;
@@ -1148,8 +1144,8 @@ int plugin_init(int *argc, char **argv, int flags)
for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
{
- if (hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
- get_plugin_hash_key, NULL, HASH_UNIQUE))
+ if (my_hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
+ get_plugin_hash_key, NULL, HASH_UNIQUE))
goto err;
}
@@ -1627,7 +1623,7 @@ void plugin_shutdown(void)
/* Dispose of the memory */
for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
- hash_free(&plugin_hash[i]);
+ my_hash_free(&plugin_hash[i]);
delete_dynamic(&plugin_array);
count= plugin_dl_array.elements;
@@ -1639,7 +1635,7 @@ void plugin_shutdown(void)
my_afree(dl);
delete_dynamic(&plugin_dl_array);
- hash_free(&bookmark_hash);
+ my_hash_free(&bookmark_hash);
free_root(&plugin_mem_root, MYF(0));
global_variables_dynamic_size= 0;
@@ -1660,7 +1656,7 @@ bool mysql_install_plugin(THD *thd, const LEX_STRING *name, const LEX_STRING *dl
bzero(&tables, sizeof(tables));
tables.db= (char *)"mysql";
tables.table_name= tables.alias= (char *)"plugin";
- if (check_table_access(thd, INSERT_ACL, &tables, 1, FALSE))
+ if (check_table_access(thd, INSERT_ACL, &tables, FALSE, 1, FALSE))
DBUG_RETURN(TRUE);
/* need to open before acquiring LOCK_plugin or it will deadlock */
@@ -1764,12 +1760,13 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
reap_plugins();
pthread_mutex_unlock(&LOCK_plugin);
+ uchar user_key[MAX_KEY_LENGTH];
table->use_all_columns();
table->field[0]->store(name->str, name->length, system_charset_info);
- if (! table->file->index_read_idx_map(table->record[0], 0,
- (uchar *)table->field[0]->ptr,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ key_copy(user_key, table->record[0], table->key_info,
+ table->key_info->key_length);
+ if (! table->file->index_read_idx_map(table->record[0], 0, user_key,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
{
int error;
/*
@@ -1827,7 +1824,7 @@ bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func,
HASH *hash= plugin_hash + type;
for (idx= 0; idx < total; idx++)
{
- plugin= (struct st_plugin_int *) hash_element(hash, idx);
+ plugin= (struct st_plugin_int *) my_hash_element(hash, idx);
plugins[idx]= !(plugin->state & state_mask) ? plugin : NULL;
}
}
@@ -2226,8 +2223,8 @@ static st_bookmark *find_bookmark(const char *plugin, const char *name,
varname[0]= flags & PLUGIN_VAR_TYPEMASK;
- result= (st_bookmark*) hash_search(&bookmark_hash,
- (const uchar*) varname, length - 1);
+ result= (st_bookmark*) my_hash_search(&bookmark_hash,
+ (const uchar*) varname, length - 1);
my_afree(varname);
return result;
@@ -2387,7 +2384,7 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock)
{
sys_var_pluginvar *pi;
sys_var *var;
- st_bookmark *v= (st_bookmark*) hash_element(&bookmark_hash,idx);
+ st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx);
if (v->version <= thd->variables.dynamic_variables_version ||
!(var= intern_find_sys_var(v->key + 1, v->name_len, true)) ||
@@ -2481,7 +2478,7 @@ static void cleanup_variables(THD *thd, struct system_variables *vars)
rw_rdlock(&LOCK_system_variables_hash);
for (idx= 0; idx < bookmark_hash.records; idx++)
{
- v= (st_bookmark*) hash_element(&bookmark_hash, idx);
+ v= (st_bookmark*) my_hash_element(&bookmark_hash, idx);
if (v->version > vars->dynamic_variables_version ||
!(var= intern_find_sys_var(v->key + 1, v->name_len, true)) ||
!(pivar= var->cast_pluginvar()) ||
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index 004d0d5abb7..23ce85c994b 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -18,6 +18,14 @@
class sys_var;
+#ifdef DBUG_OFF
+#define plugin_ref_to_int(A) A
+#define plugin_int_to_ref(A) A
+#else
+#define plugin_ref_to_int(A) (A ? A[0] : NULL)
+#define plugin_int_to_ref(A) &(A)
+#endif
+
/*
the following flags are valid for plugin_init()
*/
@@ -27,16 +35,6 @@ class sys_var;
#define INITIAL_LEX_PLUGIN_LIST_SIZE 16
-/*
- the following #define adds server-only members to enum_mysql_show_type,
- that is defined in plugin.h
-*/
-#define SHOW_FUNC SHOW_FUNC, SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_LONGLONG, \
- SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, SHOW_HAVE, \
- SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, SHOW_LONG_NOFLUSH, \
- SHOW_LONGLONG_STATUS
-#include <mysql/plugin.h>
-#undef SHOW_FUNC
typedef enum enum_mysql_show_type SHOW_TYPE;
typedef struct st_mysql_show_var SHOW_VAR;
diff --git a/sql/sql_plugin_services.h b/sql/sql_plugin_services.h
new file mode 100644
index 00000000000..7491ddab79d
--- /dev/null
+++ b/sql/sql_plugin_services.h
@@ -0,0 +1,44 @@
+/* Copyright (C) 2009 Sun Microsystems, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* support for Services */
+#include <service_versions.h>
+
+struct st_service_ref {
+ const char *name;
+ uint version;
+ void *service;
+};
+
+static struct my_snprintf_service_st my_snprintf_handler = {
+ my_snprintf,
+ my_vsnprintf
+};
+
+static struct thd_alloc_service_st thd_alloc_handler= {
+ thd_alloc,
+ thd_calloc,
+ thd_strdup,
+ thd_strmake,
+ thd_memdup,
+ thd_make_lex_string
+};
+
+static struct st_service_ref list_of_services[]=
+{
+ { "my_snprintf_service", VERSION_my_snprintf, &my_snprintf_handler },
+ { "thd_alloc_service", VERSION_thd_alloc, &thd_alloc_handler }
+};
+
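An illustrative, standalone model of the plugin side of a service (the names are made up, not the real service symbols): the plugin exports a pointer that initially carries the interface version it expects, and the loader's *(void**)sym= ... assignment in plugin_dl_add() replaces it with the server's vtable.

#include <cstddef>
#include <cstdio>

/* Shape of a service vtable; purely illustrative. */
struct demo_service_st
{
  int (*print_msg)(const char *msg);
};

/* What a plugin would export: before loading it holds the expected version. */
static void *demo_service= (void *) 0x0100;

static int server_print(const char *msg) { return std::printf("%s\n", msg); }
static struct demo_service_st server_vtable= { server_print };

int main()
{
  /* Loader side: read the expected version out of the pointer, then patch it. */
  size_t required= (size_t) demo_service;
  if (required <= 0x0100)                 /* pretend the version check passed */
    demo_service= &server_vtable;

  /* Plugin side: all calls go through the patched pointer. */
  ((struct demo_service_st *) demo_service)->print_msg("hello from the service");
  return 0;
}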
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 7c902de35b8..168934206e5 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -84,6 +84,7 @@ When one supplies long data for a placeholder:
*/
#include "mysql_priv.h"
+#include "sql_prepare.h"
#include "sql_select.h" // for JOIN
#include "sql_cursor.h"
#include "sp_head.h"
@@ -106,7 +107,7 @@ class Select_fetch_protocol_binary: public select_send
Protocol_binary protocol;
public:
Select_fetch_protocol_binary(THD *thd);
- virtual bool send_fields(List<Item> &list, uint flags);
+ virtual bool send_result_set_metadata(List<Item> &list, uint flags);
virtual bool send_data(List<Item> &items);
virtual bool send_eof();
#ifdef EMBEDDED_LIBRARY
@@ -163,6 +164,7 @@ public:
bool execute_loop(String *expanded_query,
bool open_cursor,
uchar *packet_arg, uchar *packet_end_arg);
+ bool execute_server_runnable(Server_runnable *server_runnable);
/* Destroy this statement */
void deallocate();
private:
@@ -183,6 +185,78 @@ private:
void swap_prepared_statement(Prepared_statement *copy);
};
+/**
+ Execute one SQL statement in an isolated context.
+*/
+
+class Execute_sql_statement: public Server_runnable
+{
+public:
+ Execute_sql_statement(LEX_STRING sql_text);
+ virtual bool execute_server_code(THD *thd);
+private:
+ LEX_STRING m_sql_text;
+};
+
+
+class Ed_connection;
+
+/**
+ Protocol_local: a helper class to intercept the result
+ of the data written to the network.
+*/
+
+class Protocol_local :public Protocol
+{
+public:
+ Protocol_local(THD *thd, Ed_connection *ed_connection);
+ ~Protocol_local() { free_root(&m_rset_root, MYF(0)); }
+protected:
+ virtual void prepare_for_resend();
+ virtual bool write();
+ virtual bool store_null();
+ virtual bool store_tiny(longlong from);
+ virtual bool store_short(longlong from);
+ virtual bool store_long(longlong from);
+ virtual bool store_longlong(longlong from, bool unsigned_flag);
+ virtual bool store_decimal(const my_decimal *);
+ virtual bool store(const char *from, size_t length, CHARSET_INFO *cs);
+ virtual bool store(const char *from, size_t length,
+ CHARSET_INFO *fromcs, CHARSET_INFO *tocs);
+ virtual bool store(MYSQL_TIME *time);
+ virtual bool store_date(MYSQL_TIME *time);
+ virtual bool store_time(MYSQL_TIME *time);
+ virtual bool store(float value, uint32 decimals, String *buffer);
+ virtual bool store(double value, uint32 decimals, String *buffer);
+ virtual bool store(Field *field);
+
+ virtual bool send_result_set_metadata(List<Item> *list, uint flags);
+ virtual bool send_out_parameters(List<Item_param> *sp_params);
+#ifdef EMBEDDED_LIBRARY
+ void remove_last_row();
+#endif
+ virtual enum enum_protocol_type type() { return PROTOCOL_LOCAL; };
+
+ virtual bool send_ok(uint server_status, uint statement_warn_count,
+ ulonglong affected_rows, ulonglong last_insert_id,
+ const char *message);
+
+ virtual bool send_eof(uint server_status, uint statement_warn_count);
+ virtual bool send_error(uint sql_errno, const char *err_msg, const char* sqlstate);
+private:
+ bool store_string(const char *str, size_t length,
+ CHARSET_INFO *src_cs, CHARSET_INFO *dst_cs);
+
+ bool store_column(const void *data, size_t length);
+ void opt_add_row_to_rset();
+private:
+ Ed_connection *m_connection;
+ MEM_ROOT m_rset_root;
+ List<Ed_row> *m_rset;
+ size_t m_column_count;
+ Ed_column *m_current_row;
+ Ed_column *m_current_column;
+};
/******************************************************************************
Implementation
@@ -250,7 +324,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns)
int2store(buff+5, columns);
int2store(buff+7, stmt->param_count);
buff[9]= 0; // Guard against a 4.1 client
- tmp= min(stmt->thd->total_warn_count, 65535);
+ tmp= min(stmt->thd->warning_info->statement_warn_count(), 65535);
int2store(buff+10, tmp);
/*
@@ -260,12 +334,12 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns)
error= my_net_write(net, buff, sizeof(buff));
if (stmt->param_count && ! error)
{
- error= thd->protocol_text.send_fields((List<Item> *)
+ error= thd->protocol_text.send_result_set_metadata((List<Item> *)
&stmt->lex->param_list,
Protocol::SEND_EOF);
}
/* Flag that a response has already been sent */
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
DBUG_RETURN(error);
}
#else
@@ -277,7 +351,7 @@ static bool send_prep_stmt(Prepared_statement *stmt,
thd->client_stmt_id= stmt->id;
thd->client_param_count= stmt->param_count;
thd->clear_error();
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
return 0;
}
@@ -1027,9 +1101,9 @@ static bool insert_params_from_vars(Prepared_statement *stmt,
{
Item_param *param= *it;
varname= var_it++;
- entry= (user_var_entry*)hash_search(&stmt->thd->user_vars,
- (uchar*) varname->str,
- varname->length);
+ entry= (user_var_entry*)my_hash_search(&stmt->thd->user_vars,
+ (uchar*) varname->str,
+ varname->length);
if (param->set_from_user_var(stmt->thd, entry) ||
param->convert_str_value(stmt->thd))
DBUG_RETURN(1);
@@ -1074,8 +1148,8 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt,
Item_param *param= *it;
varname= var_it++;
- entry= (user_var_entry *) hash_search(&thd->user_vars, (uchar*) varname->str,
- varname->length);
+ entry= (user_var_entry *) my_hash_search(&thd->user_vars, (uchar*)
+ varname->str, varname->length);
/*
We have to call the setup_one_conversion_function() here to set
the parameter's members that might be needed further
@@ -1336,7 +1410,7 @@ static int mysql_test_select(Prepared_statement *stmt,
ulong privilege= lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL;
if (tables)
{
- if (check_table_access(thd, privilege, tables, UINT_MAX, FALSE))
+ if (check_table_access(thd, privilege, tables, FALSE, UINT_MAX, FALSE))
goto error;
}
else if (check_access(thd, privilege, any_db,0,0,0,0))
@@ -1374,7 +1448,7 @@ static int mysql_test_select(Prepared_statement *stmt,
unit->prepare call above.
*/
if (send_prep_stmt(stmt, lex->result->field_count(fields)) ||
- lex->result->send_fields(fields, Protocol::SEND_EOF) ||
+ lex->result->send_result_set_metadata(fields, Protocol::SEND_EOF) ||
thd->protocol->flush())
goto error;
DBUG_RETURN(2);
@@ -1405,7 +1479,8 @@ static bool mysql_test_do_fields(Prepared_statement *stmt,
THD *thd= stmt->thd;
DBUG_ENTER("mysql_test_do_fields");
- if (tables && check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE))
+ if (tables && check_table_access(thd, SELECT_ACL, tables, FALSE,
+ UINT_MAX, FALSE))
DBUG_RETURN(TRUE);
if (open_normal_and_derived_tables(thd, tables, 0))
@@ -1436,8 +1511,9 @@ static bool mysql_test_set_fields(Prepared_statement *stmt,
THD *thd= stmt->thd;
set_var_base *var;
- if ((tables && check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE))
- || open_normal_and_derived_tables(thd, tables, 0))
+ if ((tables && check_table_access(thd, SELECT_ACL, tables, FALSE,
+ UINT_MAX, FALSE)) ||
+ open_normal_and_derived_tables(thd, tables, 0))
goto error;
while ((var= it++))
@@ -1472,7 +1548,8 @@ static bool mysql_test_call_fields(Prepared_statement *stmt,
THD *thd= stmt->thd;
Item *item;
- if ((tables && check_table_access(thd, SELECT_ACL, tables, UINT_MAX, FALSE)) ||
+ if ((tables && check_table_access(thd, SELECT_ACL, tables, FALSE,
+ UINT_MAX, FALSE)) ||
open_normal_and_derived_tables(thd, tables, 0))
goto err;
@@ -1835,6 +1912,10 @@ static bool check_prepared_statement(Prepared_statement *stmt)
lex->select_lex.context.resolve_in_table_list_only(select_lex->
get_table_list());
+ /* Reset warning count for each query that uses tables */
+ if (tables)
+ thd->warning_info->opt_clear_warning_info(thd->query_id);
+
switch (sql_command) {
case SQLCOM_REPLACE:
case SQLCOM_INSERT:
@@ -1916,30 +1997,6 @@ static bool check_prepared_statement(Prepared_statement *stmt)
Note that we don't need to have cases in this list if they are
marked with CF_STATUS_COMMAND in sql_command_flags
*/
- case SQLCOM_SHOW_PROCESSLIST:
- case SQLCOM_SHOW_STORAGE_ENGINES:
- case SQLCOM_SHOW_PRIVILEGES:
- case SQLCOM_SHOW_COLUMN_TYPES:
- case SQLCOM_SHOW_ENGINE_LOGS:
- case SQLCOM_SHOW_ENGINE_STATUS:
- case SQLCOM_SHOW_ENGINE_MUTEX:
- case SQLCOM_SHOW_CREATE_DB:
- case SQLCOM_SHOW_GRANTS:
- case SQLCOM_SHOW_BINLOG_EVENTS:
- case SQLCOM_SHOW_MASTER_STAT:
- case SQLCOM_SHOW_SLAVE_STAT:
- case SQLCOM_SHOW_CREATE_PROC:
- case SQLCOM_SHOW_CREATE_FUNC:
- case SQLCOM_SHOW_CREATE_EVENT:
- case SQLCOM_SHOW_CREATE_TRIGGER:
- case SQLCOM_SHOW_CREATE:
- case SQLCOM_SHOW_PROC_CODE:
- case SQLCOM_SHOW_FUNC_CODE:
- case SQLCOM_SHOW_AUTHORS:
- case SQLCOM_SHOW_CONTRIBUTORS:
- case SQLCOM_SHOW_WARNS:
- case SQLCOM_SHOW_ERRORS:
- case SQLCOM_SHOW_BINLOGS:
case SQLCOM_DROP_TABLE:
case SQLCOM_RENAME_TABLE:
case SQLCOM_ALTER_TABLE:
@@ -2062,7 +2119,6 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
{
Protocol *save_protocol= thd->protocol;
Prepared_statement *stmt;
- bool error;
DBUG_ENTER("mysqld_stmt_prepare");
DBUG_PRINT("prep_query", ("%s", packet));
@@ -2082,22 +2138,12 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
DBUG_VOID_RETURN;
}
- /* Reset warnings from previous command */
- mysql_reset_errors(thd, 0);
sp_cache_flush_obsolete(&thd->sp_proc_cache);
sp_cache_flush_obsolete(&thd->sp_func_cache);
thd->protocol= &thd->protocol_binary;
- if (!(specialflag & SPECIAL_NO_PRIOR))
- my_pthread_setprio(pthread_self(),QUERY_PRIOR);
-
- error= stmt->prepare(packet, packet_length);
-
- if (!(specialflag & SPECIAL_NO_PRIOR))
- my_pthread_setprio(pthread_self(),WAIT_PRIOR);
-
- if (error)
+ if (stmt->prepare(packet, packet_length))
{
/* Statement map deletes statement on erase */
thd->stmt_map.erase(stmt);
@@ -2147,9 +2193,9 @@ static const char *get_dynamic_sql_string(LEX *lex, uint *query_len)
convert it for error messages to be uniform.
*/
if ((entry=
- (user_var_entry*)hash_search(&thd->user_vars,
- (uchar*)lex->prepared_stmt_code.str,
- lex->prepared_stmt_code.length))
+ (user_var_entry*)my_hash_search(&thd->user_vars,
+ (uchar*)lex->prepared_stmt_code.str,
+ lex->prepared_stmt_code.length))
&& entry->value)
{
my_bool is_var_null;
@@ -2460,7 +2506,7 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
DBUG_VOID_RETURN;
}
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
thd->profiling.set_query_source(stmt->query(), stmt->query_length());
#endif
DBUG_PRINT("exec_query", ("%s", stmt->query()));
@@ -2479,7 +2525,6 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
DBUG_EXECUTE_IF("close_conn_after_stmt_execute", vio_close(thd->net.vio););
DBUG_VOID_RETURN;
-
}
@@ -2659,7 +2704,7 @@ void mysqld_stmt_close(THD *thd, char *packet)
Prepared_statement *stmt;
DBUG_ENTER("mysqld_stmt_close");
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
if (!(stmt= find_prepared_statement(thd, stmt_id)))
DBUG_VOID_RETURN;
@@ -2734,7 +2779,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
status_var_increment(thd->status_var.com_stmt_send_long_data);
- thd->main_da.disable_status();
+ thd->stmt_da->disable_status();
#ifndef EMBEDDED_LIBRARY
/* Minimal size of long data packet is 6 bytes */
if (packet_length < MYSQL_LONG_DATA_HEADER)
@@ -2788,19 +2833,19 @@ Select_fetch_protocol_binary::Select_fetch_protocol_binary(THD *thd_arg)
:protocol(thd_arg)
{}
-bool Select_fetch_protocol_binary::send_fields(List<Item> &list, uint flags)
+bool Select_fetch_protocol_binary::send_result_set_metadata(List<Item> &list, uint flags)
{
bool rc;
Protocol *save_protocol= thd->protocol;
/*
- Protocol::send_fields caches the information about column types:
+ Protocol::send_result_set_metadata caches the information about column types:
this information is later used to send data. Therefore, the same
dedicated Protocol object must be used for all operations with
a cursor.
*/
thd->protocol= &protocol;
- rc= select_send::send_fields(list, flags);
+ rc= select_send::send_result_set_metadata(list, flags);
thd->protocol= save_protocol;
return rc;
@@ -2825,6 +2870,94 @@ Select_fetch_protocol_binary::send_data(List<Item> &fields)
return rc;
}
+/*******************************************************************
+* Reprepare_observer
+*******************************************************************/
+/** Push an error to the error stack and return TRUE for now. */
+
+bool
+Reprepare_observer::report_error(THD *thd)
+{
+ /*
+ This 'error' is purely internal to the server:
+ - No exception handler is invoked,
+ - No condition is added in the condition area (warn_list).
+ The diagnostics area is set to an error status to enforce
+ that this thread execution stops and returns to the caller,
+ backtracking all the way to Prepared_statement::execute_loop().
+ */
+ thd->stmt_da->set_error_status(thd, ER_NEED_REPREPARE,
+ ER(ER_NEED_REPREPARE), "HY000");
+ m_invalidated= TRUE;
+
+ return TRUE;
+}
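A simplified, standalone sketch of how this invalidation flag drives the re-prepare loop in Prepared_statement::execute_loop() (editor's illustration; no THD or diagnostics area here).

#include <cstdio>

struct DemoObserver
{
  bool m_invalidated;
  bool report_error() { m_invalidated= true; return true; }  /* "error" for caller */
  bool is_invalidated() const { return m_invalidated; }
  void reset() { m_invalidated= false; }
};

static const int MAX_REPREPARE_ATTEMPTS= 3;

static bool execute_once(DemoObserver *obs, int attempt)
{
  if (attempt == 0)        /* pretend the first run notices stale table metadata */
    return obs->report_error();
  return false;            /* success after re-prepare */
}

int main()
{
  DemoObserver obs= { false };
  int attempt= 0;
  bool error;
  do
  {
    obs.reset();
    error= execute_once(&obs, attempt);
  } while (error && obs.is_invalidated() &&
           attempt++ < MAX_REPREPARE_ATTEMPTS);
  std::printf("finished after %d re-prepare(s), error=%d\n", attempt, (int) error);
  return 0;
}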
+
+
+/*******************************************************************
+* Server_runnable
+*******************************************************************/
+
+Server_runnable::~Server_runnable()
+{
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+Execute_sql_statement::
+Execute_sql_statement(LEX_STRING sql_text)
+ :m_sql_text(sql_text)
+{}
+
+
+/**
+ Parse and execute a statement. Does not prepare the query.
+
+  Allows one statement to be executed from within another statement.
+ The main property of the implementation is that it does not
+ affect the environment -- i.e. you can run many
+ executions without having to cleanup/reset THD in between.
+*/
+
+bool
+Execute_sql_statement::execute_server_code(THD *thd)
+{
+ bool error;
+
+ if (alloc_query(thd, m_sql_text.str, m_sql_text.length))
+ return TRUE;
+
+ Parser_state parser_state(thd, thd->query(), thd->query_length());
+
+ parser_state.m_lip.multi_statements= FALSE;
+ lex_start(thd);
+
+ error= parse_sql(thd, &parser_state, NULL) || thd->is_error();
+
+ if (error)
+ goto end;
+
+ thd->lex->set_trg_event_type_for_tables();
+
+ error= mysql_execute_command(thd);
+
+ if (thd->killed_errno())
+ {
+ if (! thd->stmt_da->is_set())
+ thd->send_kill_message();
+ }
+
+ /* report error issued during command execution */
+ if (error == 0 && thd->spcont == NULL)
+ general_log_write(thd, COM_STMT_EXECUTE,
+ thd->query(), thd->query_length());
+
+end:
+ lex_end(thd->lex);
+
+ return error;
+}
+
/***************************************************************************
Prepared_statement
****************************************************************************/
@@ -2922,7 +3055,8 @@ void Prepared_statement::cleanup_stmt()
DBUG_ENTER("Prepared_statement::cleanup_stmt");
DBUG_PRINT("enter",("stmt: 0x%lx", (long) this));
- DBUG_ASSERT(lex->sphead == 0);
+ delete lex->sphead;
+ lex->sphead= 0;
/* The order is important */
lex->unit.cleanup();
cleanup_items(free_list);
@@ -3035,6 +3169,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
Parser_state parser_state(thd, thd->query(), thd->query_length());
parser_state.m_lip.stmt_prepare_mode= TRUE;
+ parser_state.m_lip.multi_statements= FALSE;
lex_start(thd);
error= parse_sql(thd, & parser_state, NULL) ||
@@ -3265,7 +3400,7 @@ reexecute:
reprepare_observer.is_invalidated() &&
reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS)
{
- DBUG_ASSERT(thd->main_da.sql_errno() == ER_NEED_REPREPARE);
+ DBUG_ASSERT(thd->stmt_da->sql_errno() == ER_NEED_REPREPARE);
thd->clear_error();
error= reprepare();
@@ -3279,6 +3414,45 @@ reexecute:
}
+bool
+Prepared_statement::execute_server_runnable(Server_runnable *server_runnable)
+{
+ Statement stmt_backup;
+ bool error;
+ Query_arena *save_stmt_arena= thd->stmt_arena;
+ Item_change_list save_change_list;
+  save_change_list= thd->change_list;
+
+ state= CONVENTIONAL_EXECUTION;
+
+ if (!(lex= new (mem_root) st_lex_local))
+ return TRUE;
+
+ thd->set_n_backup_statement(this, &stmt_backup);
+ thd->set_n_backup_active_arena(this, &stmt_backup);
+ thd->stmt_arena= this;
+
+ error= server_runnable->execute_server_code(thd);
+
+ delete lex->sphead;
+ lex->sphead= 0;
+ /* The order is important */
+ lex->unit.cleanup();
+ close_thread_tables(thd);
+ thd->cleanup_after_query();
+
+ thd->restore_active_arena(this, &stmt_backup);
+ thd->restore_backup_statement(this, &stmt_backup);
+ thd->stmt_arena= save_stmt_arena;
+
+  thd->change_list= save_change_list;
+
+  /* Items and memory will be freed in the destructor */
+
+ return error;
+}
+
+
/**
Reprepare this prepared statement.
@@ -3328,12 +3502,12 @@ Prepared_statement::reprepare()
#endif
/*
Clear possible warnings during reprepare, it has to be completely
- transparent to the user. We use mysql_reset_errors() since
+ transparent to the user. We use clear_warning_info() since
there were no separate query id issued for re-prepare.
Sic: we can't simply silence warnings during reprepare, because if
it's failed, we need to return all the warnings to the user.
*/
- mysql_reset_errors(thd, TRUE);
+ thd->warning_info->clear_warning_info(thd->query_id);
}
return error;
}
@@ -3613,6 +3787,15 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
if (state == Query_arena::PREPARED)
state= Query_arena::EXECUTED;
+ if (this->lex->sql_command == SQLCOM_CALL)
+ {
+ if (is_sql_prepare())
+ thd->protocol_text.send_out_parameters(&this->lex->param_list);
+ else
+ thd->protocol->send_out_parameters(&this->lex->param_list);
+ }
+
+
/*
Log COM_EXECUTE to the general log. Note, that in case of SQL
prepared statements this causes two records to be output:
@@ -3646,3 +3829,573 @@ void Prepared_statement::deallocate()
/* Statement map calls delete stmt on erase */
thd->stmt_map.erase(this);
}
+
+
+/***************************************************************************
+* Ed_result_set
+***************************************************************************/
+/**
+ Use operator delete to free memory of Ed_result_set.
+ Accessing members of a class after the class has been destroyed
+ is a violation of the C++ standard but is commonly used in the
+ server code.
+*/
+
+void Ed_result_set::operator delete(void *ptr, size_t size) throw ()
+{
+ if (ptr)
+ {
+ /*
+ Make a stack copy, otherwise free_root() will attempt to
+ write to freed memory.
+ */
+ MEM_ROOT own_root= ((Ed_result_set*) ptr)->m_mem_root;
+ free_root(&own_root, MYF(0));
+ }
+}
+
+
+/**
+ Initialize an instance of Ed_result_set.
+
+ Instances of the class, as well as all result set rows, are
+ always allocated in the memory root passed over as the second
+ argument. In the constructor, we take over ownership of the
+ memory root. It will be freed when the class is destroyed.
+
+  sic: Ed_result_set is not designed to be allocated on the stack.
+*/
+
+Ed_result_set::Ed_result_set(List<Ed_row> *rows_arg,
+ size_t column_count_arg,
+ MEM_ROOT *mem_root_arg)
+ :m_mem_root(*mem_root_arg),
+ m_column_count(column_count_arg),
+ m_rows(rows_arg),
+ m_next_rset(NULL)
+{
+ /* Take over responsibility for the memory */
+ clear_alloc_root(mem_root_arg);
+}
+
+/***************************************************************************
+* Ed_connection
+***************************************************************************/
+
+/**
+ Create a new "execute direct" connection.
+*/
+
+Ed_connection::Ed_connection(THD *thd)
+ :m_warning_info(thd->query_id),
+ m_thd(thd),
+ m_rsets(0),
+ m_current_rset(0)
+{
+}
+
+
+/**
+ Free all result sets of the previous statement, if any,
+ and reset warnings and errors.
+
+ Called before execution of the next query.
+*/
+
+void
+Ed_connection::free_old_result()
+{
+ while (m_rsets)
+ {
+ Ed_result_set *rset= m_rsets->m_next_rset;
+ delete m_rsets;
+ m_rsets= rset;
+ }
+ m_current_rset= m_rsets;
+ m_diagnostics_area.reset_diagnostics_area();
+ m_warning_info.clear_warning_info(m_thd->query_id);
+}
+
+
+/**
+ A simple wrapper that uses a helper class to execute SQL statements.
+*/
+
+bool
+Ed_connection::execute_direct(LEX_STRING sql_text)
+{
+ Execute_sql_statement execute_sql_statement(sql_text);
+ DBUG_PRINT("ed_query", ("%s", sql_text.str));
+
+ return execute_direct(&execute_sql_statement);
+}
+
+
+/**
+ Execute a fragment of server functionality without an effect on
+ thd, and store results in memory.
+
+ Conventions:
+ - the code fragment must finish with OK, EOF or ERROR.
+ - the code fragment doesn't have to close thread tables,
+  free memory, commit the statement transaction or do any other
+  cleanup that is normally done at the end of dispatch_command().
+
+ @param server_runnable A code fragment to execute.
+*/
+
+bool Ed_connection::execute_direct(Server_runnable *server_runnable)
+{
+ bool rc= FALSE;
+ Protocol_local protocol_local(m_thd, this);
+ Prepared_statement stmt(m_thd);
+ Protocol *save_protocol= m_thd->protocol;
+ Diagnostics_area *save_diagnostics_area= m_thd->stmt_da;
+ Warning_info *save_warning_info= m_thd->warning_info;
+
+ DBUG_ENTER("Ed_connection::execute_direct");
+
+ free_old_result(); /* Delete all data from previous execution, if any */
+
+ m_thd->protocol= &protocol_local;
+ m_thd->stmt_da= &m_diagnostics_area;
+ m_thd->warning_info= &m_warning_info;
+
+ rc= stmt.execute_server_runnable(server_runnable);
+ m_thd->protocol->end_statement();
+
+ m_thd->protocol= save_protocol;
+ m_thd->stmt_da= save_diagnostics_area;
+ m_thd->warning_info= save_warning_info;
+ /*
+ Protocol_local makes use of m_current_rset to keep
+ track of the last result set, while adding result sets to the end.
+ Reset it to point to the first result set instead.
+ */
+ m_current_rset= m_rsets;
+
+ DBUG_RETURN(rc);
+}
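A usage sketch of the new execute-direct API, assembled only from names introduced by this patch (editor's illustration; error handling abbreviated, not compilable outside the server tree).

/* Sketch only: assumes the surrounding server headers (THD, LEX_STRING). */
static void run_one_query(THD *thd)
{
  Ed_connection con(thd);
  LEX_STRING sql;
  sql.str= (char *) "SELECT 1";
  sql.length= 8;

  if (con.execute_direct(sql))
    return;                       /* error: inspect the connection's diagnostics */

  if (con.get_field_count())
  {
    Ed_result_set *rset= con.store_result_set();
    /* ... iterate the List<Ed_row> that the result set converts to ... */
    delete rset;                  /* operator delete frees its MEM_ROOT */
  }
}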
+
+
+/**
+ A helper method that is called only during execution.
+
+ Although Ed_connection doesn't support multi-statements,
+ a statement may generate many result sets. All subsequent
+ result sets are appended to the end.
+
+ @pre This is called only by Protocol_local.
+*/
+
+void
+Ed_connection::add_result_set(Ed_result_set *ed_result_set)
+{
+ if (m_rsets)
+ {
+ m_current_rset->m_next_rset= ed_result_set;
+ /* While appending, use m_current_rset as a pointer to the tail. */
+ m_current_rset= ed_result_set;
+ }
+ else
+ m_current_rset= m_rsets= ed_result_set;
+}
+
+
+/**
+ Release ownership of the current result set to the client.
+
+ Since we use a simple linked list for result sets,
+ this method uses a linear search of the previous result
+ set to exclude the released instance from the list.
+
+ @todo Use double-linked list, when this is really used.
+
+ XXX: This has never been tested with more than one result set!
+
+ @pre There must be a result set.
+*/
+
+Ed_result_set *
+Ed_connection::store_result_set()
+{
+ Ed_result_set *ed_result_set;
+
+ DBUG_ASSERT(m_current_rset);
+
+ if (m_current_rset == m_rsets)
+ {
+ /* Assign the return value */
+ ed_result_set= m_current_rset;
+ /* Exclude the return value from the list. */
+ m_current_rset= m_rsets= m_rsets->m_next_rset;
+ }
+ else
+ {
+ Ed_result_set *prev_rset= m_rsets;
+ /* Assign the return value. */
+ ed_result_set= m_current_rset;
+
+ /* Exclude the return value from the list */
+ while (prev_rset->m_next_rset != m_current_rset)
+      prev_rset= prev_rset->m_next_rset;
+ m_current_rset= prev_rset->m_next_rset= m_current_rset->m_next_rset;
+ }
+ ed_result_set->m_next_rset= NULL; /* safety */
+
+ return ed_result_set;
+}
+
+/*************************************************************************
+* Protocol_local
+**************************************************************************/
+
+Protocol_local::Protocol_local(THD *thd, Ed_connection *ed_connection)
+ :Protocol(thd),
+ m_connection(ed_connection),
+ m_rset(NULL),
+ m_column_count(0),
+ m_current_row(NULL),
+ m_current_column(NULL)
+{
+ clear_alloc_root(&m_rset_root);
+}
+
+/**
+ Called between two result set rows.
+
+ Prepare structures to fill result set rows.
+ Unfortunately, we can't return an error here. If memory allocation
+ fails, we'll have to return an error later. And so is done
+ in methods such as @sa store_column().
+*/
+
+void Protocol_local::prepare_for_resend()
+{
+ DBUG_ASSERT(alloc_root_inited(&m_rset_root));
+
+ opt_add_row_to_rset();
+ /* Start a new row. */
+ m_current_row= (Ed_column *) alloc_root(&m_rset_root,
+ sizeof(Ed_column) * m_column_count);
+ m_current_column= m_current_row;
+}
+
+
+/**
+ In "real" protocols this is called to finish a result set row.
+ Unused in the local implementation.
+*/
+
+bool Protocol_local::write()
+{
+ return FALSE;
+}
+
+/**
+ A helper function to add the current row to the current result
+ set. Called in @sa prepare_for_resend(), when a new row is started,
+ and in send_eof(), when the result set is finished.
+*/
+
+void Protocol_local::opt_add_row_to_rset()
+{
+ if (m_current_row)
+ {
+ /* Add the old row to the result set */
+ Ed_row *ed_row= new (&m_rset_root) Ed_row(m_current_row, m_column_count);
+ if (ed_row)
+ m_rset->push_back(ed_row, &m_rset_root);
+ }
+}
+
+
+/**
+ Add a NULL column to the current row.
+*/
+
+bool Protocol_local::store_null()
+{
+ if (m_current_column == NULL)
+ return TRUE; /* prepare_for_resend() failed to allocate memory. */
+
+ bzero(m_current_column, sizeof(*m_current_column));
+ ++m_current_column;
+ return FALSE;
+}
+
+
+/**
+ A helper method to add any column to the current row
+ in its binary form.
+
+ Allocates memory for the data in the result set memory root.
+*/
+
+bool Protocol_local::store_column(const void *data, size_t length)
+{
+ if (m_current_column == NULL)
+ return TRUE; /* prepare_for_resend() failed to allocate memory. */
+ /*
+ alloc_root() automatically aligns memory, so we don't need to
+ do any extra alignment if we're pointing to, say, an integer.
+ */
+ m_current_column->str= (char*) memdup_root(&m_rset_root,
+ data,
+ length + 1 /* Safety */);
+ if (! m_current_column->str)
+ return TRUE;
+ m_current_column->str[length]= '\0'; /* Safety */
+ m_current_column->length= length;
+ ++m_current_column;
+ return FALSE;
+}
+
+
+/**
+ Store a string value in a result set column, optionally
+ having converted it to character_set_results.
+*/
+
+bool
+Protocol_local::store_string(const char *str, size_t length,
+ CHARSET_INFO *src_cs, CHARSET_INFO *dst_cs)
+{
+ /* Store with conversion */
+ uint error_unused;
+
+ if (dst_cs && !my_charset_same(src_cs, dst_cs) &&
+ src_cs != &my_charset_bin &&
+ dst_cs != &my_charset_bin)
+ {
+ if (convert->copy(str, length, src_cs, dst_cs, &error_unused))
+ return TRUE;
+ str= convert->ptr();
+ length= convert->length();
+ }
+ return store_column(str, length);
+}
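A standalone model of the conversion decision above (illustrative only; charsets are plain strings here rather than CHARSET_INFO pointers): convert only when a target charset is known, it differs from the source, and neither side is binary.

#include <cstdio>
#include <cstring>

static bool needs_conversion(const char *src_cs, const char *dst_cs)
{
  return dst_cs != NULL &&
         std::strcmp(src_cs, dst_cs) != 0 &&
         std::strcmp(src_cs, "binary") != 0 &&
         std::strcmp(dst_cs, "binary") != 0;
}

int main()
{
  std::printf("%d\n", needs_conversion("latin1", "utf8"));  /* 1 */
  std::printf("%d\n", needs_conversion("utf8",   "utf8"));  /* 0: same charset   */
  std::printf("%d\n", needs_conversion("binary", "utf8"));  /* 0: binary source  */
  std::printf("%d\n", needs_conversion("latin1", NULL));    /* 0: no target      */
  return 0;
}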
+
+
+/** Store a tiny int as is (1 byte) in a result set column. */
+
+bool Protocol_local::store_tiny(longlong value)
+{
+ char v= (char) value;
+ return store_column(&v, 1);
+}
+
+
+/** Store a short as is (2 bytes, host order) in a result set column. */
+
+bool Protocol_local::store_short(longlong value)
+{
+ int16 v= (int16) value;
+ return store_column(&v, 2);
+}
+
+
+/** Store a "long" as is (4 bytes, host order) in a result set column. */
+
+bool Protocol_local::store_long(longlong value)
+{
+ int32 v= (int32) value;
+ return store_column(&v, 4);
+}
+
+
+/** Store a "longlong" as is (8 bytes, host order) in a result set column. */
+
+bool Protocol_local::store_longlong(longlong value, bool unsigned_flag)
+{
+ int64 v= (int64) value;
+ return store_column(&v, 8);
+}
+
+
+/** Store a decimal in string format in a result set column */
+
+bool Protocol_local::store_decimal(const my_decimal *value)
+{
+ char buf[DECIMAL_MAX_STR_LENGTH];
+ String str(buf, sizeof (buf), &my_charset_bin);
+ int rc;
+
+ rc= my_decimal2string(E_DEC_FATAL_ERROR, value, 0, 0, 0, &str);
+
+ if (rc)
+ return TRUE;
+
+ return store_column(str.ptr(), str.length());
+}
+
+
+/** Convert to cs_results and store a string. */
+
+bool Protocol_local::store(const char *str, size_t length,
+ CHARSET_INFO *src_cs)
+{
+ CHARSET_INFO *dst_cs;
+
+ dst_cs= m_connection->m_thd->variables.character_set_results;
+ return store_string(str, length, src_cs, dst_cs);
+}
+
+
+/** Store a string. */
+
+bool Protocol_local::store(const char *str, size_t length,
+ CHARSET_INFO *src_cs, CHARSET_INFO *dst_cs)
+{
+ return store_string(str, length, src_cs, dst_cs);
+}
+
+
+/* Store MYSQL_TIME (in binary format) */
+
+bool Protocol_local::store(MYSQL_TIME *time)
+{
+ return store_column(time, sizeof(MYSQL_TIME));
+}
+
+
+/** Store MYSQL_TIME (in binary format) */
+
+bool Protocol_local::store_date(MYSQL_TIME *time)
+{
+ return store_column(time, sizeof(MYSQL_TIME));
+}
+
+
+/** Store MYSQL_TIME (in binary format) */
+
+bool Protocol_local::store_time(MYSQL_TIME *time)
+{
+ return store_column(time, sizeof(MYSQL_TIME));
+}
+
+
+/* Store a floating point number, as is. */
+
+bool Protocol_local::store(float value, uint32 decimals, String *buffer)
+{
+ return store_column(&value, sizeof(float));
+}
+
+
+/* Store a double precision number, as is. */
+
+bool Protocol_local::store(double value, uint32 decimals, String *buffer)
+{
+ return store_column(&value, sizeof (double));
+}
+
+
+/* Store a Field. */
+
+bool Protocol_local::store(Field *field)
+{
+ if (field->is_null())
+ return store_null();
+ return field->send_binary(this);
+}
+
+
+/** Called to start a new result set. */
+
+bool Protocol_local::send_result_set_metadata(List<Item> *columns, uint)
+{
+ DBUG_ASSERT(m_rset == 0 && !alloc_root_inited(&m_rset_root));
+
+ init_sql_alloc(&m_rset_root, MEM_ROOT_BLOCK_SIZE, 0);
+
+ if (! (m_rset= new (&m_rset_root) List<Ed_row>))
+ return TRUE;
+
+ m_column_count= columns->elements;
+
+ return FALSE;
+}
+
+
+/**
+ Normally this is a separate result set with OUT parameters
+ of stored procedures. Currently unsupported for the local
+ version.
+*/
+
+bool Protocol_local::send_out_parameters(List<Item_param> *sp_params)
+{
+ return FALSE;
+}
+
+
+/** Called for statements that don't have a result set, at statement end. */
+
+bool
+Protocol_local::send_ok(uint server_status, uint statement_warn_count,
+ ulonglong affected_rows, ulonglong last_insert_id,
+ const char *message)
+{
+ /*
+ Just make sure nothing is sent to the client, we have grabbed
+ the status information in the connection diagnostics area.
+ */
+ return FALSE;
+}
+
+
+/**
+ Called at the end of a result set. Append a complete
+ result set to the list in Ed_connection.
+
+ Don't send anything to the client, but instead finish
+ building of the result set at hand.
+*/
+
+bool Protocol_local::send_eof(uint server_status, uint statement_warn_count)
+{
+ Ed_result_set *ed_result_set;
+
+ DBUG_ASSERT(m_rset);
+
+ opt_add_row_to_rset();
+ m_current_row= 0;
+
+ ed_result_set= new (&m_rset_root) Ed_result_set(m_rset, m_column_count,
+ &m_rset_root);
+
+ m_rset= NULL;
+
+ if (! ed_result_set)
+ return TRUE;
+
+ /* In case of successful allocation memory ownership was transferred. */
+ DBUG_ASSERT(!alloc_root_inited(&m_rset_root));
+
+ /*
+ Link the created Ed_result_set instance into the list of connection
+ result sets. Never fails.
+ */
+ m_connection->add_result_set(ed_result_set);
+ return FALSE;
+}
+
+
+/** Called to send an error to the client at the end of a statement. */
+
+bool
+Protocol_local::send_error(uint sql_errno, const char *err_msg, const char*)
+{
+ /*
+ Just make sure that nothing is sent to the client (default
+ implementation).
+ */
+ return FALSE;
+}
+
+
+#ifdef EMBEDDED_LIBRARY
+void Protocol_local::remove_last_row()
+{ }
+#endif
diff --git a/sql/sql_prepare.h b/sql/sql_prepare.h
new file mode 100644
index 00000000000..11017b127b1
--- /dev/null
+++ b/sql/sql_prepare.h
@@ -0,0 +1,367 @@
+#ifndef SQL_PREPARE_H
+#define SQL_PREPARE_H
+/* Copyright (C) 1995-2008 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "sql_error.h"
+
+class THD;
+struct LEX;
+
+/**
+ An interface that is used to take an action when
+ the locking module notices that a table version has changed
+ since the last execution. "Table" here may refer to any kind of
+ table -- a base table, a temporary table, a view or an
+ information schema table.
+
+ When we open and lock tables for execution of a prepared
+ statement, we must verify that they did not change
+ since statement prepare. If some table did change, the statement
+ parse tree *may* be no longer valid, e.g. in case it contains
+ optimizations that depend on table metadata.
+
+ This class provides an interface (a method) that is
+ invoked when such a situation takes place.
+ The implementation of the method simply reports an error, but
+ the exact details depend on the nature of the SQL statement.
+
+ At most 1 instance of this class is active at a time, in which
+ case THD::m_reprepare_observer is not NULL.
+
+ @sa check_and_update_table_version() for details of the
+ version tracking algorithm
+
+ @sa Open_tables_state::m_reprepare_observer for the life cycle
+ of metadata observers.
+*/
+
+class Reprepare_observer
+{
+public:
+ /**
+ Check if a change of metadata is OK. In future
+ the signature of this method may be extended to accept the old
+ and the new versions, but since currently the check is very
+ simple, we only need the THD to report an error.
+ */
+ bool report_error(THD *thd);
+ bool is_invalidated() const { return m_invalidated; }
+ void reset_reprepare_observer() { m_invalidated= FALSE; }
+private:
+ bool m_invalidated;
+};
+
+
+void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length);
+void mysqld_stmt_execute(THD *thd, char *packet, uint packet_length);
+void mysqld_stmt_close(THD *thd, char *packet);
+void mysql_sql_stmt_prepare(THD *thd);
+void mysql_sql_stmt_execute(THD *thd);
+void mysql_sql_stmt_close(THD *thd);
+void mysqld_stmt_fetch(THD *thd, char *packet, uint packet_length);
+void mysqld_stmt_reset(THD *thd, char *packet);
+void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length);
+void reinit_stmt_before_use(THD *thd, LEX *lex);
+
+/**
+ Execute a fragment of server code in an isolated context, so that
+ it doesn't leave any effect on THD. THD must have no open tables.
+ The code must not leave any open tables around.
+  The result of execution (if any) is stored in an Ed_result_set.
+*/
+
+class Server_runnable
+{
+public:
+ virtual bool execute_server_code(THD *thd)= 0;
+ virtual ~Server_runnable();
+};
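A hypothetical Server_runnable implementation, shown only to illustrate the contract described above; my_ok() is assumed to be visible in the translation unit, and error handling is omitted.

/* Hypothetical example, not part of the patch. */
class Ping_runnable: public Server_runnable
{
public:
  virtual bool execute_server_code(THD *thd)
  {
    my_ok(thd);                 /* the fragment must end with OK, EOF or ERROR */
    return FALSE;
  }
};

/* Driven through the same interface as Execute_sql_statement: */
static bool ping(THD *thd)
{
  Ed_connection con(thd);
  Ping_runnable ping_runnable;
  return con.execute_direct(&ping_runnable);
}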
+
+
+/**
+ Execute direct interface.
+
+ @todo Implement support for prelocked mode.
+*/
+
+class Ed_row;
+
+/**
+ Ed_result_set -- a container with result set rows.
+ @todo Implement support for result set metadata and
+ automatic type conversion.
+*/
+
+class Ed_result_set: public Sql_alloc
+{
+public:
+ operator List<Ed_row>&() { return *m_rows; }
+ unsigned int size() const { return m_rows->elements; }
+
+ Ed_result_set(List<Ed_row> *rows_arg, size_t column_count,
+ MEM_ROOT *mem_root_arg);
+
+ /** We don't call member destructors, they all are POD types. */
+ ~Ed_result_set() {}
+
+ size_t get_field_count() const { return m_column_count; }
+
+ static void operator delete(void *ptr, size_t size) throw ();
+private:
+ Ed_result_set(const Ed_result_set &); /* not implemented */
+ Ed_result_set &operator=(Ed_result_set &); /* not implemented */
+private:
+ MEM_ROOT m_mem_root;
+ size_t m_column_count;
+ List<Ed_row> *m_rows;
+ Ed_result_set *m_next_rset;
+ friend class Ed_connection;
+};
+
+
+class Ed_connection
+{
+public:
+ /**
+ Construct a new "execute direct" connection.
+
+ The connection can be used to execute SQL statements.
+ If the connection failed to initialize, the error
+ will be returned on the attempt to execute a statement.
+
+ @pre thd must have no open tables
+ while the connection is used. However,
+ Ed_connection works okay in LOCK TABLES mode.
+ Other properties of THD, such as the current warning
+ information, errors, etc. do not matter and are
+ preserved by Ed_connection. One thread may have many
+ Ed_connections created for it.
+ */
+ Ed_connection(THD *thd);
+
+ /**
+ Execute one SQL statement.
+
+ Until this method is executed, no other methods of
+ Ed_connection can be used. Life cycle of Ed_connection is:
+
+ Initialized -> a statement has been executed ->
+ look at result, move to next result ->
+ look at result, move to next result ->
+ ...
+ moved beyond the last result == Initialized.
+
+ This method can be called repeatedly. Once it's invoked,
+ results of the previous execution are lost.
+
+ A result of execute_direct() can be either:
+
+ - success, no result set rows. In this case get_field_count()
+ returns 0. This happens after execution of INSERT, UPDATE,
+ DELETE, DROP and similar statements. Some other methods, such
+ as get_affected_rows() can be used to retrieve additional
+ result information.
+
+ - success, there are some result set rows (maybe 0). This
+ happens, e.g., after SELECT. In this case get_field_count() returns
+ the number of columns in the result set and store_result_set()
+ can be used to retrieve a result set.
+
+ - an error, methods to retrieve error information can
+ be used.
+
+ @return execution status
+ @retval FALSE success, use get_field_count()
+ to determine what to do next.
+ @retval TRUE error, use get_last_error()
+ to see the error number.
+ */
+ bool execute_direct(LEX_STRING sql_text);
+
+ /**
+ Same as the previous, but takes an instance of Server_runnable
+ instead of SQL statement text.
+
+ @return execution status
+
+ @retval FALSE success, use get_field_count()
+ if your code fragment is supposed to
+ return a result set
+ @retval TRUE failure
+ */
+ bool execute_direct(Server_runnable *server_runnable);
+
+ /**
+ Get the number of result set fields.
+
+ This method is valid only if we have a result:
+ execute_direct() has been called. Otherwise
+ the returned value is undefined.
+
+ @sa Documentation for C API function
+ mysql_field_count()
+ */
+ ulong get_field_count() const
+ {
+ return m_current_rset ? m_current_rset->get_field_count() : 0;
+ }
+
+ /**
+ Get the number of affected (deleted, updated)
+ rows for the current statement. Can be
+ used for statements with get_field_count() == 0.
+
+ @sa Documentation for C API function
+ mysql_affected_rows().
+ */
+ ulonglong get_affected_rows() const
+ {
+ return m_diagnostics_area.affected_rows();
+ }
+
+ /**
+ Get the last insert id, if any.
+
+ @sa Documentation for mysql_insert_id().
+ */
+ ulonglong get_last_insert_id() const
+ {
+ return m_diagnostics_area.last_insert_id();
+ }
+
+ /**
+ Get the total number of warnings for the last executed
+ statement. Note that there is only one warning list even
+ if a statement returns multiple results.
+
+ @sa Documentation for C API function
+ mysql_num_warnings().
+ */
+ ulong get_warn_count() const
+ {
+ return m_warning_info.warn_count();
+ }
+ /**
+ Get the server warnings as a result set.
+ The result set has fixed metadata:
+ The first column is the level.
+ The second is a numeric code.
+ The third is warning text.
+ */
+ List<MYSQL_ERROR> *get_warn_list() { return &m_warning_info.warn_list(); }
+ /**
+ The following members are only valid if execute_direct()
+ or move_to_next_result() returned an error.
+ They never fail, but if they are called when there is no
+ result, or no error, the result is not defined.
+ */
+ const char *get_last_error() const { return m_diagnostics_area.message(); }
+ unsigned int get_last_errno() const { return m_diagnostics_area.sql_errno(); }
+ const char *get_last_sqlstate() const { return m_diagnostics_area.get_sqlstate(); }
+
+ /**
+ Provided get_field_count() is not 0, this never fails. You don't
+ need to free the result set, this is done automatically when
+ you advance to the next result set or destroy the connection.
+ The return value is not const because the List iterator does not
+ accept const lists. Should be used when you would like Ed_connection
+ to manage result set memory for you.
+ */
+ Ed_result_set *use_result_set() { return m_current_rset; }
+ /**
+ Provided get_field_count() is not 0, this never fails. You
+ must free the returned result set. This can be called only
+ once after execute_direct().
+ Should be used when you would like to get the results
+ and destroy the connection.
+ */
+ Ed_result_set *store_result_set();
+
+ /**
+ If the query returns multiple results, this method
+ can be used to check whether there is another result set after
+ the current one.
+ Never fails.
+ */
+ bool has_next_result() const { return test(m_current_rset->m_next_rset); }
+ /**
+ Only valid to call if has_next_result() returned true.
+ Otherwise the result is undefined.
+ */
+ bool move_to_next_result()
+ {
+ m_current_rset= m_current_rset->m_next_rset;
+ return test(m_current_rset);
+ }
+
+ ~Ed_connection() { free_old_result(); }
+private:
+ Diagnostics_area m_diagnostics_area;
+ Warning_info m_warning_info;
+ /**
+ Execute direct interface does not support multi-statements, only
+ multi-results. So we never have a situation when we have
+ a mix of result sets and OK or error packets. We either
+ have a single result set, a single error, or a single OK,
+ or we have a series of result sets, followed by an OK or error.
+ */
+ THD *m_thd;
+ Ed_result_set *m_rsets;
+ Ed_result_set *m_current_rset;
+ friend class Protocol_local;
+private:
+ void free_old_result();
+ void add_result_set(Ed_result_set *ed_result_set);
+private:
+ Ed_connection(const Ed_connection &); /* not implemented */
+ Ed_connection &operator=(Ed_connection &); /* not implemented */
+};
+
+
+/** One result set column. */
+
+struct Ed_column: public LEX_STRING
+{
+ /** Implementation note: destructor for this class is never called. */
+};
+
+
+/** One result set record. */
+
+class Ed_row: public Sql_alloc
+{
+public:
+ const Ed_column &operator[](const unsigned int column_index) const
+ {
+ return *get_column(column_index);
+ }
+ const Ed_column *get_column(const unsigned int column_index) const
+ {
+ DBUG_ASSERT(column_index < size());
+ return m_column_array + column_index;
+ }
+ size_t size() const { return m_column_count; }
+
+ Ed_row(Ed_column *column_array_arg, size_t column_count_arg)
+ :m_column_array(column_array_arg),
+ m_column_count(column_count_arg)
+ {}
+private:
+ Ed_column *m_column_array;
+ size_t m_column_count; /* TODO: change to point to metadata */
+};
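+
+/*
+  End-to-end usage sketch (illustrative only, not part of this patch),
+  assuming a THD with no open tables as Ed_connection requires;
+  report_error() and handle_column() are hypothetical helpers:
+
+    Ed_connection con(thd);
+    LEX_STRING sql= { C_STRING_WITH_LEN("SELECT 1, 2") };
+    if (con.execute_direct(sql))
+      return report_error(con.get_last_errno(), con.get_last_error());
+    if (con.get_field_count())
+    {
+      List_iterator_fast<Ed_row> it(*con.use_result_set());
+      Ed_row *row;
+      while ((row= it++))
+      {
+        for (size_t i= 0; i < row->size(); i++)
+          handle_column((*row)[i]);               // Ed_column is a LEX_STRING
+      }
+    }
+*/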
+
+#endif // SQL_PREPARE_H
diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc
index 8c9b147089f..69e5bc3cbb4 100644
--- a/sql/sql_profile.cc
+++ b/sql/sql_profile.cc
@@ -47,7 +47,7 @@ const char * const _unknown_func_ = "<unknown>";
int fill_query_profile_statistics_info(THD *thd, TABLE_LIST *tables,
Item *cond)
{
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
return(thd->profiling.fill_statistics_info(thd, tables, cond));
#else
my_error(ER_FEATURE_DISABLED, MYF(0), "SHOW PROFILE", "enable-profiling");
@@ -129,7 +129,7 @@ int make_profile_table_for_show(THD *thd, ST_SCHEMA_TABLE *schema_table)
}
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
#define RUSAGE_USEC(tv) ((tv).tv_sec*1000*1000 + (tv).tv_usec)
#define RUSAGE_DIFF_USEC(tv1, tv2) (RUSAGE_USEC((tv1))-RUSAGE_USEC((tv2)))
@@ -415,7 +415,7 @@ bool PROFILING::show_profiles()
MYSQL_TYPE_DOUBLE));
field_list.push_back(new Item_empty_string("Query", 40));
- if (thd->protocol->send_fields(&field_list,
+ if (thd->protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
diff --git a/sql/sql_profile.h b/sql/sql_profile.h
index 245959e0953..bffe1cb576b 100644
--- a/sql/sql_profile.h
+++ b/sql/sql_profile.h
@@ -41,7 +41,7 @@ int make_profile_table_for_show(THD *thd, ST_SCHEMA_TABLE *schema_table);
#define PROFILE_ALL (uint)(~0)
-#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
+#if defined(ENABLED_PROFILING)
#include "mysql_priv.h"
#ifdef HAVE_SYS_RESOURCE_H
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index b8f2e1e39bf..4e5ce08ab5d 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -21,6 +21,7 @@
#include "log_event.h"
#include "rpl_filter.h"
#include <my_dir.h>
+#include "rpl_handler.h"
int max_binlog_dump_events = 0; // unlimited
my_bool opt_sporadic_binlog_dump_fail = 0;
@@ -80,6 +81,32 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
DBUG_RETURN(0);
}
+/*
+ Reset thread transmit packet buffer for event sending
+
+ This function allocates header bytes for event transmission, and
+ should be called before storing the event data in the packet buffer.
+*/
+static int reset_transmit_packet(THD *thd, ushort flags,
+ ulong *ev_offset, const char **errmsg)
+{
+ int ret= 0;
+ String *packet= &thd->packet;
+
+ /* reserve and set default header */
+ packet->length(0);
+ packet->set("\0", 1, &my_charset_bin);
+
+ if (RUN_HOOK(binlog_transmit, reserve_header, (thd, flags, packet)))
+ {
+ *errmsg= "Failed to run hook 'reserve_header'";
+ my_errno= ER_UNKNOWN_ERROR;
+ ret= 1;
+ }
+ *ev_offset= packet->length();
+ return ret;
+}
+
static int send_file(THD *thd)
{
NET* net = &thd->net;
@@ -336,6 +363,73 @@ Increase max_allowed_packet on master";
}
+/**
+ An auxiliary function called from mysql_binlog_send()
+ to initialize the heartbeat timeout used while waiting for a binlogged event.
+
+ @param[in] thd THD to access a user variable
+
+ @return heartbeat period as a ulonglong of nanoseconds,
+ or zero if a heartbeat was not requested by the slave
+*/
+static ulonglong get_heartbeat_period(THD * thd)
+{
+ my_bool null_value;
+ LEX_STRING name= { C_STRING_WITH_LEN("master_heartbeat_period")};
+ user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
+ name.length);
+ return entry? entry->val_int(&null_value) : 0;
+}
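+
+/*
+  For context (not part of this patch): the slave is expected to set the
+  @master_heartbeat_period user variable (the period in nanoseconds) on the
+  master connection before requesting the dump, so a zero return simply
+  means the slave did not ask for heartbeats.
+*/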
+
+/*
+ This function prepares and sends a replication heartbeat event.
+
+ @param net net object of THD
+ @param packet buffer to store the heartbeat instance
+ @param event_coordinates binlog file name and position of the last
+ real event master sent from binlog
+
+ @note
+ Among the three essential pieces of heartbeat data, Log_event::when
+ is computed locally.
+ A failure to send the event is serious and should force termination
+ of the dump thread.
+*/
+static int send_heartbeat_event(NET* net, String* packet,
+ const struct event_coordinates *coord)
+{
+ DBUG_ENTER("send_heartbeat_event");
+ char header[LOG_EVENT_HEADER_LEN];
+ /*
+ 'when' (the timestamp) is set to 0 so that slave could distinguish between
+ real and fake Rotate events (if necessary)
+ */
+ memset(header, 0, 4); // when
+
+ header[EVENT_TYPE_OFFSET] = HEARTBEAT_LOG_EVENT;
+
+ char* p= coord->file_name + dirname_length(coord->file_name);
+
+ uint ident_len = strlen(p);
+ ulong event_len = ident_len + LOG_EVENT_HEADER_LEN;
+ int4store(header + SERVER_ID_OFFSET, server_id);
+ int4store(header + EVENT_LEN_OFFSET, event_len);
+ int2store(header + FLAGS_OFFSET, 0);
+
+ int4store(header + LOG_POS_OFFSET, coord->pos); // log_pos
+
+ packet->append(header, sizeof(header));
+ packet->append(p, ident_len); // log_file_name
+
+ if (my_net_write(net, (uchar*) packet->ptr(), packet->length()) ||
+ net_flush(net))
+ {
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
/*
TODO: Clean up loop to only have one call to send_file()
*/
@@ -346,6 +440,9 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
LOG_INFO linfo;
char *log_file_name = linfo.log_file_name;
char search_file_name[FN_REFLEN], *name;
+
+ ulong ev_offset;
+
IO_CACHE log;
File file = -1;
String* packet = &thd->packet;
@@ -361,6 +458,30 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
DBUG_PRINT("enter",("log_ident: '%s' pos: %ld", log_ident, (long) pos));
bzero((char*) &log,sizeof(log));
+ /*
+ heartbeat_period from @master_heartbeat_period user variable
+ */
+ ulonglong heartbeat_period= get_heartbeat_period(thd);
+ struct timespec heartbeat_buf;
+ struct event_coordinates coord_buf;
+ struct timespec *heartbeat_ts= NULL;
+ struct event_coordinates *coord= NULL;
+ if (heartbeat_period != LL(0))
+ {
+ heartbeat_ts= &heartbeat_buf;
+ set_timespec_nsec(*heartbeat_ts, 0);
+ coord= &coord_buf;
+ coord->file_name= log_file_name; // initialized from what the slave remembers
+ coord->pos= pos;
+ }
+ sql_print_information("Start binlog_dump to slave_server(%d), pos(%s, %lu)",
+ thd->server_id, log_ident, (ulong)pos);
+ if (RUN_HOOK(binlog_transmit, transmit_start, (thd, flags, log_ident, pos)))
+ {
+ errmsg= "Failed to run hook 'transmit_start'";
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
#ifndef DBUG_OFF
if (opt_sporadic_binlog_dump_fail && (binlog_dump_count++ % 2))
@@ -416,11 +537,9 @@ impossible position";
goto err;
}
- /*
- We need to start a packet with something other than 255
- to distinguish it from error
- */
- packet->set("\0", 1, &my_charset_bin); /* This is the start of a new packet */
+ /* reset transmit packet for the fake rotate event below */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
/*
Tell the client about the log name with a fake Rotate event;
@@ -460,7 +579,7 @@ impossible position";
my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG;
goto err;
}
- packet->set("\0", 1, &my_charset_bin);
+
/*
Adding MAX_LOG_EVENT_HEADER_LEN, since a binlog event can become
this larger than the corresponding packet (query) sent
@@ -476,6 +595,11 @@ impossible position";
log_lock = mysql_bin_log.get_log_lock();
if (pos > BIN_LOG_HEADER_SIZE)
{
+ /* reset transmit packet for the event read from binary log
+ file */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
+
/*
Try to find a Format_description_log_event at the beginning of
the binlog
@@ -483,29 +607,30 @@ impossible position";
if (!(error = Log_event::read_log_event(&log, packet, log_lock)))
{
/*
- The packet has offsets equal to the normal offsets in a binlog
- event +1 (the first character is \0).
+ The packet has offsets equal to the normal offsets in a
+ binlog event + ev_offset (the first ev_offset characters are
+ the header (default \0)).
*/
DBUG_PRINT("info",
("Looked for a Format_description_log_event, found event type %d",
- (*packet)[EVENT_TYPE_OFFSET+1]));
- if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT)
+ (*packet)[EVENT_TYPE_OFFSET+ev_offset]));
+ if ((*packet)[EVENT_TYPE_OFFSET+ev_offset] == FORMAT_DESCRIPTION_EVENT)
{
- binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+1] &
+ binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+ev_offset] &
LOG_EVENT_BINLOG_IN_USE_F);
- (*packet)[FLAGS_OFFSET+1] &= ~LOG_EVENT_BINLOG_IN_USE_F;
+ (*packet)[FLAGS_OFFSET+ev_offset] &= ~LOG_EVENT_BINLOG_IN_USE_F;
/*
mark that this event with "log_pos=0", so the slave
should not increment master's binlog position
(rli->group_master_log_pos)
*/
- int4store((char*) packet->ptr()+LOG_POS_OFFSET+1, 0);
+ int4store((char*) packet->ptr()+LOG_POS_OFFSET+ev_offset, 0);
/*
if reconnect master sends FD event with `created' as 0
to avoid destroying temp tables.
*/
int4store((char*) packet->ptr()+LOG_EVENT_MINIMAL_HEADER_LEN+
- ST_CREATED_OFFSET+1, (ulong) 0);
+ ST_CREATED_OFFSET+ev_offset, (ulong) 0);
/* send it */
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
{
@@ -531,8 +656,6 @@ impossible position";
Format_description_log_event will be found naturally if it is written.
*/
}
- /* reset the packet as we wrote to it in any case */
- packet->set("\0", 1, &my_charset_bin);
} /* end of if (pos > BIN_LOG_HEADER_SIZE); */
else
{
@@ -544,6 +667,12 @@ impossible position";
while (!net->error && net->vio != 0 && !thd->killed)
{
+ Log_event_type event_type= UNKNOWN_EVENT;
+
+ /* reset the transmit packet for the event read from binary log
+ file */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
while (!(error = Log_event::read_log_event(&log, packet, log_lock)))
{
#ifndef DBUG_OFF
@@ -555,16 +684,31 @@ impossible position";
goto err;
}
#endif
+ /*
+ log's filename does not change while it's active
+ */
+ if (coord)
+ coord->pos= uint4korr(packet->ptr() + ev_offset + LOG_POS_OFFSET);
- if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT)
+ event_type= (Log_event_type)((*packet)[LOG_EVENT_OFFSET+ev_offset]);
+ if (event_type == FORMAT_DESCRIPTION_EVENT)
{
- binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+1] &
+ binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+ev_offset] &
LOG_EVENT_BINLOG_IN_USE_F);
- (*packet)[FLAGS_OFFSET+1] &= ~LOG_EVENT_BINLOG_IN_USE_F;
+ (*packet)[FLAGS_OFFSET+ev_offset] &= ~LOG_EVENT_BINLOG_IN_USE_F;
}
- else if ((*packet)[EVENT_TYPE_OFFSET+1] == STOP_EVENT)
+ else if (event_type == STOP_EVENT)
binlog_can_be_corrupted= FALSE;
+ pos = my_b_tell(&log);
+ if (RUN_HOOK(binlog_transmit, before_send_event,
+ (thd, flags, packet, log_file_name, pos)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ errmsg= "run 'before_send_event' hook failed";
+ goto err;
+ }
+
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()))
{
errmsg = "Failed on my_net_write()";
@@ -572,9 +716,8 @@ impossible position";
goto err;
}
- DBUG_PRINT("info", ("log event code %d",
- (*packet)[LOG_EVENT_OFFSET+1] ));
- if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT)
+ DBUG_PRINT("info", ("log event code %d", event_type));
+ if (event_type == LOAD_EVENT)
{
if (send_file(thd))
{
@@ -583,7 +726,17 @@ impossible position";
goto err;
}
}
- packet->set("\0", 1, &my_charset_bin);
+
+ if (RUN_HOOK(binlog_transmit, after_send_event, (thd, flags, packet)))
+ {
+ errmsg= "Failed to run hook 'after_send_event'";
+ my_errno= ER_UNKNOWN_ERROR;
+ goto err;
+ }
+
+ /* reset transmit packet for next loop */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
}
/*
@@ -634,6 +787,11 @@ impossible position";
}
#endif
+ /* reset the transmit packet for the event read from binary log
+ file */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
+
/*
No one will update the log while we are reading
now, but we'll be quick and just read one record
@@ -650,34 +808,86 @@ impossible position";
/* we read successfully, so we'll need to send it to the slave */
pthread_mutex_unlock(log_lock);
read_packet = 1;
+ if (coord)
+ coord->pos= uint4korr(packet->ptr() + ev_offset + LOG_POS_OFFSET);
+ event_type= (Log_event_type)((*packet)[LOG_EVENT_OFFSET+ev_offset]);
break;
case LOG_READ_EOF:
+ {
+ int ret;
+ ulong signal_cnt;
DBUG_PRINT("wait",("waiting for data in binary log"));
if (thd->server_id==0) // for mysqlbinlog (mysqlbinlog.server_id==0)
{
pthread_mutex_unlock(log_lock);
goto end;
}
- if (!thd->killed)
- {
- /* Note that the following call unlocks lock_log */
- mysql_bin_log.wait_for_update(thd, 0);
- }
- else
- pthread_mutex_unlock(log_lock);
- DBUG_PRINT("wait",("binary log received update"));
- break;
- default:
+#ifndef DBUG_OFF
+ ulong hb_info_counter= 0;
+#endif
+ signal_cnt= mysql_bin_log.signal_cnt;
+ do
+ {
+ if (coord)
+ {
+ DBUG_ASSERT(heartbeat_ts && heartbeat_period != LL(0));
+ set_timespec_nsec(*heartbeat_ts, heartbeat_period);
+ }
+ ret= mysql_bin_log.wait_for_update_bin_log(thd, heartbeat_ts);
+ DBUG_ASSERT(ret == 0 || heartbeat_period != LL(0) && coord != NULL);
+ if (ret == ETIMEDOUT || ret == ETIME)
+ {
+#ifndef DBUG_OFF
+ if (hb_info_counter < 3)
+ {
+ sql_print_information("master sends heartbeat message");
+ hb_info_counter++;
+ if (hb_info_counter == 3)
+ sql_print_information("the rest of heartbeat info skipped ...");
+ }
+#endif
+ /* reset transmit packet for the heartbeat event */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
+ if (send_heartbeat_event(net, packet, coord))
+ {
+ errmsg = "Failed on my_net_write()";
+ my_errno= ER_UNKNOWN_ERROR;
+ pthread_mutex_unlock(log_lock);
+ goto err;
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(ret == 0 && signal_cnt != mysql_bin_log.signal_cnt ||
+ thd->killed);
+ DBUG_PRINT("wait",("binary log received update"));
+ }
+ } while (signal_cnt == mysql_bin_log.signal_cnt && !thd->killed);
+ pthread_mutex_unlock(log_lock);
+ }
+ break;
+
+ default:
pthread_mutex_unlock(log_lock);
test_for_non_eof_log_read_errors(error, &errmsg);
goto err;
}
if (read_packet)
- {
- thd_proc_info(thd, "Sending binlog event to slave");
+ {
+ thd_proc_info(thd, "Sending binlog event to slave");
+ pos = my_b_tell(&log);
+ if (RUN_HOOK(binlog_transmit, before_send_event,
+ (thd, flags, packet, log_file_name, pos)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ errmsg= "run 'before_send_event' hook failed";
+ goto err;
+ }
+
if (my_net_write(net, (uchar*) packet->ptr(), packet->length()) )
{
errmsg = "Failed on my_net_write()";
@@ -685,7 +895,7 @@ impossible position";
goto err;
}
- if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT)
+ if (event_type == LOAD_EVENT)
{
if (send_file(thd))
{
@@ -694,11 +904,13 @@ impossible position";
goto err;
}
}
- packet->set("\0", 1, &my_charset_bin);
- /*
- No need to net_flush because we will get to flush later when
- we hit EOF pretty quick
- */
+
+ if (RUN_HOOK(binlog_transmit, after_send_event, (thd, flags, packet)))
+ {
+ my_errno= ER_UNKNOWN_ERROR;
+ errmsg= "Failed to run hook 'after_send_event'";
+ goto err;
+ }
}
log.error=0;
@@ -728,6 +940,10 @@ impossible position";
end_io_cache(&log);
(void) my_close(file, MYF(MY_WME));
+ /* reset transmit packet for the possible fake rotate event */
+ if (reset_transmit_packet(thd, flags, &ev_offset, &errmsg))
+ goto err;
+
/*
Call fake_rotate_event() in case the previous log (the one which
we have just finished reading) did not contain a Rotate event
@@ -745,8 +961,8 @@ impossible position";
goto err;
}
- packet->length(0);
- packet->append('\0');
+ if (coord)
+ coord->file_name= log_file_name; // reset to the next
}
}
@@ -754,6 +970,7 @@ end:
end_io_cache(&log);
(void)my_close(file, MYF(MY_WME));
+ RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
my_eof(thd);
thd_proc_info(thd, "Waiting to finalize termination");
pthread_mutex_lock(&LOCK_thread_count);
@@ -764,6 +981,7 @@ end:
err:
thd_proc_info(thd, "Waiting to finalize termination");
end_io_cache(&log);
+ RUN_HOOK(binlog_transmit, transmit_stop, (thd, flags));
/*
Exclude iteration through thread list
this is needed for purge_logs() - it will iterate through
@@ -790,7 +1008,7 @@ err:
@param mi Pointer to Master_info object for the slave's IO thread.
- @param net_report If true, saves the exit status into thd->main_da.
+ @param net_report If true, saves the exit status into thd->stmt_da.
@retval 0 success
@retval 1 error
@@ -928,7 +1146,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
@param mi Pointer to Master_info object for the slave's IO thread.
- @param net_report If true, saves the exit status into thd->main_da.
+ @param net_report If true, saves the exit status into thd->stmt_da.
@retval 0 success
@retval 1 error
@@ -1058,6 +1276,7 @@ int reset_slave(THD *thd, Master_info* mi)
goto err;
}
+ RUN_HOOK(binlog_relay_io, after_reset_slave, (thd, mi));
err:
unlock_slave_threads(mi);
if (error)
@@ -1131,26 +1350,40 @@ bool change_master(THD* thd, Master_info* mi)
int thread_mask;
const char* errmsg= 0;
bool need_relay_log_purge= 1;
+ bool ret= FALSE;
DBUG_ENTER("change_master");
lock_slave_threads(mi);
init_thread_mask(&thread_mask,mi,0 /*not inverse*/);
+ LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
if (thread_mask) // We refuse if any slave thread is running
{
my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0));
- unlock_slave_threads(mi);
- DBUG_RETURN(TRUE);
+ ret= TRUE;
+ goto err;
}
thd_proc_info(thd, "Changing master");
- LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
+ /*
+ We need to check for an empty master_host. Otherwise CHANGE MASTER
+ succeeds, a master.info file is created containing an empty
+ master_host string, and when START SLAVE is issued an error is
+ thrown stating that the server is not configured as a slave.
+ (See BUG#28796).
+ */
+ if(lex_mi->host && !*lex_mi->host)
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), "MASTER_HOST");
+ unlock_slave_threads(mi);
+ DBUG_RETURN(TRUE);
+ }
// TODO: see if needs re-write
if (init_master_info(mi, master_info_file, relay_log_info_file, 0,
thread_mask))
{
my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0));
- unlock_slave_threads(mi);
- DBUG_RETURN(TRUE);
+ ret= TRUE;
+ goto err;
}
/*
@@ -1189,13 +1422,46 @@ bool change_master(THD* thd, Master_info* mi)
mi->port = lex_mi->port;
if (lex_mi->connect_retry)
mi->connect_retry = lex_mi->connect_retry;
+ if (lex_mi->heartbeat_opt != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
+ mi->heartbeat_period = lex_mi->heartbeat_period;
+ else
+ mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD,
+ (slave_net_timeout/2.0));
+ mi->received_heartbeats= LL(0); // counter lives until master is CHANGEd
+ /*
+ reset the previously stored server_id list if the current CHANGE MASTER
+ mentions IGNORE_SERVER_IDS= (...)
+ */
+ if (lex_mi->repl_ignore_server_ids_opt == LEX_MASTER_INFO::LEX_MI_ENABLE)
+ reset_dynamic(&mi->ignore_server_ids);
+ for (uint i= 0; i < lex_mi->repl_ignore_server_ids.elements; i++)
+ {
+ ulong s_id;
+ get_dynamic(&lex_mi->repl_ignore_server_ids, (uchar*) &s_id, i);
+ if (s_id == ::server_id && replicate_same_server_id)
+ {
+ my_error(ER_SLAVE_IGNORE_SERVER_IDS, MYF(0), s_id);
+ ret= TRUE;
+ goto err;
+ }
+ else
+ {
+ if (bsearch((const ulong *) &s_id,
+ mi->ignore_server_ids.buffer,
+ mi->ignore_server_ids.elements, sizeof(ulong),
+ (int (*) (const void*, const void*))
+ change_master_server_id_cmp) == NULL)
+ insert_dynamic(&mi->ignore_server_ids, (uchar*) &s_id);
+ }
+ }
+ sort_dynamic(&mi->ignore_server_ids, (qsort_cmp) change_master_server_id_cmp);
- if (lex_mi->ssl != LEX_MASTER_INFO::SSL_UNCHANGED)
- mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::SSL_ENABLE);
+ if (lex_mi->ssl != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
+ mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::LEX_MI_ENABLE);
- if (lex_mi->ssl_verify_server_cert != LEX_MASTER_INFO::SSL_UNCHANGED)
+ if (lex_mi->ssl_verify_server_cert != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->ssl_verify_server_cert=
- (lex_mi->ssl_verify_server_cert == LEX_MASTER_INFO::SSL_ENABLE);
+ (lex_mi->ssl_verify_server_cert == LEX_MASTER_INFO::LEX_MI_ENABLE);
if (lex_mi->ssl_ca)
strmake(mi->ssl_ca, lex_mi->ssl_ca, sizeof(mi->ssl_ca)-1);
@@ -1218,9 +1484,11 @@ bool change_master(THD* thd, Master_info* mi)
if (lex_mi->relay_log_name)
{
need_relay_log_purge= 0;
- strmake(mi->rli.group_relay_log_name,lex_mi->relay_log_name,
+ char relay_log_name[FN_REFLEN];
+ mi->rli.relay_log.make_log_name(relay_log_name, lex_mi->relay_log_name);
+ strmake(mi->rli.group_relay_log_name, relay_log_name,
sizeof(mi->rli.group_relay_log_name)-1);
- strmake(mi->rli.event_relay_log_name,lex_mi->relay_log_name,
+ strmake(mi->rli.event_relay_log_name, relay_log_name,
sizeof(mi->rli.event_relay_log_name)-1);
}
@@ -1267,8 +1535,8 @@ bool change_master(THD* thd, Master_info* mi)
if (flush_master_info(mi, 0))
{
my_error(ER_RELAY_LOG_INIT, MYF(0), "Failed to flush master info file");
- unlock_slave_threads(mi);
- DBUG_RETURN(TRUE);
+ ret= TRUE;
+ goto err;
}
if (need_relay_log_purge)
{
@@ -1279,8 +1547,8 @@ bool change_master(THD* thd, Master_info* mi)
&errmsg))
{
my_error(ER_RELAY_LOG_FAIL, MYF(0), errmsg);
- unlock_slave_threads(mi);
- DBUG_RETURN(TRUE);
+ ret= TRUE;
+ goto err;
}
}
else
@@ -1295,8 +1563,8 @@ bool change_master(THD* thd, Master_info* mi)
&msg, 0))
{
my_error(ER_RELAY_LOG_INIT, MYF(0), msg);
- unlock_slave_threads(mi);
- DBUG_RETURN(TRUE);
+ ret= TRUE;
+ goto err;
}
}
/*
@@ -1333,10 +1601,13 @@ bool change_master(THD* thd, Master_info* mi)
pthread_cond_broadcast(&mi->data_cond);
pthread_mutex_unlock(&mi->rli.data_lock);
+err:
unlock_slave_threads(mi);
thd_proc_info(thd, 0);
- my_ok(thd);
- DBUG_RETURN(FALSE);
+ if (ret == FALSE)
+ my_ok(thd);
+ delete_dynamic(&lex_mi->repl_ignore_server_ids); //freeing of parser-time alloc
+ DBUG_RETURN(ret);
}
@@ -1357,7 +1628,11 @@ int reset_master(THD* thd)
ER(ER_FLUSH_MASTER_BINLOG_CLOSED), MYF(ME_BELL+ME_WAITTANG));
return 1;
}
- return mysql_bin_log.reset_logs(thd);
+
+ if (mysql_bin_log.reset_logs(thd))
+ return 1;
+ RUN_HOOK(binlog_transmit, after_reset_master, (thd, 0 /* flags */));
+ return 0;
}
int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1,
@@ -1395,24 +1670,41 @@ bool mysql_show_binlog_events(THD* thd)
bool ret = TRUE;
IO_CACHE log;
File file = -1;
+ MYSQL_BIN_LOG *binary_log= NULL;
DBUG_ENTER("mysql_show_binlog_events");
Log_event::init_show_field_list(&field_list);
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
Format_description_log_event *description_event= new
Format_description_log_event(3); /* MySQL 4.0 by default */
- /*
- Wait for handlers to insert any pending information
- into the binlog. For e.g. ndb which updates the binlog asynchronously
- this is needed so that the uses sees all its own commands in the binlog
- */
- ha_binlog_wait(thd);
+ DBUG_ASSERT(thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS ||
+ thd->lex->sql_command == SQLCOM_SHOW_RELAYLOG_EVENTS);
- if (mysql_bin_log.is_open())
+ /* select which binary log to use: binlog or relay */
+ if ( thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS )
+ {
+ /*
+ Wait for handlers to insert any pending information
+ into the binlog. For e.g. ndb which updates the binlog asynchronously
+ this is needed so that the user sees all its own commands in the binlog
+ */
+ ha_binlog_wait(thd);
+
+ binary_log= &mysql_bin_log;
+ }
+ else /* showing relay log contents */
+ {
+ if (!active_mi)
+ DBUG_RETURN(TRUE);
+
+ binary_log= &(active_mi->rli.relay_log);
+ }
+
+ if (binary_log->is_open())
{
LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
SELECT_LEX_UNIT *unit= &thd->lex->unit;
@@ -1420,7 +1712,7 @@ bool mysql_show_binlog_events(THD* thd)
my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly
char search_file_name[FN_REFLEN], *name;
const char *log_file_name = lex_mi->log_file_name;
- pthread_mutex_t *log_lock = mysql_bin_log.get_log_lock();
+ pthread_mutex_t *log_lock = binary_log->get_log_lock();
LOG_INFO linfo;
Log_event* ev;
@@ -1430,13 +1722,13 @@ bool mysql_show_binlog_events(THD* thd)
name= search_file_name;
if (log_file_name)
- mysql_bin_log.make_log_name(search_file_name, log_file_name);
+ binary_log->make_log_name(search_file_name, log_file_name);
else
name=0; // Find first log
linfo.index_file_offset = 0;
- if (mysql_bin_log.find_log_pos(&linfo, name, 1))
+ if (binary_log->find_log_pos(&linfo, name, 1))
{
errmsg = "Could not find target log";
goto err;
@@ -1557,7 +1849,7 @@ bool show_binlog_info(THD* thd)
field_list.push_back(new Item_empty_string("Binlog_Do_DB",255));
field_list.push_back(new Item_empty_string("Binlog_Ignore_DB",255));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
protocol->prepare_for_resend();
@@ -1603,13 +1895,13 @@ bool show_binlogs(THD* thd)
if (!mysql_bin_log.is_open())
{
my_message(ER_NO_BINARY_LOGGING, ER(ER_NO_BINARY_LOGGING), MYF(0));
- return 1;
+ DBUG_RETURN(TRUE);
}
field_list.push_back(new Item_empty_string("Log_name", 255));
field_list.push_back(new Item_return_int("File_size", 20,
MYSQL_TYPE_LONGLONG));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -1739,6 +2031,26 @@ public:
bool update(THD *thd, set_var *var);
};
+static void fix_slave_net_timeout(THD *thd, enum_var_type type)
+{
+ DBUG_ENTER("fix_slave_net_timeout");
+#ifdef HAVE_REPLICATION
+ pthread_mutex_lock(&LOCK_active_mi);
+ DBUG_PRINT("info",("slave_net_timeout=%lu mi->heartbeat_period=%.3f",
+ slave_net_timeout,
+ (active_mi? active_mi->heartbeat_period : 0.0)));
+ if (active_mi && slave_net_timeout < active_mi->heartbeat_period)
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
+ "The currect value for master_heartbeat_period"
+ " exceeds the new value of `slave_net_timeout' sec."
+ " A sensible value for the period should be"
+ " less than the timeout.");
+ pthread_mutex_unlock(&LOCK_active_mi);
+#endif
+ DBUG_VOID_RETURN;
+}
+
static sys_var_chain vars = { NULL, NULL };
static sys_var_const sys_log_slave_updates(&vars, "log_slave_updates",
@@ -1755,6 +2067,16 @@ static sys_var_const sys_relay_log_info_file(&vars, "relay_log_info_file",
(uchar*) &relay_log_info_file);
static sys_var_bool_ptr sys_relay_log_purge(&vars, "relay_log_purge",
&relay_log_purge);
+static sys_var_bool_ptr sys_relay_log_recovery(&vars, "relay_log_recovery",
+ &relay_log_recovery);
+static sys_var_uint_ptr sys_sync_binlog_period(&vars, "sync_binlog",
+ &sync_binlog_period);
+static sys_var_uint_ptr sys_sync_relaylog_period(&vars, "sync_relay_log",
+ &sync_relaylog_period);
+static sys_var_uint_ptr sys_sync_relayloginfo_period(&vars, "sync_relay_log_info",
+ &sync_relayloginfo_period);
+static sys_var_uint_ptr sys_sync_masterinfo_period(&vars, "sync_master_info",
+ &sync_masterinfo_period);
static sys_var_const sys_relay_log_space_limit(&vars,
"relay_log_space_limit",
OPT_GLOBAL, SHOW_LONGLONG,
@@ -1764,13 +2086,13 @@ static sys_var_const sys_slave_load_tmpdir(&vars, "slave_load_tmpdir",
OPT_GLOBAL, SHOW_CHAR_PTR,
(uchar*) &slave_load_tmpdir);
static sys_var_long_ptr sys_slave_net_timeout(&vars, "slave_net_timeout",
- &slave_net_timeout);
+ &slave_net_timeout,
+ fix_slave_net_timeout);
static sys_var_const sys_slave_skip_errors(&vars, "slave_skip_errors",
OPT_GLOBAL, SHOW_CHAR,
(uchar*) slave_skip_error_names);
static sys_var_long_ptr sys_slave_trans_retries(&vars, "slave_transaction_retries",
&slave_trans_retries);
-static sys_var_sync_binlog_period sys_sync_binlog_period(&vars, "sync_binlog", &sync_binlog_period);
static sys_var_slave_skip_counter sys_slave_skip_counter(&vars, "sql_slave_skip_counter");
@@ -1812,12 +2134,6 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var)
}
-bool sys_var_sync_binlog_period::update(THD *thd, set_var *var)
-{
- sync_binlog_period= (ulong) var->save_result.ulonglong_value;
- return 0;
-}
-
int init_replication_sys_vars()
{
if (mysql_add_sys_var_chain(vars.first, my_long_options))
@@ -1829,6 +2145,5 @@ int init_replication_sys_vars()
return 0;
}
-#endif /* HAVE_REPLICATION */
-
+#endif /* HAVE_REPLICATION */
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index d5c9040f8dc..aa71ac96ff8 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -13,6 +13,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifndef SQL_REPL_INCLUDED
+#define SQL_REPL_INCLUDED
+
#include "rpl_filter.h"
#ifdef HAVE_REPLICATION
@@ -65,3 +68,4 @@ int init_replication_sys_vars();
#endif /* HAVE_REPLICATION */
+#endif /* SQL_REPL_INCLUDED */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index d9549002121..18c79f6827d 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -223,6 +223,7 @@ static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table);
static void copy_sum_funcs(Item_sum **func_ptr, Item_sum **end);
static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab);
static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr);
+static bool prepare_sum_aggregators(Item_sum **func_ptr, bool need_distinct);
static bool init_sum_functions(Item_sum **func, Item_sum **end);
static bool update_sum_func(Item_sum **func);
static void select_describe(JOIN *join, bool need_tmp_table,bool need_order,
@@ -1026,13 +1027,13 @@ JOIN::optimize()
}
if (const_tables && !thd->locked_tables &&
!(select_options & SELECT_NO_UNLOCK))
- mysql_unlock_some_tables(thd, table, const_tables);
+ mysql_unlock_some_tables(thd, all_tables, const_tables);
if (!conds && outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
conds=new Item_int((longlong) 1,1); // Always true
}
- select= make_select(*table, const_table_map,
+ select= make_select(*all_tables, const_table_map,
const_table_map, conds, 1, &error);
if (error)
{ /* purecov: inspected */
@@ -1269,7 +1270,11 @@ JOIN::optimize()
if (test_if_subpart(group_list, order) ||
(!group_list && tmp_table_param.sum_func_count))
+ {
order=0;
+ if (is_indexed_agg_distinct(this, NULL))
+ sort_and_group= 0;
+ }
// Can't use sort on head table if using join buffering
if (full_join)
@@ -1459,8 +1464,16 @@ JOIN::optimize()
single table queries, thus it is sufficient to test only the first
join_tab element of the plan for its access method.
*/
+ bool need_distinct= TRUE;
if (join_tab->is_using_loose_index_scan())
+ {
tmp_table_param.precomputed_group_by= TRUE;
+ if (join_tab->is_using_agg_loose_index_scan())
+ {
+ need_distinct= FALSE;
+ tmp_table_param.precomputed_group_by= FALSE;
+ }
+ }
/* Create a tmp table if distinct or if the sort is too complicated */
if (need_tmp)
@@ -1521,6 +1534,7 @@ JOIN::optimize()
HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
alloc_group_fields(this, group_list) ||
make_sum_func_list(all_fields, fields_list, 1) ||
+ prepare_sum_aggregators(sum_funcs, need_distinct) ||
setup_sum_funcs(thd, sum_funcs))
{
DBUG_RETURN(1);
@@ -1530,6 +1544,7 @@ JOIN::optimize()
else
{
if (make_sum_func_list(all_fields, fields_list, 0) ||
+ prepare_sum_aggregators(sum_funcs, need_distinct) ||
setup_sum_funcs(thd, sum_funcs))
{
DBUG_RETURN(1);
@@ -1714,8 +1729,8 @@ JOIN::exec()
(zero_result_cause?zero_result_cause:"No tables used"));
else
{
- if (result->send_fields(*columns_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ if (result->send_result_set_metadata(*columns_list,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
{
DBUG_VOID_RETURN;
}
@@ -2002,7 +2017,9 @@ JOIN::exec()
}
}
if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list,
- 1, TRUE))
+ 1, TRUE) ||
+ prepare_sum_aggregators(curr_join->sum_funcs,
+ !curr_join->join_tab->is_using_agg_loose_index_scan()))
DBUG_VOID_RETURN;
curr_join->group_list= 0;
if (!curr_join->sort_and_group &&
@@ -2106,13 +2123,17 @@ JOIN::exec()
if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list,
1, TRUE) ||
+ prepare_sum_aggregators(curr_join->sum_funcs,
+ !curr_join->join_tab ||
+ !curr_join->join_tab->
+ is_using_agg_loose_index_scan()) ||
setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
thd->is_fatal_error)
DBUG_VOID_RETURN;
}
if (curr_join->group_list || curr_join->order)
{
- DBUG_PRINT("info",("Sorting for send_fields"));
+ DBUG_PRINT("info",("Sorting for send_result_set_metadata"));
thd_proc_info(thd, "Sorting result");
/* If we have already done the group, add HAVING to sorted table */
if (curr_join->tmp_having && ! curr_join->group_list &&
@@ -2248,7 +2269,7 @@ JOIN::exec()
{
thd_proc_info(thd, "Sending data");
DBUG_PRINT("info", ("%s", thd->proc_info));
- result->send_fields((procedure ? curr_join->procedure_fields_list :
+ result->send_result_set_metadata((procedure ? curr_join->procedure_fields_list :
*curr_fields_list),
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
error= do_select(curr_join, curr_fields_list, NULL, procedure);
@@ -2695,7 +2716,10 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
goto error; // Fatal error
}
else
+ {
found_const_table_map|= s->table->map;
+ s->table->pos_in_table_list->optimized_away= TRUE;
+ }
}
/* loop until no more const tables are found */
@@ -2932,7 +2956,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
join->join_tab=stat;
join->map2table=stat_ref;
- join->table= join->all_tables=table_vector;
+ join->all_tables= table_vector;
join->const_tables=const_count;
join->found_const_table_map=found_const_table_map;
@@ -3637,16 +3661,16 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
Item_func *arg0=(Item_func *)(func->arguments()[0]),
*arg1=(Item_func *)(func->arguments()[1]);
if (arg1->const_item() &&
- ((functype == Item_func::GE_FUNC && arg1->val_real() > 0) ||
- (functype == Item_func::GT_FUNC && arg1->val_real() >=0)) &&
arg0->type() == Item::FUNC_ITEM &&
- arg0->functype() == Item_func::FT_FUNC)
+ arg0->functype() == Item_func::FT_FUNC &&
+ ((functype == Item_func::GE_FUNC && arg1->val_real() > 0) ||
+ (functype == Item_func::GT_FUNC && arg1->val_real() >=0)))
cond_func=(Item_func_match *) arg0;
else if (arg0->const_item() &&
- ((functype == Item_func::LE_FUNC && arg0->val_real() > 0) ||
- (functype == Item_func::LT_FUNC && arg0->val_real() >=0)) &&
arg1->type() == Item::FUNC_ITEM &&
- arg1->functype() == Item_func::FT_FUNC)
+ arg1->functype() == Item_func::FT_FUNC &&
+ ((functype == Item_func::LE_FUNC && arg0->val_real() > 0) ||
+ (functype == Item_func::LT_FUNC && arg0->val_real() >=0)))
cond_func=(Item_func_match *) arg1;
}
}
@@ -3984,6 +4008,82 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
/**
+ Check for the presence of AGGFN(DISTINCT a) queries that may be subject
+ to loose index scan.
+
+
+ Check if the query is a subject to AGGFN(DISTINCT) using loose index scan
+ (QUICK_GROUP_MIN_MAX_SELECT).
+ Optionally (if out_args is supplied) will push the arguments of
+ AGGFN(DISTINCT) to the list
+
+ @param join the join to check
+ @param[out] out_args list of aggregate function arguments
+ @return does the query qualify for indexed AGGFN(DISTINCT)
+ @retval true it does
+ @retval false AGGFN(DISTINCT) must apply the DISTINCT filter itself.
+*/
+
+bool
+is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args)
+{
+ Item_sum **sum_item_ptr;
+ bool result= false;
+
+ if (join->tables != 1 || /* reference more than 1 table */
+ join->select_distinct || /* or a DISTINCT */
+ join->select_lex->olap == ROLLUP_TYPE) /* Check (B3) for ROLLUP */
+ return false;
+
+ if (join->make_sum_func_list(join->all_fields, join->fields_list, true))
+ return false;
+
+ for (sum_item_ptr= join->sum_funcs; *sum_item_ptr; sum_item_ptr++)
+ {
+ Item_sum *sum_item= *sum_item_ptr;
+ Item *expr;
+ /* aggregate is not AGGFN(DISTINCT) or more than 1 argument to it */
+ switch (sum_item->sum_func())
+ {
+ case Item_sum::MIN_FUNC:
+ case Item_sum::MAX_FUNC:
+ continue;
+ case Item_sum::COUNT_DISTINCT_FUNC:
+ break;
+ case Item_sum::AVG_DISTINCT_FUNC:
+ case Item_sum::SUM_DISTINCT_FUNC:
+ if (sum_item->get_arg_count() == 1)
+ break;
+ /* fall through */
+ default: return false;
+ }
+ /*
+ We arrive here for every COUNT(DISTINCT), AVG(DISTINCT) or SUM(DISTINCT).
+ Collect the arguments of the aggregate functions into a list.
+ We don't worry about duplicates as these will be sorted out later in
+ get_best_group_min_max.
+ */
+ for (uint i= 0; i < sum_item->get_arg_count(); i++)
+ {
+ expr= sum_item->get_arg(i);
+ /* The AGGFN(DISTINCT) arg is not an attribute? */
+ if (expr->real_item()->type() != Item::FIELD_ITEM)
+ return false;
+
+ /*
+ If we came to this point the AGGFN(DISTINCT) loose index scan
+ optimization is applicable
+ */
+ if (out_args)
+ out_args->push_back((Item_field *) expr);
+ result= true;
+ }
+ }
+ return result;
+}
+
+
+/**
Discover the indexes that can be used for GROUP BY or DISTINCT queries.
If the query has a GROUP BY clause, find all indexes that contain all
@@ -4025,6 +4125,10 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
item->walk(&Item::collect_item_field_processor, 0,
(uchar*) &indexed_fields);
}
+ else if (is_indexed_agg_distinct(join, &indexed_fields))
+ {
+ join->sort_and_group= 1;
+ }
else
return;
@@ -4272,7 +4376,8 @@ best_access_path(JOIN *join,
in ReuseRangeEstimateForRef-3.
*/
if (table->quick_keys.is_set(key) &&
- const_part & (1 << table->quick_key_parts[key]) &&
+ (const_part & ((1 << table->quick_key_parts[key])-1)) ==
+ ((1 << table->quick_key_parts[key])-1) &&
table->quick_n_ranges[key] == 1 &&
records > (double) table->quick_rows[key])
{
@@ -5005,8 +5110,8 @@ greedy_search(JOIN *join,
the interleaving state to the one of the non-extended partial plan
on exit.
*/
- IF_DBUG(bool is_interleave_error= )
- check_interleaving_with_nj (best_table);
+ bool is_interleave_error __attribute__((unused))=
+ check_interleaving_with_nj (best_table);
/* This has been already checked by best_extension_by_limited_search */
DBUG_ASSERT(!is_interleave_error);
@@ -5542,7 +5647,7 @@ get_best_combination(JOIN *join)
{
TABLE *form;
*j= *join->best_positions[tablenr].table;
- form=join->table[tablenr]=j->table;
+ form=join->all_tables[tablenr]=j->table;
used_tables|= form->map;
form->reginfo.join_tab=j;
if (!*j->on_expr_ref)
@@ -5816,7 +5921,7 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
DBUG_RETURN(TRUE); /* purecov: inspected */
join_tab= parent->join_tab_reexec;
- table= &parent->table_reexec[0]; parent->table_reexec[0]= tmp_table;
+ parent->table_reexec[0]= tmp_table;
tables= 1;
const_tables= 0;
const_table_map= 0;
@@ -6873,24 +6978,23 @@ void JOIN::cleanup(bool full)
{
DBUG_ENTER("JOIN::cleanup");
- if (table)
+ if (all_tables)
{
JOIN_TAB *tab,*end;
/*
Only a sorted table may be cached. This sorted table is always the
- first non const table in join->table
+ first non const table in join->all_tables
*/
if (tables > const_tables) // Test for not-const tables
{
- free_io_cache(table[const_tables]);
- filesort_free_buffers(table[const_tables],full);
+ free_io_cache(all_tables[const_tables]);
+ filesort_free_buffers(all_tables[const_tables],full);
}
if (full)
{
for (tab= join_tab, end= tab+tables; tab != end; tab++)
tab->cleanup();
- table= 0;
}
else
{
@@ -7189,7 +7293,7 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
if (having && having->val_int() == 0)
send_row=0;
}
- if (!(result->send_fields(fields,
+ if (!(result->send_result_set_metadata(fields,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)))
{
bool send_error= FALSE;
@@ -7219,7 +7323,7 @@ static void clear_tables(JOIN *join)
are not re-calculated.
*/
for (uint i=join->const_tables ; i < join->tables ; i++)
- mark_as_null_row(join->table[i]); // All fields are NULL
+ mark_as_null_row(join->all_tables[i]); // All fields are NULL
}
/*****************************************************************************
@@ -9164,7 +9268,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
thd->substitute_null_with_insert_id))
{
#ifdef HAVE_QUERY_CACHE
- query_cache_abort(&thd->net);
+ query_cache_abort(&thd->query_cache_tls);
#endif
COND *new_cond;
if ((new_cond= new Item_func_eq(args[0],
@@ -9714,7 +9818,7 @@ void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
Create a temp table according to a field list.
Given field pointers are changed to point at tmp_table for
- send_fields. The table object is self contained: it's
+ send_result_set_metadata. The table object is self contained: it's
allocated in its own memory root, as well as Field objects
created for table columns.
This function will replace Item_sum items in 'fields' list with
@@ -9899,6 +10003,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
share->primary_key= MAX_KEY; // Indicate no primary key
share->keys_for_keyread.init();
share->keys_in_use.init();
+ if (param->schema_table)
+ share->db= INFORMATION_SCHEMA_NAME;
/* Calculate which type of fields we will store in the temporary table */
@@ -10454,6 +10560,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
bzero(share, sizeof(*share));
table->field= field;
table->s= share;
+ table->temp_pool_slot= MY_BIT_NONE;
share->blob_field= blob_field;
share->fields= field_count;
share->blob_ptr_size= portable_sizeof_char_ptr;
@@ -10957,7 +11064,6 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
so we don't touch it here.
*/
join->examined_rows++;
- join->thd->row_count++;
DBUG_ASSERT(join->examined_rows <= 1);
}
else if (join->send_row_on_empty_set())
@@ -10979,26 +11085,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
if (error == NESTED_LOOP_NO_MORE_ROWS)
error= NESTED_LOOP_OK;
- if (error == NESTED_LOOP_OK)
- {
- /*
- Sic: this branch works even if rc != 0, e.g. when
- send_data above returns an error.
- */
- if (!table) // If sending data to client
- {
- /*
- The following will unlock all cursors if the command wasn't an
- update command
- */
- join->join_free(); // Unlock all cursors
- if (join->result->send_eof())
- rc= 1; // Don't send error
- }
- DBUG_PRINT("info",("%ld records output", (long) join->send_records));
- }
- else
- rc= -1;
+
if (table)
{
int tmp, new_errno= 0;
@@ -11015,6 +11102,29 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
if (new_errno)
table->file->print_error(new_errno,MYF(0));
}
+ else
+ {
+ /*
+ The following will unlock all cursors if the command wasn't an
+ update command
+ */
+ join->join_free(); // Unlock all cursors
+ }
+ if (error == NESTED_LOOP_OK)
+ {
+ /*
+ Sic: this branch works even if rc != 0, e.g. when
+ send_data above returns an error.
+ */
+ if (!table) // If sending data to client
+ {
+ if (join->result->send_eof())
+ rc= 1; // Don't send error
+ }
+ DBUG_PRINT("info",("%ld records output", (long) join->send_records));
+ }
+ else
+ rc= -1;
#ifndef DBUG_OFF
if (rc)
{
@@ -11211,7 +11321,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
/* Set first_unmatched for the last inner table of this group */
join_tab->last_inner->first_unmatched= join_tab;
}
- join->thd->row_count= 0;
+ join->thd->warning_info->reset_current_row_for_warning();
error= (*join_tab->read_first_record)(join_tab);
rc= evaluate_join_record(join, join_tab, error);
@@ -11332,7 +11442,6 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
(See above join->return_tab= tab).
*/
join->examined_rows++;
- join->thd->row_count++;
DBUG_PRINT("counts", ("join->examined_rows++: %lu",
(ulong) join->examined_rows));
@@ -11341,6 +11450,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
enum enum_nested_loop_state rc;
/* A match from join_tab is found for the current partial join. */
rc= (*join_tab->next_select)(join, join_tab+1, 0);
+ join->thd->warning_info->inc_current_row_for_warning();
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
return rc;
if (join->return_tab < join_tab)
@@ -11354,7 +11464,10 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
return NESTED_LOOP_NO_MORE_ROWS;
}
else
+ {
+ join->thd->warning_info->inc_current_row_for_warning();
join_tab->read_record.unlock_row(join_tab);
+ }
}
else
{
@@ -11363,7 +11476,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
with the beginning coinciding with the current partial join.
*/
join->examined_rows++;
- join->thd->row_count++;
+ join->thd->warning_info->inc_current_row_for_warning();
join_tab->read_record.unlock_row(join_tab);
}
return NESTED_LOOP_OK;
@@ -11907,10 +12020,8 @@ join_init_quick_read_record(JOIN_TAB *tab)
}
-int rr_sequential(READ_RECORD *info);
-int init_read_record_seq(JOIN_TAB *tab)
+int read_first_record_seq(JOIN_TAB *tab)
{
- tab->read_record.read_record= rr_sequential;
if (tab->read_record.file->ha_rnd_init(1))
return 1;
return (*tab->read_record.read_record)(&tab->read_record);
@@ -13922,8 +14033,8 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
extra_length= ALIGN_SIZE(key_length)-key_length;
}
- if (hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
- key_length, (hash_get_key) 0, 0, 0))
+ if (my_hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
+ key_length, (my_hash_get_key) 0, 0, 0))
{
my_free((char*) key_buffer,MYF(0));
DBUG_RETURN(1);
@@ -13964,7 +14075,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
key_pos+= *field_length++;
}
/* Check if it exists before */
- if (hash_search(&hash, org_key_pos, key_length))
+ if (my_hash_search(&hash, org_key_pos, key_length))
{
/* Duplicated found ; Remove the row */
if ((error=file->ha_delete_row(record)))
@@ -13978,14 +14089,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
key_pos+=extra_length;
}
my_free((char*) key_buffer,MYF(0));
- hash_free(&hash);
+ my_hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->ha_rnd_end();
DBUG_RETURN(0);
err:
my_free((char*) key_buffer,MYF(0));
- hash_free(&hash);
+ my_hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->ha_rnd_end();
if (error)
@@ -14666,7 +14777,7 @@ setup_new_fields(THD *thd, List<Item> &fields,
optimize away 'order by'.
*/
-static ORDER *
+ORDER *
create_distinct_group(THD *thd, Item **ref_pointer_array,
ORDER *order_list, List<Item> &fields,
List<Item> &all_fields,
@@ -15021,7 +15132,7 @@ test_if_group_changed(List<Cached_item> &list)
Only FIELD_ITEM:s and FUNC_ITEM:s needs to be saved between groups.
Change old item_field to use a new field with points at saved fieldvalue
- This function is only called before use of send_fields.
+ This function is only called before use of send_result_set_metadata.
@param thd THD pointer
@param param temporary table parameters
@@ -15052,7 +15163,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
Item *pos;
List_iterator_fast<Item> li(all_fields);
Copy_field *copy= NULL;
- IF_DBUG(Copy_field *copy_start);
+ Copy_field *copy_start __attribute__((unused));
res_selected_fields.empty();
res_all_fields.empty();
List_iterator_fast<Item> itr(res_all_fields);
@@ -15065,7 +15176,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
goto err2;
param->copy_funcs.empty();
- IF_DBUG(copy_start= copy);
+ copy_start= copy;
for (i= 0; (pos= li++); i++)
{
Field *field;
@@ -15261,7 +15372,7 @@ bool JOIN::alloc_func_list()
Initialize 'sum_funcs' array with all Item_sum objects.
@param field_list All items
- @param send_fields Items in select list
+ @param send_result_set_metadata Items in select list
@param before_group_by Set to 1 if this is called before GROUP BY handling
@param recompute Set to TRUE if sum_funcs must be recomputed
@@ -15271,7 +15382,7 @@ bool JOIN::alloc_func_list()
1 error
*/
-bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields,
+bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_result_set_metadata,
bool before_group_by, bool recompute)
{
List_iterator_fast<Item> it(field_list);
@@ -15293,7 +15404,7 @@ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields,
if (before_group_by && rollup.state == ROLLUP::STATE_INITED)
{
rollup.state= ROLLUP::STATE_READY;
- if (rollup_make_fields(field_list, send_fields, &func))
+ if (rollup_make_fields(field_list, send_result_set_metadata, &func))
DBUG_RETURN(TRUE); // Should never happen
}
else if (rollup.state == ROLLUP::STATE_NONE)
@@ -15468,7 +15579,22 @@ static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr)
DBUG_ENTER("setup_sum_funcs");
while ((func= *(func_ptr++)))
{
- if (func->setup(thd))
+ if (func->aggregator_setup(thd))
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+static bool prepare_sum_aggregators(Item_sum **func_ptr, bool need_distinct)
+{
+ Item_sum *func;
+ DBUG_ENTER("prepare_sum_aggregators");
+ while ((func= *(func_ptr++)))
+ {
+ if (func->set_aggregator(need_distinct && func->has_with_distinct() ?
+ Aggregator::DISTINCT_AGGREGATOR :
+ Aggregator::SIMPLE_AGGREGATOR))
DBUG_RETURN(TRUE);
}
DBUG_RETURN(FALSE);
@@ -15518,7 +15644,7 @@ init_sum_functions(Item_sum **func_ptr, Item_sum **end_ptr)
/* If rollup, calculate the upper sum levels */
for ( ; *func_ptr ; func_ptr++)
{
- if ((*func_ptr)->add())
+ if ((*func_ptr)->aggregator_add())
return 1;
}
return 0;
@@ -15530,7 +15656,7 @@ update_sum_func(Item_sum **func_ptr)
{
Item_sum *func;
for (; (func= (Item_sum*) *func_ptr) ; func_ptr++)
- if (func->add())
+ if (func->aggregator_add())
return 1;
return 0;
}
@@ -16475,7 +16601,12 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
if (key_read)
{
if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
+ {
+ QUICK_GROUP_MIN_MAX_SELECT *qgs=
+ (QUICK_GROUP_MIN_MAX_SELECT *) tab->select->quick;
extra.append(STRING_WITH_LEN("; Using index for group-by"));
+ qgs->append_loose_scan_type(&extra);
+ }
else
extra.append(STRING_WITH_LEN("; Using index"));
}
@@ -16606,18 +16737,35 @@ static void print_join(THD *thd,
{
/* List is reversed => we should reverse it before using */
List_iterator_fast<TABLE_LIST> ti(*tables);
- TABLE_LIST **table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) *
- tables->elements);
- if (table == 0)
+ TABLE_LIST **table;
+ uint non_const_tables= 0;
+
+ for (TABLE_LIST *t= ti++; t ; t= ti++)
+ if (!t->optimized_away)
+ non_const_tables++;
+ if (!non_const_tables)
+ {
+ str->append(STRING_WITH_LEN("dual"));
+ return; // all tables were optimized away
+ }
+ ti.rewind();
+
+ if (!(table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) *
+ non_const_tables)))
return; // out of memory
- for (TABLE_LIST **t= table + (tables->elements - 1); t >= table; t--)
- *t= ti++;
+ TABLE_LIST *tmp, **t= table + (non_const_tables - 1);
+ while ((tmp= ti++))
+ {
+ if (tmp->optimized_away)
+ continue;
+ *t--= tmp;
+ }
DBUG_ASSERT(tables->elements >= 1);
(*table)->print(thd, str, query_type);
- TABLE_LIST **end= table + tables->elements;
+ TABLE_LIST **end= table + non_const_tables;
for (TABLE_LIST **tbl= table + 1; tbl < end; tbl++)
{
TABLE_LIST *curr= *tbl;
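
The print_join() change above first counts the tables that were not optimized away, prints the dummy table "dual" if none remain, and otherwise rebuilds the reversed table array while skipping the optimized-away entries. The same filter-and-reverse idiom, reduced to a self-contained sketch (plain arrays and the helper name are illustrative; the server code works on List_iterator_fast<TABLE_LIST>):

  #include <cstddef>

  /* Copy the entries of src[0..n) that survive the filter into dst in
     reverse order; a result of 0 is the "print dual" case. */
  template <class T, class Pred>
  static size_t reverse_filtered(T **src, size_t n, T **dst, Pred skip)
  {
    size_t kept= 0;
    for (size_t i= 0; i < n; i++)
      if (!skip(src[i]))
        kept++;
    T **t= dst + kept;
    for (size_t i= 0; i < n; i++)
      if (!skip(src[i]))
        *--t= src[i];                 /* fill back to front => reversed */
    return kept;
  }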
diff --git a/sql/sql_select.h b/sql/sql_select.h
index c9cd3ecba42..e049e4ed765 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1,3 +1,6 @@
+#ifndef SQL_SELECT_INCLUDED
+#define SQL_SELECT_INCLUDED
+
/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -141,7 +144,6 @@ enum enum_nested_loop_state
typedef enum_nested_loop_state
(*Next_select_func)(JOIN *, struct st_join_table *, bool);
-typedef int (*Read_record_func)(struct st_join_table *tab);
Next_select_func setup_end_select_func(JOIN *join);
@@ -169,7 +171,7 @@ typedef struct st_join_table {
*/
uint packed_info;
- Read_record_func read_first_record;
+ READ_RECORD::Setup_func read_first_record;
Next_select_func next_select;
READ_RECORD read_record;
/*
@@ -177,8 +179,8 @@ typedef struct st_join_table {
if it is executed by an alternative full table scan when the left operand of
the subquery predicate is evaluated to NULL.
*/
- Read_record_func save_read_first_record;/* to save read_first_record */
- int (*save_read_record) (READ_RECORD *);/* to save read_record.read_record */
+ READ_RECORD::Setup_func save_read_first_record;/* to save read_first_record */
+ READ_RECORD::Read_func save_read_record;/* to save read_record.read_record */
double worst_seeks;
key_map const_keys; /**< Keys with constant part */
key_map checked_keys; /**< Keys checked in find_best */
@@ -225,6 +227,11 @@ typedef struct st_join_table {
(select->quick->get_type() ==
QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
}
+ bool is_using_agg_loose_index_scan ()
+ {
+ return (is_using_loose_index_scan() &&
+ ((QUICK_GROUP_MIN_MAX_SELECT *)select->quick)->is_agg_distinct());
+ }
} JOIN_TAB;
enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool
@@ -282,7 +289,7 @@ public:
JOIN_TAB *join_tab,**best_ref;
JOIN_TAB **map2table; ///< mapping between table indexes and JOIN_TABs
JOIN_TAB *join_tab_save; ///< saved join_tab for subquery reexecution
- TABLE **table,**all_tables,*sort_by_table;
+ TABLE **all_tables,*sort_by_table;
uint tables,const_tables;
uint send_group_parts;
/**
@@ -438,7 +445,7 @@ public:
select_result *result_arg)
{
join_tab= join_tab_save= 0;
- table= 0;
+ all_tables= 0;
tables= 0;
const_tables= 0;
join_list= 0;
@@ -587,6 +594,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field,
const char *name, TABLE *table,
Item_field *item, uint convert_blob_length);
+bool is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args);
+
/* functions from opt_sum.cc */
bool simple_pred(Item_func *func_item, Item **args, bool *inv_order);
int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds);
@@ -759,3 +768,4 @@ inline bool optimizer_flag(THD *thd, uint flag)
return (thd->variables.optimizer_switch & flag);
}
+#endif /* SQL_SELECT_INCLUDED */
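
sql_select.h above (and sql_servers.h further down) gain standard include guards so they can be included more than once without redefinition errors. The idiom in general form, with FOO_INCLUDED as a placeholder macro name:

  #ifndef FOO_INCLUDED
  #define FOO_INCLUDED

  /* declarations go here */

  #endif /* FOO_INCLUDED */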
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index f8a8dea18ff..e8fa3d984a7 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -120,8 +120,8 @@ bool servers_init(bool dont_read_servers_table)
DBUG_RETURN(TRUE);
/* initialise our servers cache */
- if (hash_init(&servers_cache, system_charset_info, 32, 0, 0,
- (hash_get_key) servers_cache_get_key, 0, 0))
+ if (my_hash_init(&servers_cache, system_charset_info, 32, 0, 0,
+ (my_hash_get_key) servers_cache_get_key, 0, 0))
{
return_val= TRUE; /* we failed, out of memory? */
goto end;
@@ -242,7 +242,7 @@ bool servers_reload(THD *thd)
if (simple_open_n_lock_tables(thd, tables))
{
sql_print_error("Can't open and lock privilege tables: %s",
- thd->main_da.message());
+ thd->stmt_da->message());
goto end;
}
@@ -646,9 +646,10 @@ delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options)
server_options->server_name_length));
- if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache,
- (uchar*) server_options->server_name,
- server_options->server_name_length)))
+ if (!(server= (FOREIGN_SERVER *)
+ my_hash_search(&servers_cache,
+ (uchar*) server_options->server_name,
+ server_options->server_name_length)))
{
DBUG_PRINT("info", ("server_name %s length %d not found!",
server_options->server_name,
@@ -663,7 +664,7 @@ delete_server_record_in_cache(LEX_SERVER_OPTIONS *server_options)
server->server_name,
server->server_name_length));
- VOID(hash_delete(&servers_cache, (uchar*) server));
+ VOID(my_hash_delete(&servers_cache, (uchar*) server));
error= 0;
@@ -770,7 +771,7 @@ int update_server_record_in_cache(FOREIGN_SERVER *existing,
/*
delete the existing server struct from the server cache
*/
- VOID(hash_delete(&servers_cache, (uchar*)existing));
+ VOID(my_hash_delete(&servers_cache, (uchar*)existing));
/*
Insert the altered server struct into the server cache
@@ -965,8 +966,8 @@ int create_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
rw_wrlock(&THR_LOCK_servers);
/* hit the memory first */
- if (hash_search(&servers_cache, (uchar*) server_options->server_name,
- server_options->server_name_length))
+ if (my_hash_search(&servers_cache, (uchar*) server_options->server_name,
+ server_options->server_name_length))
goto end;
@@ -1014,9 +1015,9 @@ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
rw_wrlock(&THR_LOCK_servers);
- if (!(existing= (FOREIGN_SERVER *) hash_search(&servers_cache,
- (uchar*) name.str,
- name.length)))
+ if (!(existing= (FOREIGN_SERVER *) my_hash_search(&servers_cache,
+ (uchar*) name.str,
+ name.length)))
goto end;
altered= (FOREIGN_SERVER *)alloc_root(&mem,
@@ -1195,7 +1196,7 @@ prepare_server_struct_for_update(LEX_SERVER_OPTIONS *server_options,
void servers_free(bool end)
{
DBUG_ENTER("servers_free");
- if (!hash_inited(&servers_cache))
+ if (!my_hash_inited(&servers_cache))
DBUG_VOID_RETURN;
if (!end)
{
@@ -1205,7 +1206,7 @@ void servers_free(bool end)
}
rwlock_destroy(&THR_LOCK_servers);
free_root(&mem,MYF(0));
- hash_free(&servers_cache);
+ my_hash_free(&servers_cache);
DBUG_VOID_RETURN;
}
@@ -1286,9 +1287,9 @@ FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name,
DBUG_PRINT("info", ("locking servers_cache"));
rw_rdlock(&THR_LOCK_servers);
- if (!(server= (FOREIGN_SERVER *) hash_search(&servers_cache,
- (uchar*) server_name,
- server_name_length)))
+ if (!(server= (FOREIGN_SERVER *) my_hash_search(&servers_cache,
+ (uchar*) server_name,
+ server_name_length)))
{
DBUG_PRINT("info", ("server_name %s length %u not found!",
server_name, (unsigned) server_name_length));
diff --git a/sql/sql_servers.h b/sql/sql_servers.h
index 63c691893d1..12855f8473c 100644
--- a/sql/sql_servers.h
+++ b/sql/sql_servers.h
@@ -1,3 +1,6 @@
+#ifndef SQL_SERVERS_INCLUDED
+#define SQL_SERVERS_INCLUDED
+
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -41,3 +44,5 @@ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options);
/* lookup functions */
FOREIGN_SERVER *get_server_by_name(MEM_ROOT *mem, const char *server_name,
FOREIGN_SERVER *server_buffer);
+
+#endif /* SQL_SERVERS_INCLUDED */
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 2c1f360104b..b9f5015f8f0 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -25,6 +25,7 @@
#include "sql_trigger.h"
#include "authors.h"
#include "contributors.h"
+#include "sql_partition.h"
#ifdef HAVE_EVENT_SCHEDULER
#include "events.h"
#include "event_data_objects.h"
@@ -77,6 +78,12 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
static void store_key_options(THD *thd, String *packet, TABLE *table,
KEY *key_info);
+static void get_cs_converted_string_value(THD *thd,
+ String *input_str,
+ String *output_str,
+ CHARSET_INFO *cs,
+ bool use_hex);
+
static void
append_algorithm(TABLE_LIST *table, String *buff);
@@ -216,7 +223,7 @@ bool mysqld_show_authors(THD *thd)
field_list.push_back(new Item_empty_string("Location",40));
field_list.push_back(new Item_empty_string("Comment",80));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -250,7 +257,7 @@ bool mysqld_show_contributors(THD *thd)
field_list.push_back(new Item_empty_string("Location",40));
field_list.push_back(new Item_empty_string("Comment",80));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -310,6 +317,7 @@ static struct show_privileges_st sys_privileges[]=
{"Shutdown","Server Admin", "To shut down the server"},
{"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."},
{"Trigger","Tables", "To use triggers"},
+ {"Create tablespace", "Server Admin", "To create/alter/drop tablespaces"},
{"Update", "Tables", "To update existing rows"},
{"Usage","Server Admin","No privileges - allow connect only"},
{NullS, NullS, NullS}
@@ -325,7 +333,7 @@ bool mysqld_show_privileges(THD *thd)
field_list.push_back(new Item_empty_string("Context",15));
field_list.push_back(new Item_empty_string("Comment",NAME_CHAR_LEN));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -344,94 +352,6 @@ bool mysqld_show_privileges(THD *thd)
}
-/***************************************************************************
- List all column types
-***************************************************************************/
-
-struct show_column_type_st
-{
- const char *type;
- uint size;
- const char *min_value;
- const char *max_value;
- uint precision;
- uint scale;
- const char *nullable;
- const char *auto_increment;
- const char *unsigned_attr;
- const char *zerofill;
- const char *searchable;
- const char *case_sensitivity;
- const char *default_value;
- const char *comment;
-};
-
-/* TODO: Add remaning types */
-
-static struct show_column_type_st sys_column_types[]=
-{
- {"tinyint",
- 1, "-128", "127", 0, 0, "YES", "YES",
- "NO", "YES", "YES", "NO", "NULL,0",
- "A very small integer"},
- {"tinyint unsigned",
- 1, "0" , "255", 0, 0, "YES", "YES",
- "YES", "YES", "YES", "NO", "NULL,0",
- "A very small integer"},
-};
-
-bool mysqld_show_column_types(THD *thd)
-{
- List<Item> field_list;
- Protocol *protocol= thd->protocol;
- DBUG_ENTER("mysqld_show_column_types");
-
- field_list.push_back(new Item_empty_string("Type",30));
- field_list.push_back(new Item_int("Size",(longlong) 1,
- MY_INT64_NUM_DECIMAL_DIGITS));
- field_list.push_back(new Item_empty_string("Min_Value",20));
- field_list.push_back(new Item_empty_string("Max_Value",20));
- field_list.push_back(new Item_return_int("Prec", 4, MYSQL_TYPE_SHORT));
- field_list.push_back(new Item_return_int("Scale", 4, MYSQL_TYPE_SHORT));
- field_list.push_back(new Item_empty_string("Nullable",4));
- field_list.push_back(new Item_empty_string("Auto_Increment",4));
- field_list.push_back(new Item_empty_string("Unsigned",4));
- field_list.push_back(new Item_empty_string("Zerofill",4));
- field_list.push_back(new Item_empty_string("Searchable",4));
- field_list.push_back(new Item_empty_string("Case_Sensitive",4));
- field_list.push_back(new Item_empty_string("Default",NAME_CHAR_LEN));
- field_list.push_back(new Item_empty_string("Comment",NAME_CHAR_LEN));
-
- if (protocol->send_fields(&field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
- DBUG_RETURN(TRUE);
-
- /* TODO: Change the loop to not use 'i' */
- for (uint i=0; i < sizeof(sys_column_types)/sizeof(sys_column_types[0]); i++)
- {
- protocol->prepare_for_resend();
- protocol->store(sys_column_types[i].type, system_charset_info);
- protocol->store((ulonglong) sys_column_types[i].size);
- protocol->store(sys_column_types[i].min_value, system_charset_info);
- protocol->store(sys_column_types[i].max_value, system_charset_info);
- protocol->store_short((longlong) sys_column_types[i].precision);
- protocol->store_short((longlong) sys_column_types[i].scale);
- protocol->store(sys_column_types[i].nullable, system_charset_info);
- protocol->store(sys_column_types[i].auto_increment, system_charset_info);
- protocol->store(sys_column_types[i].unsigned_attr, system_charset_info);
- protocol->store(sys_column_types[i].zerofill, system_charset_info);
- protocol->store(sys_column_types[i].searchable, system_charset_info);
- protocol->store(sys_column_types[i].case_sensitivity, system_charset_info);
- protocol->store(sys_column_types[i].default_value, system_charset_info);
- protocol->store(sys_column_types[i].comment, system_charset_info);
- if (protocol->write())
- DBUG_RETURN(TRUE);
- }
- my_eof(thd);
- DBUG_RETURN(FALSE);
-}
-
-
/*
find_files() - find files in a given directory.
@@ -560,7 +480,7 @@ find_files(THD *thd, List<LEX_STRING> *files, const char *db,
table_list.table_name= uname;
table_list.table_name_length= file_name_len;
table_list.grant.privilege=col_access;
- if (check_grant(thd, TABLE_ACLS, &table_list, 1, 1, 1))
+ if (check_grant(thd, TABLE_ACLS, &table_list, TRUE, 1, TRUE))
continue;
}
#endif
@@ -644,8 +564,10 @@ public:
return m_view_access_denied_message_ptr;
}
- bool handle_error(uint sql_errno, const char *message,
- MYSQL_ERROR::enum_warning_level level, THD *thd) {
+ bool handle_condition(THD *thd, uint sql_errno, const char */* sqlstate */,
+ MYSQL_ERROR::enum_warning_level level,
+ const char *message, MYSQL_ERROR **/* cond_hdl */)
+ {
/*
The handler does not handle the errors raised by itself.
At this point we know if top_view is really a view.
@@ -719,7 +641,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
thd->push_internal_handler(&view_error_suppressor);
bool error= open_normal_and_derived_tables(thd, table_list, 0);
thd->pop_internal_handler();
- if (error && thd->main_da.is_error())
+ if (error && thd->is_error())
DBUG_RETURN(TRUE);
}
@@ -761,7 +683,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
max(buffer.length(),1024)));
}
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
protocol->prepare_for_resend();
@@ -846,7 +768,7 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
field_list.push_back(new Item_empty_string("Database",NAME_CHAR_LEN));
field_list.push_back(new Item_empty_string("Create Database",1024));
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -914,7 +836,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
}
restore_record(table, s->default_values); // Get empty record
table->use_all_columns();
- if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS))
+ if (thd->protocol->send_result_set_metadata(&field_list, Protocol::SEND_DEFAULTS))
DBUG_VOID_RETURN;
my_eof(thd);
DBUG_VOID_RETURN;
@@ -1572,7 +1494,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
((part_syntax= generate_partition_syntax(table->part_info,
&part_syntax_len,
FALSE,
- show_table_options))))
+ show_table_options,
+ NULL, NULL))))
{
packet->append(STRING_WITH_LEN("\n/*!50100"));
packet->append(part_syntax, part_syntax_len);
@@ -1804,7 +1727,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
field->maybe_null=1;
field_list.push_back(field=new Item_empty_string("Info",max_query_length));
field->maybe_null=1;
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_VOID_RETURN;
@@ -3093,7 +3016,7 @@ static int fill_schema_table_names(THD *thd, TABLE *table,
default:
DBUG_ASSERT(0);
}
- if (thd->is_error() && thd->main_da.sql_errno() == ER_NO_SUCH_TABLE)
+ if (thd->is_error() && thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE)
{
thd->clear_error();
return 0;
@@ -3185,12 +3108,31 @@ static int fill_schema_table_from_frm(THD *thd,TABLE *table,
int error;
char key[MAX_DBKEY_LENGTH];
uint key_length;
+ char db_name_buff[NAME_LEN + 1], table_name_buff[NAME_LEN + 1];
bzero((char*) &table_list, sizeof(TABLE_LIST));
bzero((char*) &tbl, sizeof(TABLE));
- table_list.table_name= table_name->str;
- table_list.db= db_name->str;
+ if (lower_case_table_names)
+ {
+ /*
+ When lower_case_table_names > 0, the metadata locking and table
+ definition cache subsystems require normalized (lowercased) database
+ and table names as input.
+ */
+ strmov(db_name_buff, db_name->str);
+ strmov(table_name_buff, table_name->str);
+ my_casedn_str(files_charset_info, db_name_buff);
+ my_casedn_str(files_charset_info, table_name_buff);
+ table_list.db= db_name_buff;
+ table_list.table_name= table_name_buff;
+ }
+ else
+ {
+ table_list.table_name= table_name->str;
+ table_list.db= db_name->str;
+ }
+
key_length= create_table_def_key(thd, key, &table_list, 0);
pthread_mutex_lock(&LOCK_open);
share= get_table_share(thd, &table_list, key,
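
fill_schema_table_from_frm() above now lowercases local copies of the database and table names before they are used as keys into the metadata locking and table definition cache subsystems. That normalization step as a stand-alone sketch (normalize_db_and_table() is a hypothetical helper; the buffers are assumed to be at least NAME_LEN + 1 bytes, as in the hunk):

  static void normalize_db_and_table(const char *db, const char *table_name,
                                     char *db_buff, char *table_buff)
  {
    strmov(db_buff, db);
    strmov(table_buff, table_name);
    if (lower_case_table_names)
    {
      /* cache/MDL keys must be lowercased on case-insensitive setups */
      my_casedn_str(files_charset_info, db_buff);
      my_casedn_str(files_charset_info, table_buff);
    }
  }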
@@ -3228,7 +3170,7 @@ static int fill_schema_table_from_frm(THD *thd,TABLE *table,
{
tbl.s= share;
table_list.table= &tbl;
- table_list.view= (st_lex*) share->is_view;
+ table_list.view= (LEX*) share->is_view;
res= schema_table->process_table(thd, &table_list, table,
res, db_name, table_name);
closefrm(&tbl, true);
@@ -3406,6 +3348,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
(!lookup_field_vals.table_value.length ||
lookup_field_vals.wild_table_value))
{
+ table->field[0]->store(STRING_WITH_LEN("def"), system_charset_info);
if (schema_table_store_record(thd, table))
goto err; /* Out of space in temporary table */
continue;
@@ -3457,10 +3400,10 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
can return an error without setting an error message
in THD, which is a hack. This is why we have to
check for res, then for thd->is_error() only then
- for thd->main_da.sql_errno().
+ for thd->stmt_da->sql_errno().
*/
if (res && thd->is_error() &&
- thd->main_da.sql_errno() == ER_NO_SUCH_TABLE)
+ thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE)
{
/*
Hide error for not existing table.
@@ -3517,6 +3460,7 @@ bool store_schema_shemata(THD* thd, TABLE *table, LEX_STRING *db_name,
CHARSET_INFO *cs)
{
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), system_charset_info);
table->field[1]->store(db_name->str, db_name->length, system_charset_info);
table->field[2]->store(cs->csname, strlen(cs->csname), system_charset_info);
table->field[3]->store(cs->name, strlen(cs->name), system_charset_info);
@@ -3607,6 +3551,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
DBUG_ENTER("get_schema_tables_record");
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(table_name->str, table_name->length, cs);
if (res)
@@ -3614,7 +3559,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
/*
there were errors during the opening of tables
*/
- const char *error= thd->is_error() ? thd->main_da.message() : "";
+ const char *error= thd->is_error() ? thd->stmt_da->message() : "";
if (tables->view)
table->field[3]->store(STRING_WITH_LEN("VIEW"), cs);
else if (tables->schema_table)
@@ -3820,7 +3765,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
*/
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
res= 0;
}
@@ -3854,8 +3799,9 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint col_access;
- check_access(thd,SELECT_ACL | EXTRA_ACL, db_name->str,
- &tables->grant.privilege, 0, 0, test(tables->schema_table));
+ check_access(thd,SELECT_ACL, db_name->str,
+ &tables->grant.privilege, FALSE, FALSE,
+ test(tables->schema_table));
col_access= get_column_grant(thd, &tables->grant,
db_name->str, table_name->str,
field->field_name) & COL_ACLS;
@@ -3873,6 +3819,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
table->field[17]->store(tmp+1,end == tmp ? 0 : (uint) (end-tmp-1), cs);
#endif
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(table_name->str, table_name->length, cs);
table->field[3]->store(field->field_name, strlen(field->field_name),
@@ -4008,7 +3955,9 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
TABLE *table= tables->table;
CHARSET_INFO *scs= system_charset_info;
- for (cs= all_charsets ; cs < all_charsets+255 ; cs++)
+ for (cs= all_charsets ;
+ cs < all_charsets + array_elements(all_charsets) ;
+ cs++)
{
CHARSET_INFO *tmp_cs= cs[0];
if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) &&
@@ -4113,7 +4062,9 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
TABLE *table= tables->table;
CHARSET_INFO *scs= system_charset_info;
- for (cs= all_charsets ; cs < all_charsets+255 ; cs++ )
+ for (cs= all_charsets ;
+ cs < all_charsets + array_elements(all_charsets) ;
+ cs++ )
{
CHARSET_INFO **cl;
CHARSET_INFO *tmp_cs= cs[0];
@@ -4121,7 +4072,9 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
(tmp_cs->state & MY_CS_HIDDEN) ||
!(tmp_cs->state & MY_CS_PRIMARY))
continue;
- for (cl= all_charsets; cl < all_charsets+255 ;cl ++)
+ for (cl= all_charsets;
+ cl < all_charsets + array_elements(all_charsets) ;
+ cl ++)
{
CHARSET_INFO *tmp_cl= cl[0];
if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) ||
@@ -4154,17 +4107,22 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond)
CHARSET_INFO **cs;
TABLE *table= tables->table;
CHARSET_INFO *scs= system_charset_info;
- for (cs= all_charsets ; cs < all_charsets+255 ; cs++ )
+ for (cs= all_charsets ;
+ cs < all_charsets + array_elements(all_charsets) ;
+ cs++ )
{
CHARSET_INFO **cl;
CHARSET_INFO *tmp_cs= cs[0];
if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) ||
!(tmp_cs->state & MY_CS_PRIMARY))
continue;
- for (cl= all_charsets; cl < all_charsets+255 ;cl ++)
+ for (cl= all_charsets;
+ cl < all_charsets + array_elements(all_charsets) ;
+ cl ++)
{
CHARSET_INFO *tmp_cl= cl[0];
- if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) ||
+ if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) ||
+ (tmp_cl->state & MY_CS_HIDDEN) ||
!my_charset_same(tmp_cs,tmp_cl))
continue;
restore_record(table, s->default_values);
@@ -4211,6 +4169,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
table->field[3]->store(sp_name.ptr(), sp_name.length(), cs);
get_field(thd->mem_root, proc_table->field[3], &tmp_string);
table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs);
+ table->field[1]->store(STRING_WITH_LEN("def"), cs);
table->field[2]->store(sp_db.ptr(), sp_db.length(), cs);
get_field(thd->mem_root, proc_table->field[2], &tmp_string);
table->field[4]->store(tmp_string.ptr(), tmp_string.length(), cs);
@@ -4283,7 +4242,8 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
proc_tables.table_name= proc_tables.alias= (char*) "proc";
proc_tables.table_name_length= 4;
proc_tables.lock_type= TL_READ;
- full_access= !check_table_access(thd, SELECT_ACL, &proc_tables, 1, TRUE);
+ full_access= !check_table_access(thd, SELECT_ACL, &proc_tables, FALSE,
+ 1, TRUE);
if (!(proc_table= open_proc_table_for_read(thd, &open_tables_state_backup)))
{
DBUG_RETURN(1);
@@ -4332,7 +4292,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
*/
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
res= 0;
}
@@ -4353,6 +4313,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
{
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(table_name->str, table_name->length, cs);
table->field[3]->store((longlong) ((key_info->flags &
@@ -4469,6 +4430,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
tmp_db_name= db_name;
tmp_table_name= table_name;
}
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(tmp_db_name->str, tmp_db_name->length, cs);
table->field[2]->store(tmp_table_name->str, tmp_table_name->length, cs);
if (!only_share)
@@ -4545,7 +4507,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
DBUG_RETURN(1);
if (res && thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
}
if (res)
thd->clear_error();
@@ -4559,6 +4521,7 @@ bool store_constraints(THD *thd, TABLE *table, LEX_STRING *db_name,
{
CHARSET_INFO *cs= system_charset_info;
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(key_name, key_len, cs);
table->field[3]->store(db_name->str, db_name->length, cs);
@@ -4578,7 +4541,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
{
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -4643,10 +4606,12 @@ static bool store_trigger(THD *thd, TABLE *table, LEX_STRING *db_name,
LEX_STRING sql_mode_rep;
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(trigger_name->str, trigger_name->length, cs);
table->field[3]->store(trg_event_type_names[event].str,
trg_event_type_names[event].length, cs);
+ table->field[4]->store(STRING_WITH_LEN("def"), cs);
table->field[5]->store(db_name->str, db_name->length, cs);
table->field[6]->store(table_name->str, table_name->length, cs);
table->field[9]->store(trigger_stmt->str, trigger_stmt->length, cs);
@@ -4683,7 +4648,7 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
{
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -4692,7 +4657,7 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
Table_triggers_list *triggers= tables->table->triggers;
int event, timing;
- if (check_table_access(thd, TRIGGER_ACL, tables, 1, TRUE))
+ if (check_table_access(thd, TRIGGER_ACL, tables, FALSE, 1, TRUE))
goto ret;
for (event= 0; event < (int)TRG_EVENT_MAX; event++)
@@ -4742,8 +4707,10 @@ void store_key_column_usage(TABLE *table, LEX_STRING *db_name,
longlong idx)
{
CHARSET_INFO *cs= system_charset_info;
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(key_name, key_len, cs);
+ table->field[3]->store(STRING_WITH_LEN("def"), cs);
table->field[4]->store(db_name->str, db_name->length, cs);
table->field[5]->store(table_name->str, table_name->length, cs);
table->field[6]->store(con_type, con_len, cs);
@@ -4762,7 +4729,7 @@ static int get_schema_key_column_usage_record(THD *thd,
{
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -4856,6 +4823,57 @@ static void collect_partition_expr(List<char> &field_list, String *str)
}
return;
}
+
+
+/*
+ Convert a string in a given character set to a string that can be used
+ either for FRM file storage, in which case use_hex is TRUE and the
+ character constants are stored as hex strings in the character set
+ encoding of their field, or for SHOW CREATE TABLE and the PARTITIONS
+ information schema table, in which case we instead convert to utf8 and
+ return utf8 strings to the user.
+
+ SYNOPSIS
+ get_cs_converted_part_value_from_string()
+ item Item from which constant comes
+ input_str String as provided by val_str after
+ conversion to character set
+ output_str Out value: The string created
+ cs Character set string is encoded in
+ NULL for INT_RESULT's here
+ use_hex TRUE => hex string created
+ FALSE => utf8 constant string created
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Ok
+*/
+
+int get_cs_converted_part_value_from_string(THD *thd,
+ Item *item,
+ String *input_str,
+ String *output_str,
+ CHARSET_INFO *cs,
+ bool use_hex)
+{
+ if (item->result_type() == INT_RESULT)
+ {
+ longlong value= item->val_int();
+ output_str->set(value, system_charset_info);
+ return FALSE;
+ }
+ if (!input_str)
+ {
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ return TRUE;
+ }
+ get_cs_converted_string_value(thd,
+ input_str,
+ output_str,
+ cs,
+ use_hex);
+ return FALSE;
+}
#endif
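
get_cs_converted_part_value_from_string() above is the conversion entry point used by get_partition_column_description() below and, per its comment, by the FRM and SHOW CREATE TABLE paths. A hypothetical caller, shown only to illustrate the contract (the wrapper name is not from the patch; for_frm selects the hex form used for FRM storage):

  static bool part_value_to_text(THD *thd, Item *item, CHARSET_INFO *field_cs,
                                 bool for_frm, String *out)
  {
    char buf[MAX_KEY_LENGTH];
    String tmp(buf, sizeof(buf), &my_charset_bin);
    String *res= item->val_str(&tmp);      /* string image of the constant */

    /* INT_RESULT items are formatted inside the callee and ignore res/cs */
    return get_cs_converted_part_value_from_string(thd, item, res, out,
                                                   field_cs, for_frm);
  }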
@@ -4869,6 +4887,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
PARTITION_INFO stat_info;
MYSQL_TIME time;
file->get_dynamic_partition_info(&stat_info, part_id);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[12]->store((longlong) stat_info.records, TRUE);
table->field[13]->store((longlong) stat_info.mean_rec_length, TRUE);
table->field[14]->store((longlong) stat_info.data_file_length, TRUE);
@@ -4936,6 +4955,51 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
return;
}
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+static int
+get_partition_column_description(THD *thd,
+ partition_info *part_info,
+ part_elem_value *list_value,
+ String &tmp_str)
+{
+ uint num_elements= part_info->part_field_list.elements;
+ uint i;
+ DBUG_ENTER("get_partition_column_description");
+
+ for (i= 0; i < num_elements; i++)
+ {
+ part_column_list_val *col_val= &list_value->col_val_array[i];
+ if (col_val->max_value)
+ tmp_str.append(partition_keywords[PKW_MAXVALUE].str);
+ else if (col_val->null_value)
+ tmp_str.append("NULL");
+ else
+ {
+ char buffer[MAX_KEY_LENGTH];
+ String str(buffer, sizeof(buffer), &my_charset_bin);
+ String val_conv;
+ Item *item= col_val->item_expression;
+
+ if (!(item= part_info->get_column_item(item,
+ part_info->part_field_array[i])))
+ {
+ DBUG_RETURN(1);
+ }
+ String *res= item->val_str(&str);
+ if (get_cs_converted_part_value_from_string(thd, item, res, &val_conv,
+ part_info->part_field_array[i]->charset(),
+ FALSE))
+ {
+ DBUG_RETURN(1);
+ }
+ tmp_str.append(val_conv);
+ }
+ if (i != num_elements - 1)
+ tmp_str.append(",");
+ }
+ DBUG_RETURN(0);
+}
+#endif /* WITH_PARTITION_STORAGE_ENGINE */
static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
TABLE *table, bool res,
@@ -4957,7 +5021,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
{
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -4971,6 +5035,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
uint part_pos= 0, part_id= 0;
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[2]->store(table_name->str, table_name->length, cs);
@@ -4978,12 +5043,18 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
/* Partition method*/
switch (part_info->part_type) {
case RANGE_PARTITION:
- table->field[7]->store(partition_keywords[PKW_RANGE].str,
- partition_keywords[PKW_RANGE].length, cs);
- break;
case LIST_PARTITION:
- table->field[7]->store(partition_keywords[PKW_LIST].str,
- partition_keywords[PKW_LIST].length, cs);
+ tmp_res.length(0);
+ if (part_info->part_type == RANGE_PARTITION)
+ tmp_res.append(partition_keywords[PKW_RANGE].str,
+ partition_keywords[PKW_RANGE].length);
+ else
+ tmp_res.append(partition_keywords[PKW_LIST].str,
+ partition_keywords[PKW_LIST].length);
+ if (part_info->column_list)
+ tmp_res.append(partition_keywords[PKW_COLUMNS].str,
+ partition_keywords[PKW_COLUMNS].length);
+ table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs);
break;
case HASH_PARTITION:
tmp_res.length(0);
@@ -5061,36 +5132,70 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
/* Partition description */
if (part_info->part_type == RANGE_PARTITION)
{
- if (part_elem->range_value != LONGLONG_MAX)
- table->field[11]->store((longlong) part_elem->range_value, FALSE);
+ if (part_info->column_list)
+ {
+ List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
+ part_elem_value *list_value= list_val_it++;
+ tmp_str.length(0);
+ if (get_partition_column_description(thd,
+ part_info,
+ list_value,
+ tmp_str))
+ {
+ DBUG_RETURN(1);
+ }
+ table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs);
+ }
else
- table->field[11]->store(partition_keywords[PKW_MAXVALUE].str,
+ {
+ if (part_elem->range_value != LONGLONG_MAX)
+ table->field[11]->store((longlong) part_elem->range_value, FALSE);
+ else
+ table->field[11]->store(partition_keywords[PKW_MAXVALUE].str,
partition_keywords[PKW_MAXVALUE].length, cs);
+ }
table->field[11]->set_notnull();
}
else if (part_info->part_type == LIST_PARTITION)
{
List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
part_elem_value *list_value;
- uint no_items= part_elem->list_val_list.elements;
+ uint num_items= part_elem->list_val_list.elements;
tmp_str.length(0);
tmp_res.length(0);
if (part_elem->has_null_value)
{
tmp_str.append("NULL");
- if (no_items > 0)
+ if (num_items > 0)
tmp_str.append(",");
}
while ((list_value= list_val_it++))
{
- if (!list_value->unsigned_flag)
- tmp_res.set(list_value->value, cs);
+ if (part_info->column_list)
+ {
+ if (part_info->part_field_list.elements > 1U)
+ tmp_str.append("(");
+ if (get_partition_column_description(thd,
+ part_info,
+ list_value,
+ tmp_str))
+ {
+ DBUG_RETURN(1);
+ }
+ if (part_info->part_field_list.elements > 1U)
+ tmp_str.append(")");
+ }
else
- tmp_res.set((ulonglong)list_value->value, cs);
- tmp_str.append(tmp_res);
- if (--no_items != 0)
+ {
+ if (!list_value->unsigned_flag)
+ tmp_res.set(list_value->value, cs);
+ else
+ tmp_res.set((ulonglong)list_value->value, cs);
+ tmp_str.append(tmp_res);
+ }
+ if (--num_items != 0)
tmp_str.append(",");
- };
+ }
table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs);
table->field[11]->set_notnull();
}
@@ -5231,8 +5336,7 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
is_schema_db(et.dbname.str)))
DBUG_RETURN(0);
- /* ->field[0] is EVENT_CATALOG and is by default NULL */
-
+ sch_table->field[ISE_EVENT_CATALOG]->store(STRING_WITH_LEN("def"), scs);
sch_table->field[ISE_EVENT_SCHEMA]->
store(et.dbname.str, et.dbname.length,scs);
sch_table->field[ISE_EVENT_NAME]->
@@ -5495,7 +5599,7 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
{
if (thd->is_error())
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->main_da.sql_errno(), thd->main_da.message());
+ thd->stmt_da->sql_errno(), thd->stmt_da->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -5513,10 +5617,12 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
while ((f_key_info= it++))
{
restore_record(table, s->default_values);
+ table->field[0]->store(STRING_WITH_LEN("def"), cs);
table->field[1]->store(db_name->str, db_name->length, cs);
table->field[9]->store(table_name->str, table_name->length, cs);
table->field[2]->store(f_key_info->forein_id->str,
f_key_info->forein_id->length, cs);
+ table->field[3]->store(STRING_WITH_LEN("def"), cs);
table->field[4]->store(f_key_info->referenced_db->str,
f_key_info->referenced_db->length, cs);
table->field[10]->store(f_key_info->referenced_table->str,
@@ -6182,7 +6288,7 @@ int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond)
ST_FIELD_INFO schema_fields_info[]=
{
- {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"SCHEMA_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Database",
SKIP_OPEN_TABLE},
{"DEFAULT_CHARACTER_SET_NAME", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0, 0,
@@ -6196,7 +6302,7 @@ ST_FIELD_INFO schema_fields_info[]=
ST_FIELD_INFO tables_fields_info[]=
{
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
SKIP_OPEN_TABLE},
@@ -6235,7 +6341,7 @@ ST_FIELD_INFO tables_fields_info[]=
ST_FIELD_INFO columns_fields_info[]=
{
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FRM_ONLY},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Field",
@@ -6309,7 +6415,7 @@ ST_FIELD_INFO engines_fields_info[]=
ST_FIELD_INFO events_fields_info[]=
{
- {"EVENT_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"EVENT_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"EVENT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Db",
SKIP_OPEN_TABLE},
{"EVENT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
@@ -6358,7 +6464,7 @@ ST_FIELD_INFO coll_charset_app_fields_info[]=
ST_FIELD_INFO proc_fields_info[]=
{
{"SPECIFIC_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"ROUTINE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"ROUTINE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"ROUTINE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Db",
SKIP_OPEN_TABLE},
{"ROUTINE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Name",
@@ -6380,7 +6486,7 @@ ST_FIELD_INFO proc_fields_info[]=
{"CREATED", 0, MYSQL_TYPE_DATETIME, 0, 0, "Created", SKIP_OPEN_TABLE},
{"LAST_ALTERED", 0, MYSQL_TYPE_DATETIME, 0, 0, "Modified", SKIP_OPEN_TABLE},
{"SQL_MODE", 32*256, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"ROUTINE_COMMENT", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment",
+ {"ROUTINE_COMMENT", 65535, MYSQL_TYPE_STRING, 0, 0, "Comment",
SKIP_OPEN_TABLE},
{"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer", SKIP_OPEN_TABLE},
{"CHARACTER_SET_CLIENT", MY_CS_NAME_SIZE, MYSQL_TYPE_STRING, 0, 0,
@@ -6395,7 +6501,7 @@ ST_FIELD_INFO proc_fields_info[]=
ST_FIELD_INFO stat_fields_info[]=
{
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FRM_ONLY},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Table", OPEN_FRM_ONLY},
{"NON_UNIQUE", 1, MYSQL_TYPE_LONGLONG, 0, 0, "Non_unique", OPEN_FRM_ONLY},
@@ -6419,7 +6525,7 @@ ST_FIELD_INFO stat_fields_info[]=
ST_FIELD_INFO view_fields_info[]=
{
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FRM_ONLY},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FRM_ONLY},
{"VIEW_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
@@ -6438,7 +6544,7 @@ ST_FIELD_INFO view_fields_info[]=
ST_FIELD_INFO user_privileges_fields_info[]=
{
{"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
@@ -6448,7 +6554,7 @@ ST_FIELD_INFO user_privileges_fields_info[]=
ST_FIELD_INFO schema_privileges_fields_info[]=
{
{"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
@@ -6459,7 +6565,7 @@ ST_FIELD_INFO schema_privileges_fields_info[]=
ST_FIELD_INFO table_privileges_fields_info[]=
{
{"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"PRIVILEGE_TYPE", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
@@ -6471,7 +6577,7 @@ ST_FIELD_INFO table_privileges_fields_info[]=
ST_FIELD_INFO column_privileges_fields_info[]=
{
{"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
@@ -6483,7 +6589,7 @@ ST_FIELD_INFO column_privileges_fields_info[]=
ST_FIELD_INFO table_constraints_fields_info[]=
{
- {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
{"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
@@ -6498,12 +6604,12 @@ ST_FIELD_INFO table_constraints_fields_info[]=
ST_FIELD_INFO key_column_usage_fields_info[]=
{
- {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
{"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"COLUMN_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
@@ -6522,7 +6628,7 @@ ST_FIELD_INFO key_column_usage_fields_info[]=
ST_FIELD_INFO table_names_fields_info[]=
{
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Tables_in_",
SKIP_OPEN_TABLE},
@@ -6545,12 +6651,12 @@ ST_FIELD_INFO open_tables_fields_info[]=
ST_FIELD_INFO triggers_fields_info[]=
{
- {"TRIGGER_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"TRIGGER_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"TRIGGER_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"TRIGGER_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, "Trigger",
OPEN_FULL_TABLE},
{"EVENT_MANIPULATION", 6, MYSQL_TYPE_STRING, 0, 0, "Event", OPEN_FULL_TABLE},
- {"EVENT_OBJECT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0,
+ {"EVENT_OBJECT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
{"EVENT_OBJECT_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
@@ -6583,7 +6689,7 @@ ST_FIELD_INFO triggers_fields_info[]=
ST_FIELD_INFO partitions_fields_info[]=
{
- {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"TABLE_SCHEMA",NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"PARTITION_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
@@ -6593,7 +6699,7 @@ ST_FIELD_INFO partitions_fields_info[]=
(MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE},
{"SUBPARTITION_ORDINAL_POSITION", 21 , MYSQL_TYPE_LONGLONG, 0,
(MY_I_S_MAYBE_NULL | MY_I_S_UNSIGNED), 0, OPEN_FULL_TABLE},
- {"PARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"PARTITION_METHOD", 18, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
{"SUBPARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
{"PARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
{"SUBPARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0,
@@ -6673,7 +6779,7 @@ ST_FIELD_INFO files_fields_info[]=
{"FILE_TYPE", 20, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLESPACE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
SKIP_OPEN_TABLE},
- {"TABLE_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
+ {"TABLE_CATALOG", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{"TABLE_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"TABLE_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"LOGFILE_GROUP_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0,
@@ -6734,12 +6840,12 @@ void init_fill_schema_files_row(TABLE* table)
ST_FIELD_INFO referential_constraints_fields_info[]=
{
- {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0, OPEN_FULL_TABLE},
+ {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0, OPEN_FULL_TABLE},
{"CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
{"CONSTRAINT_NAME", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
- {"UNIQUE_CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0,
+ {"UNIQUE_CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
{"UNIQUE_CONSTRAINT_SCHEMA", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 0, 0,
OPEN_FULL_TABLE},
@@ -6792,11 +6898,12 @@ ST_SCHEMA_TABLE schema_tables[]=
fill_variables, make_old_format, 0, 0, -1, 0, 0},
{"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0,
- OPEN_TABLE_ONLY},
+ OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"OPEN_TABLES", open_tables_fields_info, create_schema_table,
fill_open_tables, make_old_format, 0, -1, -1, 1, 0},
{"PARTITIONS", partitions_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_partitions_record, 1, 2, 0, OPEN_TABLE_ONLY},
+ get_all_tables, 0, get_schema_partitions_record, 1, 2, 0,
+ OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"PLUGINS", plugin_fields_info, create_schema_table,
fill_plugins, make_old_format, 0, -1, -1, 0, 0},
{"PROCESSLIST", processlist_fields_info, create_schema_table,
@@ -6806,7 +6913,7 @@ ST_SCHEMA_TABLE schema_tables[]=
NULL, -1, -1, false, 0},
{"REFERENTIAL_CONSTRAINTS", referential_constraints_fields_info,
create_schema_table, get_all_tables, 0, get_referential_constraints_record,
- 1, 9, 0, OPEN_TABLE_ONLY},
+ 1, 9, 0, OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"ROUTINES", proc_fields_info, create_schema_table,
fill_schema_proc, make_proc_old_format, 0, -1, -1, 0, 0},
{"SCHEMATA", schema_fields_info, create_schema_table,
@@ -6826,14 +6933,15 @@ ST_SCHEMA_TABLE schema_tables[]=
get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0,
OPTIMIZE_I_S_TABLE},
{"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_constraints_record, 3, 4, 0, OPEN_TABLE_ONLY},
+ get_all_tables, 0, get_schema_constraints_record, 3, 4, 0,
+ OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"TABLE_NAMES", table_names_fields_info, create_schema_table,
get_all_tables, make_table_names_old_format, 0, 1, 2, 1, 0},
{"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table,
fill_schema_table_privileges, 0, 0, -1, -1, 0, 0},
{"TRIGGERS", triggers_fields_info, create_schema_table,
get_all_tables, make_old_format, get_schema_triggers_record, 5, 6, 0,
- OPEN_TABLE_ONLY},
+ OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table,
fill_schema_user_privileges, 0, 0, -1, -1, 0, 0},
{"VARIABLES", variables_fields_info, create_schema_table, fill_variables,
@@ -6994,7 +7102,7 @@ static bool show_create_trigger_impl(THD *thd,
fields.push_back(new Item_empty_string("Database Collation",
MY_CS_NAME_SIZE));
- if (p->send_fields(&fields, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ if (p->send_result_set_metadata(&fields, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
return TRUE;
/* Send data. */
@@ -7153,7 +7261,7 @@ bool show_create_trigger(THD *thd, const sp_name *trg_name)
if (!lst)
return TRUE;
- if (check_table_access(thd, TRIGGER_ACL, lst, 1, TRUE))
+ if (check_table_access(thd, TRIGGER_ACL, lst, FALSE, 1, TRUE))
{
my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "TRIGGER");
return TRUE;
@@ -7207,3 +7315,95 @@ bool show_create_trigger(THD *thd, const sp_name *trg_name)
status and client connection will be closed.
*/
}
+
+/*
+ Convert a string from the column character set to utf8 if possible;
+ the utf8 string may later be converted again to the character set used
+ by the client. Thus we attempt conversion from the column character set
+ to both utf8 and the client character set.
+
+ Examples of strings that should fail conversion to utf8 are unassigned
+ characters such as 0x81 in cp1250 (the Windows character set for
+ countries like the Czech Republic and Poland). An example of a string
+ that should fail conversion to the client character set (e.g. latin1)
+ is 0x2020 (dagger) in ucs2.
+
+ If the conversion fails we fall back to converting the string to a
+ hex-encoded format. The caller of the function can also ask for
+ hex-encoded output unconditionally.
+
+ SYNOPSIS
+ get_cs_converted_string_value()
+ thd Thread object
+ input_str Input string in cs character set
+ output_str Output string to be produced in utf8
+ cs Character set of input string
+ use_hex Use hex string unconditionally
+
+
+ RETURN VALUES
+ No return value
+*/
+
+static void get_cs_converted_string_value(THD *thd,
+ String *input_str,
+ String *output_str,
+ CHARSET_INFO *cs,
+ bool use_hex)
+{
+
+ output_str->length(0);
+ if (input_str->length() == 0)
+ {
+ output_str->append("''");
+ return;
+ }
+ if (!use_hex)
+ {
+ String try_val;
+ uint try_conv_error= 0;
+
+ try_val.copy(input_str->ptr(), input_str->length(), cs,
+ thd->variables.character_set_client, &try_conv_error);
+ if (!try_conv_error)
+ {
+ String val;
+ uint conv_error= 0;
+
+ val.copy(input_str->ptr(), input_str->length(), cs,
+ system_charset_info, &conv_error);
+ if (!conv_error)
+ {
+ append_unescaped(output_str, val.ptr(), val.length());
+ return;
+ }
+ }
+ /* We had a conversion error, use hex encoded string for safety */
+ }
+ {
+ const uchar *ptr;
+ uint i, len;
+ char buf[3];
+
+ output_str->append("_");
+ output_str->append(cs->csname);
+ output_str->append(" ");
+ output_str->append("0x");
+ len= input_str->length();
+ ptr= (uchar*)input_str->ptr();
+ for (i= 0; i < len; i++)
+ {
+ uint high, low;
+
+ high= (*ptr) >> 4;
+ low= (*ptr) & 0x0F;
+ buf[0]= _dig_vec_upper[high];
+ buf[1]= _dig_vec_upper[low];
+ buf[2]= 0;
+ output_str->append((const char*)buf);
+ ptr++;
+ }
+ }
+ return;
+}
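
To make the fallback concrete: a value that converts cleanly is emitted through append_unescaped() as a quoted utf8 literal, while an unconvertible value is printed as a character set introducer followed by uppercase hex, so the cp1250 example from the comment would come out roughly as _cp1250 0x81. The hex form in isolation (hex_with_introducer() is a hypothetical name; _dig_vec_upper is the server's existing digit table):

  static void hex_with_introducer(const String *in, CHARSET_INFO *cs,
                                  String *out)
  {
    out->append("_");
    out->append(cs->csname);
    out->append(" 0x");
    const uchar *p= (const uchar*) in->ptr();
    for (uint i= 0; i < in->length(); i++, p++)
    {
      char d[3];
      d[0]= _dig_vec_upper[(*p) >> 4];   /* high nibble */
      d[1]= _dig_vec_upper[(*p) & 0x0F]; /* low nibble  */
      d[2]= 0;
      out->append(d);
    }
  }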
diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc
new file mode 100644
index 00000000000..c9ab37272b8
--- /dev/null
+++ b/sql/sql_signal.cc
@@ -0,0 +1,510 @@
+/* Copyright (C) 2008 Sun Microsystems, Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "mysql_priv.h"
+#include "sp_head.h"
+#include "sp_pcontext.h"
+#include "sp_rcontext.h"
+#include "sql_signal.h"
+
+/*
+ The parser accepts any error code (desired)
+ The runtime internally supports any error code (desired)
+ The client-server protocol is limited to 16-bit error codes (restriction)
+ Enforcing the 65535 limit in the runtime until the protocol can change.
+*/
+#define MAX_MYSQL_ERRNO UINT_MAX16
+
+const LEX_STRING Diag_condition_item_names[]=
+{
+ { C_STRING_WITH_LEN("CLASS_ORIGIN") },
+ { C_STRING_WITH_LEN("SUBCLASS_ORIGIN") },
+ { C_STRING_WITH_LEN("CONSTRAINT_CATALOG") },
+ { C_STRING_WITH_LEN("CONSTRAINT_SCHEMA") },
+ { C_STRING_WITH_LEN("CONSTRAINT_NAME") },
+ { C_STRING_WITH_LEN("CATALOG_NAME") },
+ { C_STRING_WITH_LEN("SCHEMA_NAME") },
+ { C_STRING_WITH_LEN("TABLE_NAME") },
+ { C_STRING_WITH_LEN("COLUMN_NAME") },
+ { C_STRING_WITH_LEN("CURSOR_NAME") },
+ { C_STRING_WITH_LEN("MESSAGE_TEXT") },
+ { C_STRING_WITH_LEN("MYSQL_ERRNO") },
+
+ { C_STRING_WITH_LEN("CONDITION_IDENTIFIER") },
+ { C_STRING_WITH_LEN("CONDITION_NUMBER") },
+ { C_STRING_WITH_LEN("CONNECTION_NAME") },
+ { C_STRING_WITH_LEN("MESSAGE_LENGTH") },
+ { C_STRING_WITH_LEN("MESSAGE_OCTET_LENGTH") },
+ { C_STRING_WITH_LEN("PARAMETER_MODE") },
+ { C_STRING_WITH_LEN("PARAMETER_NAME") },
+ { C_STRING_WITH_LEN("PARAMETER_ORDINAL_POSITION") },
+ { C_STRING_WITH_LEN("RETURNED_SQLSTATE") },
+ { C_STRING_WITH_LEN("ROUTINE_CATALOG") },
+ { C_STRING_WITH_LEN("ROUTINE_NAME") },
+ { C_STRING_WITH_LEN("ROUTINE_SCHEMA") },
+ { C_STRING_WITH_LEN("SERVER_NAME") },
+ { C_STRING_WITH_LEN("SPECIFIC_NAME") },
+ { C_STRING_WITH_LEN("TRIGGER_CATALOG") },
+ { C_STRING_WITH_LEN("TRIGGER_NAME") },
+ { C_STRING_WITH_LEN("TRIGGER_SCHEMA") }
+};
+
+const LEX_STRING Diag_statement_item_names[]=
+{
+ { C_STRING_WITH_LEN("NUMBER") },
+ { C_STRING_WITH_LEN("MORE") },
+ { C_STRING_WITH_LEN("COMMAND_FUNCTION") },
+ { C_STRING_WITH_LEN("COMMAND_FUNCTION_CODE") },
+ { C_STRING_WITH_LEN("DYNAMIC_FUNCTION") },
+ { C_STRING_WITH_LEN("DYNAMIC_FUNCTION_CODE") },
+ { C_STRING_WITH_LEN("ROW_COUNT") },
+ { C_STRING_WITH_LEN("TRANSACTIONS_COMMITTED") },
+ { C_STRING_WITH_LEN("TRANSACTIONS_ROLLED_BACK") },
+ { C_STRING_WITH_LEN("TRANSACTION_ACTIVE") }
+};
+
+Set_signal_information::Set_signal_information()
+{
+ clear();
+}
+
+Set_signal_information::Set_signal_information(
+ const Set_signal_information& set)
+{
+ memcpy(m_item, set.m_item, sizeof(m_item));
+}
+
+void Set_signal_information::clear()
+{
+ memset(m_item, 0, sizeof(m_item));
+}
+
+void Signal_common::assign_defaults(MYSQL_ERROR *cond,
+ bool set_level_code,
+ MYSQL_ERROR::enum_warning_level level,
+ int sqlcode)
+{
+ if (set_level_code)
+ {
+ cond->m_level= level;
+ cond->m_sql_errno= sqlcode;
+ }
+ if (! cond->get_message_text())
+ cond->set_builtin_message_text(ER(sqlcode));
+}
+
+void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond)
+{
+ DBUG_ASSERT(cond);
+
+ const char* sqlstate;
+ bool set_defaults= (m_cond != 0);
+
+ if (set_defaults)
+ {
+ /*
+ SIGNAL is restricted in sql_yacc.yy to only signal SQLSTATE conditions.
+ */
+ DBUG_ASSERT(m_cond->type == sp_cond_type::state);
+ sqlstate= m_cond->sqlstate;
+ cond->set_sqlstate(sqlstate);
+ }
+ else
+ sqlstate= cond->get_sqlstate();
+
+ DBUG_ASSERT(sqlstate);
+ /* SQLSTATE class "00": illegal, rejected in the parser. */
+ DBUG_ASSERT((sqlstate[0] != '0') || (sqlstate[1] != '0'));
+
+ if ((sqlstate[0] == '0') && (sqlstate[1] == '1'))
+ {
+ /* SQLSTATE class "01": warning. */
+ assign_defaults(cond, set_defaults,
+ MYSQL_ERROR::WARN_LEVEL_WARN, ER_SIGNAL_WARN);
+ }
+ else if ((sqlstate[0] == '0') && (sqlstate[1] == '2'))
+ {
+ /* SQLSTATE class "02": not found. */
+ assign_defaults(cond, set_defaults,
+ MYSQL_ERROR::WARN_LEVEL_ERROR, ER_SIGNAL_NOT_FOUND);
+ }
+ else
+ {
+ /* other SQLSTATE classes : error. */
+ assign_defaults(cond, set_defaults,
+ MYSQL_ERROR::WARN_LEVEL_ERROR, ER_SIGNAL_EXCEPTION);
+ }
+}
+
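
For reference, the SQLSTATE-class dispatch performed by eval_defaults() above boils down to a small pure function. The sketch below is illustrative only and is not part of the patch; signal_default_class and classify_sqlstate are invented names. It mirrors the three cases: class "01" defaults to a warning (ER_SIGNAL_WARN), class "02" to "not found" raised as an error (ER_SIGNAL_NOT_FOUND), and any other class to an error (ER_SIGNAL_EXCEPTION).

    /* Illustrative sketch: how the SQLSTATE class selects the default
       severity/error code used by SIGNAL and RESIGNAL. */
    enum signal_default_class
    {
      SIG_DEFAULT_WARN,       /* class "01": warning                       */
      SIG_DEFAULT_NOT_FOUND,  /* class "02": not found, raised as an error */
      SIG_DEFAULT_EXCEPTION   /* any other class: error                    */
    };

    static signal_default_class classify_sqlstate(const char *sqlstate)
    {
      /* Class "00" is rejected by the parser and never reaches this point. */
      if (sqlstate[0] == '0' && sqlstate[1] == '1')
        return SIG_DEFAULT_WARN;
      if (sqlstate[0] == '0' && sqlstate[1] == '2')
        return SIG_DEFAULT_NOT_FOUND;
      return SIG_DEFAULT_EXCEPTION;
    }
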
+static bool assign_fixed_string(MEM_ROOT *mem_root,
+ CHARSET_INFO *dst_cs,
+ size_t max_char,
+ String *dst,
+ const String* src)
+{
+ bool truncated;
+ size_t numchars;
+ CHARSET_INFO *src_cs;
+ const char* src_str;
+ const char* src_end;
+ size_t src_len;
+ size_t to_copy;
+ char* dst_str;
+ size_t dst_len;
+ size_t dst_copied;
+ uint32 dummy_offset;
+
+ src_str= src->ptr();
+ if (src_str == NULL)
+ {
+ dst->set((const char*) NULL, 0, dst_cs);
+ return false;
+ }
+
+ src_cs= src->charset();
+ src_len= src->length();
+ src_end= src_str + src_len;
+ numchars= src_cs->cset->numchars(src_cs, src_str, src_end);
+
+ if (numchars <= max_char)
+ {
+ to_copy= src->length();
+ truncated= false;
+ }
+ else
+ {
+ numchars= max_char;
+ to_copy= dst_cs->cset->charpos(dst_cs, src_str, src_end, numchars);
+ truncated= true;
+ }
+
+ if (String::needs_conversion(to_copy, src_cs, dst_cs, & dummy_offset))
+ {
+ dst_len= numchars * dst_cs->mbmaxlen;
+ dst_str= (char*) alloc_root(mem_root, dst_len + 1);
+ if (dst_str)
+ {
+ const char* well_formed_error_pos;
+ const char* cannot_convert_error_pos;
+ const char* from_end_pos;
+
+ dst_copied= well_formed_copy_nchars(dst_cs, dst_str, dst_len,
+ src_cs, src_str, src_len,
+ numchars,
+ & well_formed_error_pos,
+ & cannot_convert_error_pos,
+ & from_end_pos);
+ DBUG_ASSERT(dst_copied <= dst_len);
+ dst_len= dst_copied; /* In case the copy truncated the data */
+ dst_str[dst_copied]= '\0';
+ }
+ }
+ else
+ {
+ dst_len= to_copy;
+ dst_str= (char*) alloc_root(mem_root, dst_len + 1);
+ if (dst_str)
+ {
+ memcpy(dst_str, src_str, to_copy);
+ dst_str[to_copy]= '\0';
+ }
+ }
+ dst->set(dst_str, dst_len, dst_cs);
+
+ return truncated;
+}
+
+static int assign_condition_item(MEM_ROOT *mem_root, const char* name, THD *thd,
+ Item *set, String *ci)
+{
+ char str_buff[(64+1)*4]; /* Room for a null terminated UTF8 String 64 */
+ String str_value(str_buff, sizeof(str_buff), & my_charset_utf8_bin);
+ String *str;
+ bool truncated;
+
+ DBUG_ENTER("assign_condition_item");
+
+ if (set->is_null())
+ {
+ thd->raise_error_printf(ER_WRONG_VALUE_FOR_VAR, name, "NULL");
+ DBUG_RETURN(1);
+ }
+
+ str= set->val_str(& str_value);
+ truncated= assign_fixed_string(mem_root, & my_charset_utf8_bin, 64, ci, str);
+ if (truncated)
+ {
+ if (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES |
+ MODE_STRICT_ALL_TABLES))
+ {
+ thd->raise_error_printf(ER_COND_ITEM_TOO_LONG, name);
+ DBUG_RETURN(1);
+ }
+
+ thd->raise_warning_printf(WARN_COND_ITEM_TRUNCATED, name);
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond)
+{
+ struct cond_item_map
+ {
+ enum enum_diag_condition_item_name m_item;
+ String MYSQL_ERROR::*m_member;
+ };
+
+ static cond_item_map map[]=
+ {
+ { DIAG_CLASS_ORIGIN, & MYSQL_ERROR::m_class_origin },
+ { DIAG_SUBCLASS_ORIGIN, & MYSQL_ERROR::m_subclass_origin },
+ { DIAG_CONSTRAINT_CATALOG, & MYSQL_ERROR::m_constraint_catalog },
+ { DIAG_CONSTRAINT_SCHEMA, & MYSQL_ERROR::m_constraint_schema },
+ { DIAG_CONSTRAINT_NAME, & MYSQL_ERROR::m_constraint_name },
+ { DIAG_CATALOG_NAME, & MYSQL_ERROR::m_catalog_name },
+ { DIAG_SCHEMA_NAME, & MYSQL_ERROR::m_schema_name },
+ { DIAG_TABLE_NAME, & MYSQL_ERROR::m_table_name },
+ { DIAG_COLUMN_NAME, & MYSQL_ERROR::m_column_name },
+ { DIAG_CURSOR_NAME, & MYSQL_ERROR::m_cursor_name }
+ };
+
+ Item *set;
+ String str_value;
+ String *str;
+ int i;
+ uint j;
+ int result= 1;
+ enum enum_diag_condition_item_name item_enum;
+ String *member;
+ const LEX_STRING *name;
+
+ DBUG_ENTER("Signal_common::eval_signal_informations");
+
+ for (i= FIRST_DIAG_SET_PROPERTY;
+ i <= LAST_DIAG_SET_PROPERTY;
+ i++)
+ {
+ set= m_set_signal_information.m_item[i];
+ if (set)
+ {
+ if (! set->fixed)
+ {
+ if (set->fix_fields(thd, & set))
+ goto end;
+ m_set_signal_information.m_item[i]= set;
+ }
+ }
+ }
+
+ /*
+ Generically assign all the UTF8 String 64 condition items
+ described in the map.
+ */
+ for (j= 0; j < array_elements(map); j++)
+ {
+ item_enum= map[j].m_item;
+ set= m_set_signal_information.m_item[item_enum];
+ if (set != NULL)
+ {
+ member= & (cond->* map[j].m_member);
+ name= & Diag_condition_item_names[item_enum];
+ if (assign_condition_item(cond->m_mem_root, name->str, thd, set, member))
+ goto end;
+ }
+ }
+
+ /*
+ Assign the remaining attributes.
+ */
+
+ set= m_set_signal_information.m_item[DIAG_MESSAGE_TEXT];
+ if (set != NULL)
+ {
+ if (set->is_null())
+ {
+ thd->raise_error_printf(ER_WRONG_VALUE_FOR_VAR,
+ "MESSAGE_TEXT", "NULL");
+ goto end;
+ }
+ /*
+ Enforce that SET MESSAGE_TEXT = <value> evaluates the value
+ as VARCHAR(128) CHARACTER SET UTF8.
+ */
+ bool truncated;
+ String utf8_text;
+ str= set->val_str(& str_value);
+ truncated= assign_fixed_string(thd->mem_root, & my_charset_utf8_bin, 128,
+ & utf8_text, str);
+ if (truncated)
+ {
+ if (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES |
+ MODE_STRICT_ALL_TABLES))
+ {
+ thd->raise_error_printf(ER_COND_ITEM_TOO_LONG,
+ "MESSAGE_TEXT");
+ goto end;
+ }
+
+ thd->raise_warning_printf(WARN_COND_ITEM_TRUNCATED,
+ "MESSAGE_TEXT");
+ }
+
+ /*
+ See the comments
+ "Design notes about MYSQL_ERROR::m_message_text."
+ in file sql_error.cc
+ */
+ String converted_text;
+ converted_text.set_charset(error_message_charset_info);
+ converted_text.append(utf8_text.ptr(), utf8_text.length(),
+ utf8_text.charset());
+ cond->set_builtin_message_text(converted_text.c_ptr_safe());
+ }
+
+ set= m_set_signal_information.m_item[DIAG_MYSQL_ERRNO];
+ if (set != NULL)
+ {
+ if (set->is_null())
+ {
+ thd->raise_error_printf(ER_WRONG_VALUE_FOR_VAR,
+ "MYSQL_ERRNO", "NULL");
+ goto end;
+ }
+ longlong code= set->val_int();
+ if ((code <= 0) || (code > MAX_MYSQL_ERRNO))
+ {
+ str= set->val_str(& str_value);
+ thd->raise_error_printf(ER_WRONG_VALUE_FOR_VAR,
+ "MYSQL_ERRNO", str->c_ptr_safe());
+ goto end;
+ }
+ cond->m_sql_errno= (int) code;
+ }
+
+ /*
+ The various item->val_xxx() methods don't return an error code,
+ but flag thd in case of failure.
+ */
+ if (! thd->is_error())
+ result= 0;
+
+end:
+ for (i= FIRST_DIAG_SET_PROPERTY;
+ i <= LAST_DIAG_SET_PROPERTY;
+ i++)
+ {
+ set= m_set_signal_information.m_item[i];
+ if (set)
+ {
+ if (set->fixed)
+ set->cleanup();
+ }
+ }
+
+ DBUG_RETURN(result);
+}
+
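
The table-driven loop above uses C++ pointers to data members (the String MYSQL_ERROR::*m_member field of cond_item_map) so that a single loop can assign any of the string-valued condition items. A minimal standalone sketch of that technique follows; it is illustrative only, and Condition, ItemMapEntry and assign_all are invented names.

    #include <cstddef>

    struct Condition
    {
      int schema_name;
      int table_name;
    };

    struct ItemMapEntry
    {
      const char *name;
      int Condition::*member;   /* pointer to a data member of Condition */
    };

    static const ItemMapEntry item_map[]=
    {
      { "SCHEMA_NAME", &Condition::schema_name },
      { "TABLE_NAME",  &Condition::table_name }
    };

    /* Assign the same value through every mapped member, the way the loop
       above routes each SET <item> = <value> to its target member. */
    static void assign_all(Condition *cond, int value)
    {
      for (size_t i= 0; i < sizeof(item_map) / sizeof(item_map[0]); i++)
        cond->*item_map[i].member= value;
    }
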
+bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond)
+{
+ bool result= TRUE;
+
+ DBUG_ENTER("Signal_common::raise_condition");
+
+ DBUG_ASSERT(m_lex->query_tables == NULL);
+
+ eval_defaults(thd, cond);
+ if (eval_signal_informations(thd, cond))
+ DBUG_RETURN(result);
+
+ /* SIGNAL should not signal WARN_LEVEL_NOTE */
+ DBUG_ASSERT((cond->m_level == MYSQL_ERROR::WARN_LEVEL_WARN) ||
+ (cond->m_level == MYSQL_ERROR::WARN_LEVEL_ERROR));
+
+ MYSQL_ERROR *raised= NULL;
+ raised= thd->raise_condition(cond->get_sql_errno(),
+ cond->get_sqlstate(),
+ cond->get_level(),
+ cond->get_message_text());
+ if (raised)
+ raised->copy_opt_attributes(cond);
+
+ if (cond->m_level == MYSQL_ERROR::WARN_LEVEL_WARN)
+ {
+ my_ok(thd);
+ result= FALSE;
+ }
+
+ DBUG_RETURN(result);
+}
+
+bool Signal_statement::execute(THD *thd)
+{
+ bool result= TRUE;
+ MYSQL_ERROR cond(thd->mem_root);
+
+ DBUG_ENTER("Signal_statement::execute");
+
+ thd->stmt_da->reset_diagnostics_area();
+ thd->row_count_func= 0;
+ thd->warning_info->clear_warning_info(thd->query_id);
+
+ result= raise_condition(thd, &cond);
+
+ DBUG_RETURN(result);
+}
+
+
+bool Resignal_statement::execute(THD *thd)
+{
+ MYSQL_ERROR *signaled;
+ int result= TRUE;
+
+ DBUG_ENTER("Resignal_statement::execute");
+
+ thd->warning_info->m_warn_id= thd->query_id;
+
+ if (! thd->spcont || ! (signaled= thd->spcont->raised_condition()))
+ {
+ thd->raise_error(ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER);
+ DBUG_RETURN(result);
+ }
+
+ if (m_cond == NULL)
+ {
+ /* RESIGNAL without signal_value */
+ result= raise_condition(thd, signaled);
+ DBUG_RETURN(result);
+ }
+
+ /* RESIGNAL with signal_value */
+
+ /* Make room for 2 conditions */
+ thd->warning_info->reserve_space(thd, 2);
+
+ MYSQL_ERROR *raised= NULL;
+ raised= thd->raise_condition_no_handler(signaled->get_sql_errno(),
+ signaled->get_sqlstate(),
+ signaled->get_level(),
+ signaled->get_message_text());
+ if (raised)
+ raised->copy_opt_attributes(signaled);
+
+ result= raise_condition(thd, signaled);
+
+ DBUG_RETURN(result);
+}
+
diff --git a/sql/sql_signal.h b/sql/sql_signal.h
new file mode 100644
index 00000000000..c9c1517f4ad
--- /dev/null
+++ b/sql/sql_signal.h
@@ -0,0 +1,152 @@
+/* Copyright (C) 2008 Sun Microsystems, Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SQL_SIGNAL_H
+#define SQL_SIGNAL_H
+
+/**
+ Signal_common represents the common properties of the SIGNAL and RESIGNAL
+ statements.
+*/
+class Signal_common : public Sql_statement
+{
+protected:
+ /**
+ Constructor.
+ @param lex the LEX structure for this statement.
+ @param cond the condition signaled if any, or NULL.
+ @param set collection of signal condition item assignments.
+ */
+ Signal_common(LEX *lex,
+ const sp_cond_type_t *cond,
+ const Set_signal_information& set)
+ : Sql_statement(lex),
+ m_cond(cond),
+ m_set_signal_information(set)
+ {}
+
+ virtual ~Signal_common()
+ {}
+
+ /**
+    Assign default values to the condition items 'MYSQL_ERRNO', 'level'
+    and 'MESSAGE_TEXT' of a condition.
+    @param cond the condition to update.
+    @param set_level_code true if 'level' and 'MYSQL_ERRNO' need to be overwritten
+ @param level the level to assign
+ @param sqlcode the sql code to assign
+ */
+ static void assign_defaults(MYSQL_ERROR *cond,
+ bool set_level_code,
+ MYSQL_ERROR::enum_warning_level level,
+ int sqlcode);
+
+ /**
+    Evaluate the default values of the condition items 'SQLSTATE',
+    'MYSQL_ERRNO', 'level' and 'MESSAGE_TEXT' for this statement.
+ @param thd the current thread.
+ @param cond the condition to update.
+ */
+ void eval_defaults(THD *thd, MYSQL_ERROR *cond);
+
+ /**
+    Evaluate each signal condition item for this statement.
+ @param thd the current thread.
+ @param cond the condition to update.
+ @return 0 on success.
+ */
+ int eval_signal_informations(THD *thd, MYSQL_ERROR *cond);
+
+ /**
+ Raise a SQL condition.
+ @param thd the current thread.
+ @param cond the condition to raise.
+ @return false on success.
+ */
+ bool raise_condition(THD *thd, MYSQL_ERROR *cond);
+
+ /**
+ The condition to signal or resignal.
+ This member is optional and can be NULL (RESIGNAL).
+ */
+ const sp_cond_type_t *m_cond;
+
+ /**
+ Collection of 'SET item = value' assignments in the
+ SIGNAL/RESIGNAL statement.
+ */
+ Set_signal_information m_set_signal_information;
+};
+
+/**
+ Signal_statement represents a SIGNAL statement.
+*/
+class Signal_statement : public Signal_common
+{
+public:
+ /**
+ Constructor, used to represent a SIGNAL statement.
+ @param lex the LEX structure for this statement.
+ @param cond the SQL condition to signal (required).
+    @param set the collection of signal condition information items to signal.
+ */
+ Signal_statement(LEX *lex,
+ const sp_cond_type_t *cond,
+ const Set_signal_information& set)
+ : Signal_common(lex, cond, set)
+ {}
+
+ virtual ~Signal_statement()
+ {}
+
+ /**
+ Execute a SIGNAL statement at runtime.
+ @param thd the current thread.
+ @return false on success.
+ */
+ virtual bool execute(THD *thd);
+};
+
+/**
+ Resignal_statement represents a RESIGNAL statement.
+*/
+class Resignal_statement : public Signal_common
+{
+public:
+ /**
+ Constructor, used to represent a RESIGNAL statement.
+ @param lex the LEX structure for this statement.
+ @param cond the SQL condition to resignal (optional, may be NULL).
+    @param set the collection of signal condition information items to resignal.
+ */
+ Resignal_statement(LEX *lex,
+ const sp_cond_type_t *cond,
+ const Set_signal_information& set)
+ : Signal_common(lex, cond, set)
+ {}
+
+ virtual ~Resignal_statement()
+ {}
+
+ /**
+ Execute a RESIGNAL statement at runtime.
+ @param thd the current thread.
+    @return false on success.
+ */
+ virtual bool execute(THD *thd);
+};
+
+#endif
+
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index 1e9322f7f5b..102b3ef0a11 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -1,3 +1,6 @@
+#ifndef SQL_SORT_INCLUDED
+#define SQL_SORT_INCLUDED
+
/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -87,3 +90,5 @@ int merge_buffers(SORTPARAM *param,IO_CACHE *from_file,
BUFFPEK *lastbuff,BUFFPEK *Fb,
BUFFPEK *Tb,int flag);
void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length);
+
+#endif /* SQL_SORT_INCLUDED */
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 7c9793b273b..593450cacd5 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -794,10 +794,11 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
*/
-uint32
-copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
- const char *from, uint32 from_length, CHARSET_INFO *from_cs,
- uint *errors)
+static uint32
+copy_and_convert_extended(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs,
+ uint *errors)
{
int cnvres;
my_wc_t wc;
@@ -849,6 +850,65 @@ outp:
}
+/*
+ Optimized for quick copying of ASCII characters in the range 0x00..0x7F.
+*/
+uint32
+copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length, CHARSET_INFO *from_cs,
+ uint *errors)
+{
+ /*
+ If any of the character sets is not ASCII compatible,
+    immediately switch to the slow mb_wc->wc_mb method.
+ */
+ if ((to_cs->state | from_cs->state) & MY_CS_NONASCII)
+ return copy_and_convert_extended(to, to_length, to_cs,
+ from, from_length, from_cs, errors);
+
+ uint32 length= min(to_length, from_length), length2= length;
+
+#if defined(__i386__)
+ /*
+    Special loop for i386: it allows referring to a
+    non-aligned memory block as UINT32, which makes
+    it possible to copy four bytes at once. This
+    gives about a 10% performance improvement compared
+    to the byte-by-byte loop.
+ */
+ for ( ; length >= 4; length-= 4, from+= 4, to+= 4)
+ {
+ if ((*(uint32*)from) & 0x80808080)
+ break;
+ *((uint32*) to)= *((const uint32*) from);
+ }
+#endif
+
+ for (; ; *to++= *from++, length--)
+ {
+ if (!length)
+ {
+ *errors= 0;
+ return length2;
+ }
+ if (*((unsigned char*) from) > 0x7F) /* A non-ASCII character */
+ {
+ uint32 copied_length= length2 - length;
+ to_length-= copied_length;
+ from_length-= copied_length;
+ return copied_length + copy_and_convert_extended(to, to_length,
+ to_cs,
+ from, from_length,
+ from_cs,
+ errors);
+ }
+ }
+
+ DBUG_ASSERT(FALSE); // Should never get to here
+ return 0; // Make compiler happy
+}
+
+
/**
Copy string with HEX-encoding of "bad" characters.
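
The i386 fast path in copy_and_convert() above copies a whole 32-bit word per iteration as long as no byte has its high bit set; the 0x80808080 mask tests all four bytes at once. A minimal, self-contained sketch of that check (word_has_non_ascii is an invented name and not part of the patch):

    #include <stdint.h>

    /* True if any of the four bytes packed into w has its top bit set,
       i.e. the word contains at least one non-ASCII (>= 0x80) byte. */
    static inline int word_has_non_ascii(uint32_t w)
    {
      return (w & 0x80808080U) != 0;
    }
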
diff --git a/sql/sql_string.h b/sql/sql_string.h
index d62908e5d66..75dc1163eec 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -1,3 +1,6 @@
+#ifndef SQL_STRING_INCLUDED
+#define SQL_STRING_INCLUDED
+
/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -120,6 +123,11 @@ public:
(void) realloc(str_length);
return Ptr;
}
+ LEX_STRING lex_string() const
+ {
+ LEX_STRING lex_string = { (char*) ptr(), length() };
+ return lex_string;
+ }
void set(String &str,uint32 offset,uint32 arg_length)
{
@@ -389,3 +397,5 @@ static inline bool check_if_only_end_space(CHARSET_INFO *cs, char *str,
{
return str+ cs->cset->scan(cs, str, end, MY_SEQ_SPACES) == end;
}
+
+#endif /* SQL_STRING_INCLUDED */
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 869ae42c98c..b648a9b933e 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1585,7 +1585,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
{
if (!(part_syntax_buf= generate_partition_syntax(part_info,
&syntax_len,
- TRUE, TRUE)))
+ TRUE, TRUE,
+ lpt->create_info,
+ lpt->alter_info)))
{
DBUG_RETURN(TRUE);
}
@@ -1677,7 +1679,9 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
char *tmp_part_syntax_str;
if (!(part_syntax_buf= generate_partition_syntax(part_info,
&syntax_len,
- TRUE, TRUE)))
+ TRUE, TRUE,
+ lpt->create_info,
+ lpt->alter_info)))
{
error= 1;
goto err;
@@ -2056,6 +2060,12 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
}
DBUG_PRINT("table", ("table: 0x%lx s: 0x%lx", (long) table->table,
table->table ? (long) table->table->s : (long) -1));
+
+ DBUG_EXECUTE_IF("bug43138",
+ my_printf_error(ER_BAD_TABLE_ERROR,
+ ER(ER_BAD_TABLE_ERROR), MYF(0),
+ table->table_name););
+
}
/*
It's safe to unlock LOCK_open: we have an exclusive lock
@@ -2269,17 +2279,19 @@ bool check_duplicates_in_interval(const char *set_or_name,
tmp.count--;
if (find_type2(&tmp, (const char*)*cur_value, *cur_length, cs))
{
+ THD *thd= current_thd;
+ ErrConvString err(*cur_value, *cur_length, cs);
if ((current_thd->variables.sql_mode &
(MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)))
{
my_error(ER_DUPLICATED_VALUE_IN_TYPE, MYF(0),
- name,*cur_value,set_or_name);
+ name, err.ptr(), set_or_name);
return 1;
}
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_DUPLICATED_VALUE_IN_TYPE,
- ER(ER_DUPLICATED_VALUE_IN_TYPE),
- name,*cur_value,set_or_name);
+ push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_DUPLICATED_VALUE_IN_TYPE,
+ ER(ER_DUPLICATED_VALUE_IN_TYPE),
+ name, err.ptr(), set_or_name);
(*dup_val_count)++;
}
}
@@ -2496,6 +2508,39 @@ int prepare_create_field(Create_field *sql_field,
DBUG_RETURN(0);
}
+
+/*
+  Get the character set from a field object generated by the parser,
+  using default values when it is not set.
+
+ SYNOPSIS
+ get_sql_field_charset()
+ sql_field The sql_field object
+ create_info Info generated by parser
+
+ RETURN VALUES
+ cs Character set
+*/
+
+CHARSET_INFO* get_sql_field_charset(Create_field *sql_field,
+ HA_CREATE_INFO *create_info)
+{
+ CHARSET_INFO *cs= sql_field->charset;
+
+ if (!cs)
+ cs= create_info->default_table_charset;
+ /*
+ table_charset is set only in ALTER TABLE t1 CONVERT TO CHARACTER SET csname
+    if we want to change the character set for all varchar/char columns.
+    But the table charset must not affect the BLOB fields, so don't
+    allow changing my_charset_bin to something else.
+ */
+ if (create_info->table_charset && cs != &my_charset_bin)
+ cs= create_info->table_charset;
+ return cs;
+}
+
+
/*
Preparation for table creation
@@ -2559,18 +2604,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
executing a prepared statement for the second time.
*/
sql_field->length= sql_field->char_length;
- if (!sql_field->charset)
- sql_field->charset= create_info->default_table_charset;
- /*
- table_charset is set in ALTER TABLE if we want change character set
- for all varchar/char columns.
- But the table charset must not affect the BLOB fields, so don't
- allow to change my_charset_bin to somethig else.
- */
- if (create_info->table_charset && sql_field->charset != &my_charset_bin)
- sql_field->charset= create_info->table_charset;
-
- save_cs= sql_field->charset;
+ save_cs= sql_field->charset= get_sql_field_charset(sql_field,
+ create_info);
if ((sql_field->flags & BINCMP_FLAG) &&
!(sql_field->charset= get_charset_by_csname(sql_field->charset->csname,
MY_CS_BINSORT,MYF(0))))
@@ -2665,7 +2700,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
interval->type_lengths[i],
comma_buf, comma_length, NULL, 0))
{
- my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "set", tmp->ptr());
+ ErrConvString err(tmp->ptr(), tmp->length(), cs);
+ my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "set", err.ptr());
DBUG_RETURN(TRUE);
}
}
@@ -2879,9 +2915,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
while ((key=key_iterator++))
{
- DBUG_PRINT("info", ("key name: '%s' type: %d", key->name ? key->name :
+ DBUG_PRINT("info", ("key name: '%s' type: %d", key->name.str ? key->name.str :
"(none)" , key->type));
- LEX_STRING key_name_str;
if (key->type == Key::FOREIGN_KEY)
{
fk_key_count++;
@@ -2890,7 +2925,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
fk_key->ref_columns.elements != fk_key->columns.elements)
{
my_error(ER_WRONG_FK_DEF, MYF(0),
- (fk_key->name ? fk_key->name : "foreign key without name"),
+ (fk_key->name.str ? fk_key->name.str :
+ "foreign key without name"),
ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF));
DBUG_RETURN(TRUE);
}
@@ -2903,12 +2939,10 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
my_error(ER_TOO_MANY_KEY_PARTS,MYF(0),tmp);
DBUG_RETURN(TRUE);
}
- key_name_str.str= (char*) key->name;
- key_name_str.length= key->name ? strlen(key->name) : 0;
- if (check_string_char_length(&key_name_str, "", NAME_CHAR_LEN,
+ if (check_string_char_length(&key->name, "", NAME_CHAR_LEN,
system_charset_info, 1))
{
- my_error(ER_TOO_LONG_IDENT, MYF(0), key->name);
+ my_error(ER_TOO_LONG_IDENT, MYF(0), key->name.str);
DBUG_RETURN(TRUE);
}
key_iterator2.rewind ();
@@ -2922,7 +2956,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
Then we do not need the generated shorter key.
*/
if ((key2->type != Key::FOREIGN_KEY &&
- key2->name != ignore_key &&
+ key2->name.str != ignore_key &&
!foreign_key_prefix(key, key2)))
{
/* TODO: issue warning message */
@@ -2930,10 +2964,10 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (!key2->generated ||
(key->generated && key->columns.elements <
key2->columns.elements))
- key->name= ignore_key;
+ key->name.str= ignore_key;
else
{
- key2->name= ignore_key;
+ key2->name.str= ignore_key;
key_parts-= key2->columns.elements;
(*key_count)--;
}
@@ -2941,14 +2975,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
}
- if (key->name != ignore_key)
+ if (key->name.str != ignore_key)
key_parts+=key->columns.elements;
else
(*key_count)--;
- if (key->name && !tmp_table && (key->type != Key::PRIMARY) &&
- !my_strcasecmp(system_charset_info,key->name,primary_key_name))
+ if (key->name.str && !tmp_table && (key->type != Key::PRIMARY) &&
+ !my_strcasecmp(system_charset_info, key->name.str, primary_key_name))
{
- my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name);
+ my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name.str);
DBUG_RETURN(TRUE);
}
}
@@ -2971,12 +3005,12 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
uint key_length=0;
Key_part_spec *column;
- if (key->name == ignore_key)
+ if (key->name.str == ignore_key)
{
/* ignore redundant keys */
do
key=key_iterator++;
- while (key && key->name == ignore_key);
+ while (key && key->name.str == ignore_key);
if (!key)
break;
}
@@ -3089,22 +3123,22 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
field=0;
while ((sql_field=it++) &&
my_strcasecmp(system_charset_info,
- column->field_name,
+ column->field_name.str,
sql_field->field_name))
field++;
if (!sql_field)
{
- my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name);
+ my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
while ((dup_column= cols2++) != column)
{
if (!my_strcasecmp(system_charset_info,
- column->field_name, dup_column->field_name))
+ column->field_name.str, dup_column->field_name.str))
{
my_printf_error(ER_DUP_FIELDNAME,
ER(ER_DUP_FIELDNAME),MYF(0),
- column->field_name);
+ column->field_name.str);
DBUG_RETURN(TRUE);
}
}
@@ -3118,7 +3152,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
sql_field->charset->mbminlen > 1 || // ucs2 doesn't work yet
(ft_key_charset && sql_field->charset != ft_key_charset))
{
- my_error(ER_BAD_FT_COLUMN, MYF(0), column->field_name);
+ my_error(ER_BAD_FT_COLUMN, MYF(0), column->field_name.str);
DBUG_RETURN(-1);
}
ft_key_charset=sql_field->charset;
@@ -3146,7 +3180,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
{
- my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name);
+ my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type ==
@@ -3154,7 +3188,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
column->length= 25;
if (!column->length)
{
- my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name);
+ my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
}
@@ -3185,7 +3219,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_info->flags|= HA_NULL_PART_KEY;
if (!(file->ha_table_flags() & HA_NULL_IN_KEY))
{
- my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name);
+ my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
if (key->type == Key::SPATIAL)
@@ -3235,13 +3269,21 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
else if (!f_is_geom(sql_field->pack_flag) &&
- (column->length > length ||
- !Field::type_can_have_key_part (sql_field->sql_type) ||
- ((f_is_packed(sql_field->pack_flag) ||
- ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
- (key_info->flags & HA_NOSAME))) &&
- column->length != length)))
- {
+ ((column->length > length &&
+ !Field::type_can_have_key_part (sql_field->sql_type)) ||
+ ((f_is_packed(sql_field->pack_flag) ||
+ ((file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS) &&
+ (key_info->flags & HA_NOSAME))) &&
+ column->length != length)))
+ {
+ /* Catch invalid uses of partial keys.
+ A key is identified as 'partial' if column->length != length.
+         A partial key is invalid if the data type does
+         not allow it, or the field is packed (as in MyISAM),
+         or the storage engine doesn't allow prefixed search and
+         the key is a primary key.
+ */
+
my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0));
DBUG_RETURN(TRUE);
}
@@ -3250,7 +3292,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
else if (length == 0)
{
- my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name);
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name.str);
DBUG_RETURN(TRUE);
}
if (length > file->max_key_part_length() && key->type != Key::FULLTEXT)
@@ -3308,7 +3350,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_name=primary_key_name;
primary_key=1;
}
- else if (!(key_name = key->name))
+ else if (!(key_name= key->name.str))
key_name=make_unique_key_name(sql_field->field_name,
*key_info_buffer, key_info);
if (check_if_keyname_exists(key_name, *key_info_buffer, key_info))
@@ -3619,6 +3661,9 @@ bool mysql_create_table_no_lock(THD *thd,
}
if (check_engine(thd, table_name, create_info))
DBUG_RETURN(TRUE);
+
+ set_table_default_charset(thd, create_info, (char*) db);
+
db_options= create_info->table_options;
if (create_info->row_type == ROW_TYPE_DYNAMIC)
db_options|=HA_OPTION_PACK_RECORD;
@@ -3712,7 +3757,7 @@ bool mysql_create_table_no_lock(THD *thd,
ha_resolve_storage_engine_name(part_info->default_engine_type),
ha_resolve_storage_engine_name(create_info->db_type)));
if (part_info->check_partition_info(thd, &engine_type, file,
- create_info, TRUE))
+ create_info, FALSE))
goto err;
part_info->default_engine_type= engine_type;
@@ -3722,7 +3767,9 @@ bool mysql_create_table_no_lock(THD *thd,
*/
if (!(part_syntax_buf= generate_partition_syntax(part_info,
&syntax_len,
- TRUE, TRUE)))
+ TRUE, TRUE,
+ create_info,
+ alter_info)))
goto err;
part_info->part_info_string= part_syntax_buf;
part_info->part_info_len= syntax_len;
@@ -3748,9 +3795,9 @@ bool mysql_create_table_no_lock(THD *thd,
creates a proper .par file. The current part_info object is
only used to create the frm-file and .par-file.
*/
- if (part_info->use_default_no_partitions &&
- part_info->no_parts &&
- (int)part_info->no_parts !=
+ if (part_info->use_default_num_partitions &&
+ part_info->num_parts &&
+ (int)part_info->num_parts !=
file->get_default_no_partitions(create_info))
{
uint i;
@@ -3761,13 +3808,13 @@ bool mysql_create_table_no_lock(THD *thd,
(part_it++)->part_state= PART_TO_BE_DROPPED;
}
else if (part_info->is_sub_partitioned() &&
- part_info->use_default_no_subpartitions &&
- part_info->no_subparts &&
- (int)part_info->no_subparts !=
+ part_info->use_default_num_subpartitions &&
+ part_info->num_subparts &&
+ (int)part_info->num_subparts !=
file->get_default_no_partitions(create_info))
{
DBUG_ASSERT(thd->lex->sql_command != SQLCOM_CREATE_TABLE);
- part_info->no_subparts= file->get_default_no_partitions(create_info);
+ part_info->num_subparts= file->get_default_no_partitions(create_info);
}
}
else if (create_info->db_type != engine_type)
@@ -3789,8 +3836,6 @@ bool mysql_create_table_no_lock(THD *thd,
}
#endif
- set_table_default_charset(thd, create_info, (char*) db);
-
if (mysql_prepare_create_table(thd, create_info, alter_info,
internal_tmp_table,
&db_options, file,
@@ -3846,7 +3891,7 @@ bool mysql_create_table_no_lock(THD *thd,
Then she could create the table. This case is pretty obscure and
therefore we don't introduce a new error message only for it.
*/
- if (get_cached_table_share(db, alias))
+ if (get_cached_table_share(db, table_name))
{
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name);
goto unlock_and_end;
@@ -3980,7 +4025,7 @@ bool mysql_create_table(THD *thd, const char *db, const char *table_name,
/* Wait for any database locks */
pthread_mutex_lock(&LOCK_lock_db);
while (!thd->killed &&
- hash_search(&lock_db_cache,(uchar*) db, strlen(db)))
+ my_hash_search(&lock_db_cache,(uchar*) db, strlen(db)))
{
wait_for_condition(thd, &LOCK_lock_db, &COND_refresh);
pthread_mutex_lock(&LOCK_lock_db);
@@ -4520,7 +4565,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
item->maybe_null = 1;
field_list.push_back(item = new Item_empty_string("Msg_text", 255));
item->maybe_null = 1;
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
@@ -4569,6 +4614,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
/*
Set up which partitions that should be processed
if ALTER TABLE t ANALYZE/CHECK/OPTIMIZE/REPAIR PARTITION ..
+ CACHE INDEX/LOAD INDEX for specified partitions
*/
Alter_info *alter_info= &lex->alter_info;
@@ -4579,11 +4625,11 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
}
- uint no_parts_found;
- uint no_parts_opt= alter_info->partition_names.elements;
- no_parts_found= set_part_state(alter_info, table->table->part_info,
- PART_CHANGED);
- if (no_parts_found != no_parts_opt &&
+ uint num_parts_found;
+ uint num_parts_opt= alter_info->partition_names.elements;
+ num_parts_found= set_part_state(alter_info, table->table->part_info,
+ PART_ADMIN);
+ if (num_parts_found != num_parts_opt &&
(!(alter_info->flags & ALTER_ALL_PARTITION)))
{
char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE];
@@ -4640,17 +4686,17 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
if (!table->table)
{
DBUG_PRINT("admin", ("open table failed"));
- if (!thd->warn_list.elements)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ if (thd->warning_info->is_empty())
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_CHECK_NO_SUCH_TABLE, ER(ER_CHECK_NO_SUCH_TABLE));
/* if it was a view will check md5 sum */
if (table->view &&
view_checksum(thd, table) == HA_ADMIN_WRONG_CHECKSUM)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_VIEW_CHECKSUM, ER(ER_VIEW_CHECKSUM));
- if (thd->main_da.is_error() &&
- (thd->main_da.sql_errno() == ER_NO_SUCH_TABLE ||
- thd->main_da.sql_errno() == ER_FILE_NOT_FOUND))
+ if (thd->stmt_da->is_error() &&
+ (thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE ||
+ thd->stmt_da->sql_errno() == ER_FILE_NOT_FOUND))
/* A missing table is just issued as a failed command */
result_code= HA_ADMIN_FAILED;
else
@@ -4692,7 +4738,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
table->table=0; // For query cache
if (protocol->write())
goto err;
- thd->main_da.reset_diagnostics_area();
+ thd->stmt_da->reset_diagnostics_area();
continue;
/* purecov: end */
}
@@ -4752,8 +4798,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
we will store the error message in a result set row
and then clear.
*/
- if (thd->main_da.is_ok())
- thd->main_da.reset_diagnostics_area();
+ if (thd->stmt_da->is_ok())
+ thd->stmt_da->reset_diagnostics_area();
goto send_result;
}
}
@@ -4767,21 +4813,21 @@ send_result:
lex->cleanup_after_one_table_open();
thd->clear_error(); // these errors shouldn't get client
{
- List_iterator_fast<MYSQL_ERROR> it(thd->warn_list);
+ List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
MYSQL_ERROR *err;
while ((err= it++))
{
protocol->prepare_for_resend();
protocol->store(table_name, system_charset_info);
protocol->store((char*) operator_name, system_charset_info);
- protocol->store(warning_level_names[err->level].str,
- warning_level_names[err->level].length,
+ protocol->store(warning_level_names[err->get_level()].str,
+ warning_level_names[err->get_level()].length,
system_charset_info);
- protocol->store(err->msg, system_charset_info);
+ protocol->store(err->get_message_text(), system_charset_info);
if (protocol->write())
goto err;
}
- mysql_reset_errors(thd, true);
+ thd->warning_info->clear_warning_info(thd->query_id);
}
protocol->prepare_for_resend();
protocol->store(table_name, system_charset_info);
@@ -4876,8 +4922,8 @@ send_result_message:
we will store the error message in a result set row
and then clear.
*/
- if (thd->main_da.is_ok())
- thd->main_da.reset_diagnostics_area();
+ if (thd->stmt_da->is_ok())
+ thd->stmt_da->reset_diagnostics_area();
ha_autocommit_or_rollback(thd, 0);
close_thread_tables(thd);
if (!result_code) // recreation went ok
@@ -4895,7 +4941,7 @@ send_result_message:
DBUG_ASSERT(thd->is_error());
if (thd->is_error())
{
- const char *err_msg= thd->main_da.message();
+ const char *err_msg= thd->stmt_da->message();
if (!thd->vio_ok())
{
sql_print_error("%s", err_msg);
@@ -5426,7 +5472,7 @@ binlog:
*/
if (!table->view)
{
- IF_DBUG(int result=)
+ int result __attribute__((unused))=
store_create_info(thd, table, &query,
create_info, FALSE /* show_database */);
@@ -6245,6 +6291,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
key_part_length /= key_part->field->charset()->mbmaxlen;
key_parts.push_back(new Key_part_spec(cfield->field_name,
+ strlen(cfield->field_name),
key_part_length));
}
if (key_parts.elements)
@@ -6274,7 +6321,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
else
key_type= Key::MULTIPLE;
- key= new Key(key_type, key_name,
+ key= new Key(key_type, key_name, strlen(key_name),
&key_create_info,
test(key_info->flags & HA_GENERATED_KEY),
key_parts);
@@ -6287,10 +6334,10 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
{
if (key->type != Key::FOREIGN_KEY)
new_key_list.push_back(key);
- if (key->name &&
- !my_strcasecmp(system_charset_info,key->name,primary_key_name))
+ if (key->name.str &&
+ !my_strcasecmp(system_charset_info, key->name.str, primary_key_name))
{
- my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name);
+ my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name.str);
goto err;
}
}
@@ -7489,7 +7536,7 @@ view_err:
end_temporary:
my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
(ulong) (copied + deleted), (ulong) deleted,
- (ulong) thd->cuted_fields);
+ (ulong) thd->warning_info->statement_warn_count());
my_ok(thd, copied + deleted, 0L, tmp_name);
thd->some_tables_deleted=0;
DBUG_RETURN(FALSE);
@@ -7513,7 +7560,8 @@ err:
the table to be altered isn't empty.
Report error here.
*/
- if (alter_info->error_if_not_empty && thd->row_count)
+ if (alter_info->error_if_not_empty &&
+ thd->warning_info->current_row_for_warning())
{
const char *f_val= 0;
enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE;
@@ -7534,7 +7582,7 @@ err:
}
bool save_abort_on_warning= thd->abort_on_warning;
thd->abort_on_warning= TRUE;
- make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
f_val, strlength(f_val), t_type,
alter_info->datetime_field->field_name);
thd->abort_on_warning= save_abort_on_warning;
@@ -7681,7 +7729,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE);
if (ignore)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- thd->row_count= 0;
+ thd->warning_info->reset_current_row_for_warning();
restore_record(to, s->default_values); // Create empty record
while (!(error=info.read_record(&info)))
{
@@ -7691,7 +7739,6 @@ copy_data_between_tables(TABLE *from,TABLE *to,
error= 1;
break;
}
- thd->row_count++;
/* Return error if source table isn't empty. */
if (error_if_not_empty)
{
@@ -7741,6 +7788,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
}
else
found_count++;
+ thd->warning_info->inc_current_row_for_warning();
}
end_read_record(&info);
free_io_cache(from);
@@ -7830,7 +7878,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
field_list.push_back(item= new Item_int("Checksum", (longlong) 1,
MY_INT64_NUM_DECIMAL_DIGITS));
item->maybe_null= 1;
- if (protocol->send_fields(&field_list,
+ if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc
index fcc442a8f9a..ef81c7d847e 100644
--- a/sql/sql_tablespace.cc
+++ b/sql/sql_tablespace.cc
@@ -31,7 +31,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
{
hton= ha_default_handlerton(thd);
if (ts_info->storage_engine != 0)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_USING_OTHER_HANDLER,
ER(ER_WARN_USING_OTHER_HANDLER),
ha_resolve_storage_engine_name(hton),
@@ -60,7 +60,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
}
else
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ha_resolve_storage_engine_name(hton),
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index eeb9a21b6f5..6c0cb08cc79 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -85,7 +85,7 @@ void print_cached_tables(void)
for (idx=unused=0 ; idx < open_cache.records ; idx++)
{
- TABLE *entry=(TABLE*) hash_element(&open_cache,idx);
+ TABLE *entry=(TABLE*) my_hash_element(&open_cache,idx);
printf("%-14.14s %-32s%6ld%8ld%6d %s\n",
entry->s->db.str, entry->s->table_name.str, entry->s->version,
entry->in_use ? entry->in_use->thread_id : 0L,
@@ -113,7 +113,7 @@ void print_cached_tables(void)
if (count != unused)
printf("Unused_links (%d) doesn't match open_cache: %d\n", count,unused);
printf("\nCurrent refresh version: %ld\n",refresh_version);
- if (hash_check(&open_cache))
+ if (my_hash_check(&open_cache))
printf("Error: File hash table is corrupted\n");
fflush(stdout);
VOID(pthread_mutex_unlock(&LOCK_open));
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index a251a533622..61da4617610 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -422,7 +422,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
thd->lex->query_tables_own_last= 0;
- err_status= check_table_access(thd, TRIGGER_ACL, tables, 1, FALSE);
+ err_status= check_table_access(thd, TRIGGER_ACL, tables, FALSE, 1, FALSE);
thd->lex->query_tables_own_last= save_query_tables_own_last;
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index f6754a75284..b411acf2ac5 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -1,3 +1,6 @@
+#ifndef SQL_TRIGGER_INCLUDED
+#define SQL_TRIGGER_INCLUDED
+
/* Copyright (C) 2004-2005 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -174,3 +177,4 @@ bool load_table_name_for_trigger(THD *thd,
const LEX_STRING *trn_path,
LEX_STRING *tbl_name);
+#endif /* SQL_TRIGGER_INCLUDED */
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index c6b41b59a3f..a1a0d9633b7 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -124,10 +124,10 @@ void udf_init()
init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0);
THD *new_thd = new THD;
if (!new_thd ||
- hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))
+ my_hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))
{
sql_print_error("Can't allocate memory for udf structures");
- hash_free(&udf_hash);
+ my_hash_free(&udf_hash);
free_root(&mem,MYF(0));
delete new_thd;
DBUG_VOID_RETURN;
@@ -239,20 +239,20 @@ void udf_free()
DBUG_ENTER("udf_free");
for (uint idx=0 ; idx < udf_hash.records ; idx++)
{
- udf_func *udf=(udf_func*) hash_element(&udf_hash,idx);
+ udf_func *udf=(udf_func*) my_hash_element(&udf_hash,idx);
if (udf->dlhandle) // Not closed before
{
/* Mark all versions using the same handler as closed */
for (uint j=idx+1 ; j < udf_hash.records ; j++)
{
- udf_func *tmp=(udf_func*) hash_element(&udf_hash,j);
+ udf_func *tmp=(udf_func*) my_hash_element(&udf_hash,j);
if (udf->dlhandle == tmp->dlhandle)
tmp->dlhandle=0; // Already closed
}
dlclose(udf->dlhandle);
}
}
- hash_free(&udf_hash);
+ my_hash_free(&udf_hash);
free_root(&mem,MYF(0));
if (initialized)
{
@@ -268,7 +268,7 @@ static void del_udf(udf_func *udf)
DBUG_ENTER("del_udf");
if (!--udf->usage_count)
{
- hash_delete(&udf_hash,(uchar*) udf);
+ my_hash_delete(&udf_hash,(uchar*) udf);
using_udf_functions=udf_hash.records != 0;
}
else
@@ -282,7 +282,7 @@ static void del_udf(udf_func *udf)
uint name_length=udf->name.length;
udf->name.str=(char*) "*";
udf->name.length=1;
- hash_update(&udf_hash,(uchar*) udf,(uchar*) name,name_length);
+ my_hash_update(&udf_hash,(uchar*) udf,(uchar*) name,name_length);
}
DBUG_VOID_RETURN;
}
@@ -302,7 +302,7 @@ void free_udf(udf_func *udf)
We come here when someone has deleted the udf function
while another thread still was using the udf
*/
- hash_delete(&udf_hash,(uchar*) udf);
+ my_hash_delete(&udf_hash,(uchar*) udf);
using_udf_functions=udf_hash.records != 0;
if (!find_udf_dl(udf->dl))
dlclose(udf->dlhandle);
@@ -328,8 +328,8 @@ udf_func *find_udf(const char *name,uint length,bool mark_used)
else
rw_rdlock(&THR_LOCK_udf); /* Called during parsing */
- if ((udf=(udf_func*) hash_search(&udf_hash,(uchar*) name,
- length ? length : (uint) strlen(name))))
+ if ((udf=(udf_func*) my_hash_search(&udf_hash,(uchar*) name,
+ length ? length : (uint) strlen(name))))
{
if (!udf->dlhandle)
udf=0; // Could not be opened
@@ -351,7 +351,7 @@ static void *find_udf_dl(const char *dl)
*/
for (uint idx=0 ; idx < udf_hash.records ; idx++)
{
- udf_func *udf=(udf_func*) hash_element(&udf_hash,idx);
+ udf_func *udf=(udf_func*) my_hash_element(&udf_hash,idx);
if (!strcmp(dl, udf->dl) && udf->dlhandle != NULL)
DBUG_RETURN(udf->dlhandle);
}
@@ -441,7 +441,7 @@ int mysql_create_function(THD *thd,udf_func *udf)
thd->clear_current_stmt_binlog_row_based();
rw_wrlock(&THR_LOCK_udf);
- if ((hash_search(&udf_hash,(uchar*) udf->name.str, udf->name.length)))
+ if ((my_hash_search(&udf_hash,(uchar*) udf->name.str, udf->name.length)))
{
my_error(ER_UDF_EXISTS, MYF(0), udf->name.str);
goto err;
@@ -544,8 +544,8 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
thd->clear_current_stmt_binlog_row_based();
rw_wrlock(&THR_LOCK_udf);
- if (!(udf=(udf_func*) hash_search(&udf_hash,(uchar*) udf_name->str,
- (uint) udf_name->length)))
+ if (!(udf=(udf_func*) my_hash_search(&udf_hash,(uchar*) udf_name->str,
+ (uint) udf_name->length)))
{
my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), udf_name->str);
goto err;
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index 4b8b492698e..95cb167869e 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -1,3 +1,6 @@
+#ifndef SQL_UDF_INCLUDED
+#define SQL_UDF_INCLUDED
+
/* Copyright (C) 2000-2001, 2003-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -140,3 +143,4 @@ void free_udf(udf_func *udf);
int mysql_create_function(THD *thd,udf_func *udf);
int mysql_drop_function(THD *thd,const LEX_STRING *name);
#endif
+#endif /* SQL_UDF_INCLUDED */
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 819214afa31..e7b4eb22e78 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -725,7 +725,7 @@ int mysql_update(THD *thd,
}
else
table->file->unlock_row();
- thd->row_count++;
+ thd->warning_info->inc_current_row_for_warning();
if (thd->is_error())
{
error= 1;
@@ -831,8 +831,9 @@ int mysql_update(THD *thd,
if (error < 0)
{
char buff[STRING_BUFFER_USUAL_SIZE];
- my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
- (ulong) thd->cuted_fields);
+ my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found,
+ (ulong) updated,
+ (ulong) thd->warning_info->statement_warn_count());
thd->row_count_func=
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
my_ok(thd, (ulong) thd->row_count_func, id, buff);
@@ -921,7 +922,6 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
{
update_non_unique_table_error(table_list, "UPDATE", duplicate);
- my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
DBUG_RETURN(TRUE);
}
}
@@ -1076,7 +1076,7 @@ reopen_tables:
if (check_access(thd, want_privilege,
tl->db, &tl->grant.privilege, 0, 0,
test(tl->schema_table)) ||
- check_grant(thd, want_privilege, tl, 0, 1, 0))
+ check_grant(thd, want_privilege, tl, FALSE, 1, FALSE))
DBUG_RETURN(TRUE);
}
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index ae3af0640a3..9abbadb8c6b 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -269,11 +269,11 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
*/
if ((check_access(thd, CREATE_VIEW_ACL, view->db, &view->grant.privilege,
0, 0, is_schema_db(view->db)) ||
- check_grant(thd, CREATE_VIEW_ACL, view, 0, 1, 0)) ||
+ check_grant(thd, CREATE_VIEW_ACL, view, FALSE, 1, FALSE)) ||
(mode != VIEW_CREATE_NEW &&
(check_access(thd, DROP_ACL, view->db, &view->grant.privilege,
0, 0, is_schema_db(view->db)) ||
- check_grant(thd, DROP_ACL, view, 0, 1, 0))))
+ check_grant(thd, DROP_ACL, view, FALSE, 1, FALSE))))
goto err;
for (sl= select_lex; sl; sl= sl->next_select())
@@ -323,7 +323,7 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
{
if (check_access(thd, SELECT_ACL, tbl->db,
&tbl->grant.privilege, 0, 0, test(tbl->schema_table)) ||
- check_grant(thd, SELECT_ACL, tbl, 0, 1, 0))
+ check_grant(thd, SELECT_ACL, tbl, FALSE, 1, FALSE))
goto err;
}
}
@@ -1233,8 +1233,9 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
if (!table->prelocking_placeholder &&
(old_lex->sql_command == SQLCOM_SELECT && old_lex->describe))
{
- if (check_table_access(thd, SELECT_ACL, view_tables, UINT_MAX, TRUE) &&
- check_table_access(thd, SHOW_VIEW_ACL, table, UINT_MAX, TRUE))
+ if (check_table_access(thd, SELECT_ACL, view_tables, FALSE,
+ UINT_MAX, TRUE) &&
+ check_table_access(thd, SHOW_VIEW_ACL, table, FALSE, UINT_MAX, TRUE))
{
my_message(ER_VIEW_NO_EXPLAIN, ER(ER_VIEW_NO_EXPLAIN), MYF(0));
goto err;
@@ -1244,7 +1245,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
(old_lex->sql_command == SQLCOM_SHOW_CREATE) &&
!table->belong_to_view)
{
- if (check_table_access(thd, SHOW_VIEW_ACL, table, UINT_MAX, FALSE))
+ if (check_table_access(thd, SHOW_VIEW_ACL, table, FALSE, UINT_MAX, FALSE))
goto err;
}
diff --git a/sql/sql_view.h b/sql/sql_view.h
index e08c2168e14..3de51c3264e 100644
--- a/sql/sql_view.h
+++ b/sql/sql_view.h
@@ -1,3 +1,6 @@
+#ifndef SQL_VIEW_INCLUDED
+#define SQL_VIEW_INCLUDED
+
/* -*- C++ -*- */
/* Copyright (C) 2004 MySQL AB
@@ -42,3 +45,4 @@ bool mysql_rename_view(THD *thd, const char *new_db, const char *new_name,
#define VIEW_ANY_ACL (SELECT_ACL | UPDATE_ACL | INSERT_ACL | DELETE_ACL)
+#endif /* SQL_VIEW_INCLUDED */
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 72b8aa32e30..e0b319b562b 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -43,6 +43,7 @@
#include "sp_pcontext.h"
#include "sp_rcontext.h"
#include "sp.h"
+#include "sql_signal.h"
#include "event_parse_data.h"
#include <myisam.h>
#include <myisammrg.h>
@@ -55,8 +56,6 @@
int yylex(void *yylval, void *yythd);
-const LEX_STRING null_lex_str= {0,0};
-
#define yyoverflow(A,B,C,D,E,F) \
{ \
ulong val= *(F); \
@@ -131,10 +130,13 @@ void my_parse_error(const char *s)
Lex_input_stream *lip= & thd->m_parser_state->m_lip;
const char *yytext= lip->get_tok_start();
+ if (!yytext)
+ yytext= "";
+
/* Push an error into the error stack */
+ ErrConvString err(yytext, thd->variables.character_set_client);
my_printf_error(ER_PARSE_ERROR, ER(ER_PARSE_ERROR), MYF(0), s,
- (yytext ? yytext : ""),
- lip->yylineno);
+ err.ptr(), lip->yylineno);
}
/**
@@ -635,10 +637,12 @@ Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal,
struct sp_cond_type *spcondtype;
struct { int vars, conds, hndlrs, curs; } spblock;
sp_name *spname;
- struct st_lex *lex;
+ LEX *lex;
sp_head *sphead;
struct p_elem_val *p_elem_value;
enum index_hint_type index_hint;
+ enum enum_filetype filetype;
+ Diag_condition_item_name diag_condition_item_name;
}
%{
@@ -720,6 +724,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CASCADED /* SQL-2003-R */
%token CASE_SYM /* SQL-2003-R */
%token CAST_SYM /* SQL-2003-R */
+%token CATALOG_NAME_SYM /* SQL-2003-N */
%token CHAIN_SYM /* SQL-2003-N */
%token CHANGE
%token CHANGED
@@ -728,6 +733,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CHECKSUM_SYM
%token CHECK_SYM /* SQL-2003-R */
%token CIPHER_SYM
+%token CLASS_ORIGIN_SYM /* SQL-2003-N */
%token CLIENT_SYM
%token CLOSE_SYM /* SQL-2003-R */
%token COALESCE /* SQL-2003-N */
@@ -736,6 +742,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token COLLATION_SYM /* SQL-2003-N */
%token COLUMNS
%token COLUMN_SYM /* SQL-2003-R */
+%token COLUMN_NAME_SYM /* SQL-2003-N */
%token COMMENT_SYM
%token COMMITTED_SYM /* SQL-2003-N */
%token COMMIT_SYM /* SQL-2003-R */
@@ -743,10 +750,13 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token COMPLETION_SYM
%token COMPRESSED_SYM
%token CONCURRENT
-%token CONDITION_SYM /* SQL-2003-N */
+%token CONDITION_SYM /* SQL-2003-R, SQL-2008-R */
%token CONNECTION_SYM
%token CONSISTENT_SYM
%token CONSTRAINT /* SQL-2003-R */
+%token CONSTRAINT_CATALOG_SYM /* SQL-2003-N */
+%token CONSTRAINT_NAME_SYM /* SQL-2003-N */
+%token CONSTRAINT_SCHEMA_SYM /* SQL-2003-N */
%token CONTAINS_SYM /* SQL-2003-N */
%token CONTEXT_SYM
%token CONTINUE_SYM /* SQL-2003-R */
@@ -760,6 +770,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CURDATE /* MYSQL-FUNC */
%token CURRENT_USER /* SQL-2003-R */
%token CURSOR_SYM /* SQL-2003-R */
+%token CURSOR_NAME_SYM /* SQL-2003-N */
%token CURTIME /* MYSQL-FUNC */
%token DATABASE
%token DATABASES
@@ -872,6 +883,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token IDENT_QUOTED
%token IF
%token IGNORE_SYM
+%token IGNORE_SERVER_IDS_SYM
%token IMPORT
%token INDEXES
%token INDEX_SYM
@@ -946,6 +958,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MASTER_SSL_VERIFY_SERVER_CERT_SYM
%token MASTER_SYM
%token MASTER_USER_SYM
+%token MASTER_HEARTBEAT_PERIOD_SYM
%token MATCH /* SQL-2003-R */
%token MAX_CONNECTIONS_PER_HOUR
%token MAX_QUERIES_PER_HOUR
@@ -961,6 +974,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MEDIUM_SYM
%token MEMORY_SYM
%token MERGE_SYM /* SQL-2003-R */
+%token MESSAGE_TEXT_SYM /* SQL-2003-N */
%token MICROSECOND_SYM /* MYSQL-FUNC */
%token MIGRATE_SYM
%token MINUTE_MICROSECOND_SYM
@@ -977,6 +991,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token MULTIPOINT
%token MULTIPOLYGON
%token MUTEX_SYM
+%token MYSQL_ERRNO_SYM
%token NAMES_SYM /* SQL-2003-N */
%token NAME_SYM /* SQL-2003-N */
%token NATIONAL_SYM /* SQL-2003-R */
@@ -1062,6 +1077,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token REDUNDANT_SYM
%token REFERENCES /* SQL-2003-R */
%token REGEXP
+%token RELAYLOG_SYM
%token RELAY_LOG_FILE_SYM
%token RELAY_LOG_POS_SYM
%token RELAY_THREAD
@@ -1077,6 +1093,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token REPLICATION
%token REQUIRE_SYM
%token RESET_SYM
+%token RESIGNAL_SYM /* SQL-2003-R */
%token RESOURCES
%token RESTORE_SYM
%token RESTRICT
@@ -1094,6 +1111,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token RTREE_SYM
%token SAVEPOINT_SYM /* SQL-2003-R */
%token SCHEDULE_SYM
+%token SCHEMA_NAME_SYM /* SQL-2003-N */
%token SECOND_MICROSECOND_SYM
%token SECOND_SYM /* SQL-2003-R */
%token SECURITY_SYM /* SQL-2003-N */
@@ -1112,6 +1130,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token SHIFT_RIGHT /* OPERATOR */
%token SHOW
%token SHUTDOWN
+%token SIGNAL_SYM /* SQL-2003-R */
%token SIGNED_SYM
%token SIMPLE_SYM /* SQL-2003-N */
%token SLAVE
@@ -1145,6 +1164,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token STORAGE_SYM
%token STRAIGHT_JOIN
%token STRING_SYM
+%token SUBCLASS_ORIGIN_SYM /* SQL-2003-N */
%token SUBDATE_SYM
%token SUBJECT_SYM
%token SUBPARTITIONS_SYM
@@ -1161,6 +1181,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TABLE_REF_PRIORITY
%token TABLE_SYM /* SQL-2003-R */
%token TABLE_CHECKSUM_SYM
+%token TABLE_NAME_SYM /* SQL-2003-N */
%token TEMPORARY /* SQL-2003-N */
%token TEMPTABLE_SYM
%token TERMINATED
@@ -1232,6 +1253,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token WRITE_SYM /* SQL-2003-N */
%token X509_SYM
%token XA_SYM
+%token XML_SYM
%token XOR
%token YEAR_MONTH_SYM
%token YEAR_SYM /* SQL-2003-R */
@@ -1263,6 +1285,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
IDENT_sys TEXT_STRING_sys TEXT_STRING_literal
NCHAR_STRING opt_component key_cache_name
sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem ident_or_empty
+ opt_constraint constraint opt_ident
%type <lex_str_ptr>
opt_table_alias
@@ -1271,8 +1294,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
table_ident table_ident_nodb references xid
%type <simple_string>
- remember_name remember_end opt_ident opt_db text_or_password
- opt_constraint constraint
+ remember_name remember_end opt_db text_or_password
%type <string>
text_string opt_gconcat_separator
@@ -1295,9 +1317,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <ulonglong_number>
ulonglong_num real_ulonglong_num size_number
-%type <p_elem_value>
- part_bit_expr
-
%type <lock_type>
replace_lock_option opt_low_priority insert_lock_option load_data_lock
@@ -1318,6 +1337,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
function_call_nonkeyword
function_call_generic
function_call_conflict
+ signal_allowed_expr
%type <item_num>
NUM_literal
@@ -1393,7 +1413,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
slave master_def master_defs master_file_def slave_until_opts
repair restore backup analyze check start checksum
field_list field_list_item field_spec kill column_def key_def
- keycache_list assign_to_keycache preload_list preload_keys
+ keycache_list keycache_list_or_parts assign_to_keycache
+ assign_to_keycache_parts
+ preload_list preload_list_or_parts preload_keys preload_keys_parts
select_item_list select_item values_list no_braces
opt_limit_clause delete_limit_clause fields opt_values values
procedure_list procedure_list2 procedure_item
@@ -1422,7 +1444,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
prepare prepare_src execute deallocate
statement sp_suid
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
- load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
+ opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
view_replace_or_algorithm view_replace
view_algorithm view_or_trigger_or_sp_or_event
definer_tail no_definer_tail
@@ -1430,6 +1452,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
view_check_option trigger_tail sp_tail sf_tail udf_tail event_tail
install uninstall partition_entry binlog_base64_event
init_key_options key_options key_opts key_opt key_using_alg
+ part_column_list
server_def server_options_list server_option
definer_opt no_definer definer
END_OF_INPUT
@@ -1445,12 +1468,16 @@ END_OF_INPUT
%type <NONE> case_stmt_specification simple_case_stmt searched_case_stmt
%type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list
-%type <spcondtype> sp_cond sp_hcond
+%type <spcondtype> sp_cond sp_hcond sqlstate signal_value opt_signal_value
%type <spblock> sp_decls sp_decl
%type <lex> sp_cursor_stmt
%type <spname> sp_name
%type <index_hint> index_hint_type
%type <num> index_hint_clause
+%type <filetype> data_or_xml
+
+%type <NONE> signal_stmt resignal_stmt
+%type <diag_condition_item_name> signal_condition_information_item_name
%type <NONE>
'-' '+' '*' '/' '%' '(' ')'
@@ -1497,7 +1524,7 @@ query:
Lex_input_stream *lip = YYLIP;
if ((YYTHD->client_capabilities & CLIENT_MULTI_QUERIES) &&
- ! lip->stmt_prepare_mode &&
+ lip->multi_statements &&
! lip->eof())
{
/*
@@ -1572,12 +1599,14 @@ statement:
| repair
| replace
| reset
+ | resignal_stmt
| restore
| revoke
| rollback
| savepoint
| select
| set
+ | signal_stmt
| show
| slave
| start
@@ -1690,6 +1719,12 @@ change:
LEX *lex = Lex;
lex->sql_command = SQLCOM_CHANGE_MASTER;
bzero((char*) &lex->mi, sizeof(lex->mi));
+            /*
+              Reset flags that may be left over from the previous CHANGE MASTER.
+            */
+ lex->mi.repl_ignore_server_ids_opt= LEX_MASTER_INFO::LEX_MI_UNCHANGED;
+ my_init_dynamic_array(&Lex->mi.repl_ignore_server_ids,
+ sizeof(::server_id), 16, 16);
}
master_defs
{}
@@ -1724,7 +1759,7 @@ master_def:
| MASTER_SSL_SYM EQ ulong_num
{
Lex->mi.ssl= $3 ?
- LEX_MASTER_INFO::SSL_ENABLE : LEX_MASTER_INFO::SSL_DISABLE;
+ LEX_MASTER_INFO::LEX_MI_ENABLE : LEX_MASTER_INFO::LEX_MI_DISABLE;
}
| MASTER_SSL_CA_SYM EQ TEXT_STRING_sys
{
@@ -1749,11 +1784,69 @@ master_def:
| MASTER_SSL_VERIFY_SERVER_CERT_SYM EQ ulong_num
{
Lex->mi.ssl_verify_server_cert= $3 ?
- LEX_MASTER_INFO::SSL_ENABLE : LEX_MASTER_INFO::SSL_DISABLE;
+ LEX_MASTER_INFO::LEX_MI_ENABLE : LEX_MASTER_INFO::LEX_MI_DISABLE;
}
- | master_file_def
+
+ | MASTER_HEARTBEAT_PERIOD_SYM EQ NUM_literal
+ {
+ Lex->mi.heartbeat_period= (float) $3->val_real();
+ if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD ||
+ Lex->mi.heartbeat_period < 0.0)
+ {
+ const char format[]= "%d seconds";
+ char buf[4*sizeof(SLAVE_MAX_HEARTBEAT_PERIOD) + sizeof(format)];
+ my_sprintf(buf, (buf, format, SLAVE_MAX_HEARTBEAT_PERIOD));
+ my_error(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
+ MYF(0),
+ " is negative or exceeds the maximum ",
+ buf);
+ MYSQL_YYABORT;
+ }
+ if (Lex->mi.heartbeat_period > slave_net_timeout)
+ {
+ push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
+ ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE),
+ " exceeds the value of `slave_net_timeout' sec.",
+ " A sensible value for the period should be"
+ " less than the timeout.");
+ }
+ if (Lex->mi.heartbeat_period < 0.001)
+ {
+ if (Lex->mi.heartbeat_period != 0.0)
+ {
+ push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
+ ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE),
+ " is less than 1 msec.",
+                                  " The period is reset to zero, which means"
+                                  " no heartbeats will be sent");
+ Lex->mi.heartbeat_period= 0.0;
+ }
+ Lex->mi.heartbeat_opt= LEX_MASTER_INFO::LEX_MI_DISABLE;
+ }
+ Lex->mi.heartbeat_opt= LEX_MASTER_INFO::LEX_MI_ENABLE;
+ }
+ | IGNORE_SERVER_IDS_SYM EQ '(' ignore_server_id_list ')'
+ {
+ Lex->mi.repl_ignore_server_ids_opt= LEX_MASTER_INFO::LEX_MI_ENABLE;
+ }
+ |
+ master_file_def
;
+ignore_server_id_list:
+ /* Empty */
+ | ignore_server_id
+ | ignore_server_id_list ',' ignore_server_id
+ ;
+
+ignore_server_id:
+ ulong_num
+ {
+ insert_dynamic(&Lex->mi.repl_ignore_server_ids, (uchar*) &($1));
+ }
+
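
As a quick illustration, here is a minimal sketch of a CHANGE MASTER statement that the new
MASTER_HEARTBEAT_PERIOD and IGNORE_SERVER_IDS rules above accept; the host name and server ids
are made-up values, and an empty list () is also allowed:

    CHANGE MASTER TO
      MASTER_HOST = 'master.example.com',
      MASTER_HEARTBEAT_PERIOD = 10.5,
      IGNORE_SERVER_IDS = (2, 3);

Per the checks above, a negative period or one above SLAVE_MAX_HEARTBEAT_PERIOD is rejected,
and a non-zero value below one millisecond is reset to zero with a warning.
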
master_file_def:
MASTER_LOG_FILE_SYM EQ TEXT_STRING_sys
{
@@ -1846,7 +1939,7 @@ create:
my_parse_error(ER(ER_SYNTAX_ERROR));
MYSQL_YYABORT;
}
- key= new Key($2, $4.str, &lex->key_create_info, 0,
+ key= new Key($2, $4, &lex->key_create_info, 0,
lex->col_list);
if (key == NULL)
MYSQL_YYABORT;
@@ -2472,12 +2565,12 @@ sp_decl:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
- if (spc->find_cond(&$2, TRUE))
- {
- my_error(ER_SP_DUP_COND, MYF(0), $2.str);
- MYSQL_YYABORT;
- }
- if(YYTHD->lex->spcont->push_cond(&$2, $5))
+ if (spc->find_cond(&$2, TRUE))
+ {
+ my_error(ER_SP_DUP_COND, MYF(0), $2.str);
+ MYSQL_YYABORT;
+ }
+ if(YYTHD->lex->spcont->push_cond(&$2, $5))
MYSQL_YYABORT;
$$.vars= $$.hndlrs= $$.curs= 0;
$$.conds= 1;
@@ -2492,9 +2585,9 @@ sp_decl:
sp_pcontext *ctx= lex->spcont;
sp_instr_hpush_jump *i=
new sp_instr_hpush_jump(sp->instructions(), ctx, $2,
- ctx->current_var_count());
+ ctx->current_var_count());
if (i == NULL ||
- sp->add_instr(i) ||
+ sp->add_instr(i) ||
sp->push_backpatch(i, ctx->push_label((char *)"", 0)))
MYSQL_YYABORT;
}
@@ -2511,15 +2604,15 @@ sp_decl:
i= new sp_instr_hreturn(sp->instructions(), ctx,
ctx->current_var_count());
if (i == NULL ||
- sp->add_instr(i))
+ sp->add_instr(i))
MYSQL_YYABORT;
}
else
{ /* EXIT or UNDO handler, just jump to the end of the block */
i= new sp_instr_hreturn(sp->instructions(), ctx, 0);
if (i == NULL ||
- sp->add_instr(i) ||
- sp->push_backpatch(i, lex->spcont->last_label())) /* Block end */
+ sp->add_instr(i) ||
+ sp->push_backpatch(i, lex->spcont->last_label())) /* Block end */
MYSQL_YYABORT;
}
lex->sphead->backpatch(hlab);
@@ -2546,9 +2639,9 @@ sp_decl:
}
i= new sp_instr_cpush(sp->instructions(), ctx, $5,
ctx->current_cursor_count());
- if (i == NULL ||
+ if (i == NULL ||
sp->add_instr(i) ||
- ctx->push_cursor(&$2))
+ ctx->push_cursor(&$2))
MYSQL_YYABORT;
$$.vars= $$.conds= $$.hndlrs= 0;
$$.curs= 1;
@@ -2617,13 +2710,22 @@ sp_hcond_element:
sp_cond:
ulong_num
{ /* mysql errno */
+ if ($1 == 0)
+ {
+ my_error(ER_WRONG_VALUE, MYF(0), "CONDITION", "0");
+ MYSQL_YYABORT;
+ }
$$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
if ($$ == NULL)
MYSQL_YYABORT;
$$->type= sp_cond_type_t::number;
$$->mysqlerr= $1;
}
- | SQLSTATE_SYM opt_value TEXT_STRING_literal
+ | sqlstate
+ ;
+
+sqlstate:
+ SQLSTATE_SYM opt_value TEXT_STRING_literal
{ /* SQLSTATE */
if (!sp_cond_check(&$3))
{
@@ -2634,8 +2736,8 @@ sp_cond:
if ($$ == NULL)
MYSQL_YYABORT;
$$->type= sp_cond_type_t::state;
- memcpy($$->sqlstate, $3.str, 5);
- $$->sqlstate[5]= '\0';
+ memcpy($$->sqlstate, $3.str, SQLSTATE_LENGTH);
+ $$->sqlstate[SQLSTATE_LENGTH]= '\0';
}
;
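
For orientation, the condition values that sp_cond and the new sqlstate rule parse look like the
following inside a stored routine body; the condition and handler names are illustrative, and the
added check above now rejects a literal error number of 0:

    DECLARE no_such_table CONDITION FOR SQLSTATE '42S02';
    DECLARE dup_key CONDITION FOR 1062;  -- a MySQL errno; 0 is now an error
    DECLARE CONTINUE HANDLER FOR no_such_table SELECT 'missing table';
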
@@ -2681,6 +2783,160 @@ sp_hcond:
}
;
+signal_stmt:
+ SIGNAL_SYM signal_value opt_set_signal_information
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ Yacc_state *state= & thd->m_parser_state->m_yacc;
+
+ lex->sql_command= SQLCOM_SIGNAL;
+ lex->m_stmt= new (thd->mem_root) Signal_statement(lex, $2,
+ state->m_set_signal_info);
+ if (lex->m_stmt == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+signal_value:
+ ident
+ {
+ LEX *lex= Lex;
+ sp_cond_type_t *cond;
+ if (lex->spcont == NULL)
+ {
+ /* SIGNAL foo cannot be used outside of stored programs */
+ my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str);
+ MYSQL_YYABORT;
+ }
+ cond= lex->spcont->find_cond(&$1);
+ if (cond == NULL)
+ {
+ my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str);
+ MYSQL_YYABORT;
+ }
+ if (cond->type != sp_cond_type_t::state)
+ {
+ my_error(ER_SIGNAL_BAD_CONDITION_TYPE, MYF(0));
+ MYSQL_YYABORT;
+ }
+ $$= cond;
+ }
+ | sqlstate
+ { $$= $1; }
+ ;
+
+opt_signal_value:
+ /* empty */
+ { $$= NULL; }
+ | signal_value
+ { $$= $1; }
+ ;
+
+opt_set_signal_information:
+ /* empty */
+ {
+ YYTHD->m_parser_state->m_yacc.m_set_signal_info.clear();
+ }
+ | SET signal_information_item_list
+ ;
+
+signal_information_item_list:
+ signal_condition_information_item_name EQ signal_allowed_expr
+ {
+ Set_signal_information *info;
+ info= & YYTHD->m_parser_state->m_yacc.m_set_signal_info;
+ int index= (int) $1;
+ info->clear();
+ info->m_item[index]= $3;
+ }
+ | signal_information_item_list ','
+ signal_condition_information_item_name EQ signal_allowed_expr
+ {
+ Set_signal_information *info;
+ info= & YYTHD->m_parser_state->m_yacc.m_set_signal_info;
+ int index= (int) $3;
+ if (info->m_item[index] != NULL)
+ {
+ my_error(ER_DUP_SIGNAL_SET, MYF(0),
+ Diag_condition_item_names[index].str);
+ MYSQL_YYABORT;
+ }
+ info->m_item[index]= $5;
+ }
+ ;
+
+/*
+  Only a limited subset of <expr> is allowed in SIGNAL/RESIGNAL.
+*/
+signal_allowed_expr:
+ literal
+ { $$= $1; }
+ | variable
+ {
+ if ($1->type() == Item::FUNC_ITEM)
+ {
+ Item_func *item= (Item_func*) $1;
+ if (item->functype() == Item_func::SUSERVAR_FUNC)
+ {
+ /*
+ Don't allow the following syntax:
+ SIGNAL/RESIGNAL ...
+ SET <signal condition item name> = @foo := expr
+ */
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ }
+ $$= $1;
+ }
+ | simple_ident
+ { $$= $1; }
+ ;
+
+/* condition information items that can be set in SIGNAL / RESIGNAL */
+signal_condition_information_item_name:
+ CLASS_ORIGIN_SYM
+ { $$= DIAG_CLASS_ORIGIN; }
+ | SUBCLASS_ORIGIN_SYM
+ { $$= DIAG_SUBCLASS_ORIGIN; }
+ | CONSTRAINT_CATALOG_SYM
+ { $$= DIAG_CONSTRAINT_CATALOG; }
+ | CONSTRAINT_SCHEMA_SYM
+ { $$= DIAG_CONSTRAINT_SCHEMA; }
+ | CONSTRAINT_NAME_SYM
+ { $$= DIAG_CONSTRAINT_NAME; }
+ | CATALOG_NAME_SYM
+ { $$= DIAG_CATALOG_NAME; }
+ | SCHEMA_NAME_SYM
+ { $$= DIAG_SCHEMA_NAME; }
+ | TABLE_NAME_SYM
+ { $$= DIAG_TABLE_NAME; }
+ | COLUMN_NAME_SYM
+ { $$= DIAG_COLUMN_NAME; }
+ | CURSOR_NAME_SYM
+ { $$= DIAG_CURSOR_NAME; }
+ | MESSAGE_TEXT_SYM
+ { $$= DIAG_MESSAGE_TEXT; }
+ | MYSQL_ERRNO_SYM
+ { $$= DIAG_MYSQL_ERRNO; }
+ ;
+
+resignal_stmt:
+ RESIGNAL_SYM opt_signal_value opt_set_signal_information
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ Yacc_state *state= & thd->m_parser_state->m_yacc;
+
+ lex->sql_command= SQLCOM_RESIGNAL;
+ lex->m_stmt= new (thd->mem_root) Resignal_statement(lex, $2,
+ state->m_set_signal_info);
+ if (lex->m_stmt == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
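
A minimal sketch of statements accepted by the signal_stmt and resignal_stmt rules above; the
SQLSTATE value, message text and error number are illustrative. As signal_allowed_expr notes,
only literals, variables and simple identifiers may appear on the right-hand side of SET:

    SIGNAL SQLSTATE '45000'
      SET MESSAGE_TEXT = 'balance would become negative',
          MYSQL_ERRNO = 5001;

    -- inside a handler: re-raise the active condition with extra items set
    RESIGNAL SET SCHEMA_NAME = 'shop', TABLE_NAME = 'accounts';

Setting the same condition information item twice in one SET list raises ER_DUP_SIGNAL_SET,
as enforced in signal_information_item_list.
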
sp_decl_idents:
ident
{
@@ -2818,7 +3074,7 @@ sp_proc_stmt_return:
i= new sp_instr_freturn(sp->instructions(), lex->spcont, $3,
sp->m_return_field_def.sql_type, lex);
if (i == NULL ||
- sp->add_instr(i))
+ sp->add_instr(i))
MYSQL_YYABORT;
sp->m_flags|= sp_head::HAS_RETURN;
}
@@ -3059,7 +3315,7 @@ sp_if:
sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, ctx,
$2, lex);
if (i == NULL ||
- sp->push_backpatch(i, ctx->push_label((char *)"", 0)) ||
+ sp->push_backpatch(i, ctx->push_label((char *)"", 0)) ||
sp->add_cont_backpatch(i) ||
sp->add_instr(i))
MYSQL_YYABORT;
@@ -3348,7 +3604,7 @@ sp_unlabeled_control:
if (i == NULL ||
lex->sphead->add_instr(i))
MYSQL_YYABORT;
- }
+ }
| WHILE_SYM
{ Lex->sphead->reset_lex(YYTHD); }
expr DO_SYM
@@ -3359,7 +3615,7 @@ sp_unlabeled_control:
sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont,
$3, lex);
if (i == NULL ||
- /* Jumping forward */
+ /* Jumping forward */
sp->push_backpatch(i, lex->spcont->last_label()) ||
sp->new_cont_backpatch(i) ||
sp->add_instr(i))
@@ -3892,17 +4148,9 @@ opt_partitioning:
;
partitioning:
- PARTITION_SYM
+ PARTITION_SYM have_partitioning
{
-#ifdef WITH_PARTITION_STORAGE_ENGINE
LEX *lex= Lex;
- LEX_STRING partition_name={C_STRING_WITH_LEN("partition")};
- if (!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN))
- {
- my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
- "--skip-partition");
- MYSQL_YYABORT;
- }
lex->part_info= new partition_info();
if (!lex->part_info)
{
@@ -3913,14 +4161,27 @@ partitioning:
{
lex->alter_info.flags|= ALTER_PARTITION;
}
+ }
+ partition
+ ;
+
+have_partitioning:
+ /* empty */
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ LEX_STRING partition_name={C_STRING_WITH_LEN("partition")};
+ if (!plugin_is_ready(&partition_name, MYSQL_STORAGE_ENGINE_PLUGIN))
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
+ "--skip-partition");
+ MYSQL_YYABORT;
+ }
#else
- my_error(ER_FEATURE_DISABLED, MYF(0),
- "partitioning", "--with-partition");
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
+ "--skip-partition");
MYSQL_YYABORT;
#endif
-
}
- partition
;
partition_entry:
@@ -3941,25 +4202,28 @@ partition_entry:
;
partition:
- BY part_type_def opt_no_parts opt_sub_part part_defs
+ BY part_type_def opt_num_parts opt_sub_part part_defs
;
part_type_def:
opt_linear KEY_SYM '(' part_field_list ')'
{
- LEX *lex= Lex;
- lex->part_info->list_of_part_fields= TRUE;
- lex->part_info->part_type= HASH_PARTITION;
+ partition_info *part_info= Lex->part_info;
+ part_info->list_of_part_fields= TRUE;
+ part_info->column_list= FALSE;
+ part_info->part_type= HASH_PARTITION;
}
| opt_linear HASH_SYM
{ Lex->part_info->part_type= HASH_PARTITION; }
part_func {}
- | RANGE_SYM
+ | RANGE_SYM part_func
{ Lex->part_info->part_type= RANGE_PARTITION; }
- part_func {}
- | LIST_SYM
+ | RANGE_SYM part_column_list
+ { Lex->part_info->part_type= RANGE_PARTITION; }
+ | LIST_SYM part_func
+ { Lex->part_info->part_type= LIST_PARTITION; }
+ | LIST_SYM part_column_list
{ Lex->part_info->part_type= LIST_PARTITION; }
- part_func {}
;
opt_linear:
@@ -3981,59 +4245,66 @@ part_field_item_list:
part_field_item:
ident
{
- if (Lex->part_info->part_field_list.push_back($1.str))
+ partition_info *part_info= Lex->part_info;
+ part_info->num_columns++;
+ if (part_info->part_field_list.push_back($1.str))
{
mem_alloc_error(1);
MYSQL_YYABORT;
}
+ if (part_info->num_columns > MAX_REF_PARTS)
+ {
+ my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0),
+ "list of partition fields");
+ MYSQL_YYABORT;
+ }
+ }
+ ;
+
+part_column_list:
+ COLUMNS '(' part_field_list ')'
+ {
+ partition_info *part_info= Lex->part_info;
+ part_info->column_list= TRUE;
+ part_info->list_of_part_fields= TRUE;
}
;
+
part_func:
'(' remember_name part_func_expr remember_end ')'
{
- LEX *lex= Lex;
- uint expr_len= (uint)($4 - $2) - 1;
- lex->part_info->list_of_part_fields= FALSE;
- lex->part_info->part_expr= $3;
- char *func_string= (char*) sql_memdup($2+1, expr_len);
- if (func_string == NULL)
- MYSQL_YYABORT;
- lex->part_info->part_func_string= func_string;
- lex->part_info->part_func_len= expr_len;
+ partition_info *part_info= Lex->part_info;
+ if (part_info->set_part_expr($2+1, $3, $4, FALSE))
+ { MYSQL_YYABORT; }
+ part_info->num_columns= 1;
+ part_info->column_list= FALSE;
}
;
sub_part_func:
'(' remember_name part_func_expr remember_end ')'
{
- LEX *lex= Lex;
- uint expr_len= (uint)($4 - $2) - 1;
- lex->part_info->list_of_subpart_fields= FALSE;
- lex->part_info->subpart_expr= $3;
- char *func_string= (char*) sql_memdup($2+1, expr_len);
- if (func_string == NULL)
- MYSQL_YYABORT;
- lex->part_info->subpart_func_string= func_string;
- lex->part_info->subpart_func_len= expr_len;
+ if (Lex->part_info->set_part_expr($2+1, $3, $4, TRUE))
+ { MYSQL_YYABORT; }
}
;
-opt_no_parts:
+opt_num_parts:
/* empty */ {}
| PARTITIONS_SYM real_ulong_num
{
- uint no_parts= $2;
- LEX *lex= Lex;
- if (no_parts == 0)
+ uint num_parts= $2;
+ partition_info *part_info= Lex->part_info;
+ if (num_parts == 0)
{
my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
MYSQL_YYABORT;
}
- lex->part_info->no_parts= no_parts;
- lex->part_info->use_default_no_partitions= FALSE;
+ part_info->num_parts= num_parts;
+ part_info->use_default_num_partitions= FALSE;
}
;
@@ -4041,15 +4312,15 @@ opt_sub_part:
/* empty */ {}
| SUBPARTITION_SYM BY opt_linear HASH_SYM sub_part_func
{ Lex->part_info->subpart_type= HASH_PARTITION; }
- opt_no_subparts {}
+ opt_num_subparts {}
| SUBPARTITION_SYM BY opt_linear KEY_SYM
'(' sub_part_field_list ')'
{
- LEX *lex= Lex;
- lex->part_info->subpart_type= HASH_PARTITION;
- lex->part_info->list_of_subpart_fields= TRUE;
+ partition_info *part_info= Lex->part_info;
+ part_info->subpart_type= HASH_PARTITION;
+ part_info->list_of_subpart_fields= TRUE;
}
- opt_no_subparts {}
+ opt_num_subparts {}
;
sub_part_field_list:
@@ -4060,11 +4331,18 @@ sub_part_field_list:
sub_part_field_item:
ident
{
- if (Lex->part_info->subpart_field_list.push_back($1.str))
+ partition_info *part_info= Lex->part_info;
+ if (part_info->subpart_field_list.push_back($1.str))
{
mem_alloc_error(1);
MYSQL_YYABORT;
}
+ if (part_info->subpart_field_list.elements > MAX_REF_PARTS)
+ {
+ my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0),
+ "list of subpartition fields");
+ MYSQL_YYABORT;
+ }
}
;
@@ -4084,33 +4362,46 @@ part_func_expr:
}
;
-opt_no_subparts:
+opt_num_subparts:
/* empty */ {}
| SUBPARTITIONS_SYM real_ulong_num
{
- uint no_parts= $2;
+ uint num_parts= $2;
LEX *lex= Lex;
- if (no_parts == 0)
+ if (num_parts == 0)
{
my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
MYSQL_YYABORT;
}
- lex->part_info->no_subparts= no_parts;
- lex->part_info->use_default_no_subpartitions= FALSE;
+ lex->part_info->num_subparts= num_parts;
+ lex->part_info->use_default_num_subpartitions= FALSE;
}
;
part_defs:
/* empty */
- {}
+ {
+ partition_info *part_info= Lex->part_info;
+ if (part_info->part_type == RANGE_PARTITION)
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0),
+ "RANGE");
+ MYSQL_YYABORT;
+ }
+ else if (part_info->part_type == LIST_PARTITION)
+ {
+ my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0),
+ "LIST");
+ MYSQL_YYABORT;
+ }
+ }
| '(' part_def_list ')'
{
- LEX *lex= Lex;
- partition_info *part_info= lex->part_info;
+ partition_info *part_info= Lex->part_info;
uint count_curr_parts= part_info->partitions.elements;
- if (part_info->no_parts != 0)
+ if (part_info->num_parts != 0)
{
- if (part_info->no_parts !=
+ if (part_info->num_parts !=
count_curr_parts)
{
my_parse_error(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
@@ -4119,7 +4410,7 @@ part_defs:
}
else if (count_curr_parts > 0)
{
- part_info->no_parts= count_curr_parts;
+ part_info->num_parts= count_curr_parts;
}
part_info->count_curr_subparts= 0;
}
@@ -4133,8 +4424,7 @@ part_def_list:
part_definition:
PARTITION_SYM
{
- LEX *lex= Lex;
- partition_info *part_info= lex->part_info;
+ partition_info *part_info= Lex->part_info;
partition_element *p_elem= new partition_element();
if (!p_elem || part_info->partitions.push_back(p_elem))
@@ -4146,7 +4436,7 @@ part_definition:
part_info->curr_part_elem= p_elem;
part_info->current_partition= p_elem;
part_info->use_default_partitions= FALSE;
- part_info->use_default_no_partitions= FALSE;
+ part_info->use_default_num_partitions= FALSE;
}
part_name
opt_part_values
@@ -4158,8 +4448,7 @@ part_definition:
part_name:
ident
{
- LEX *lex= Lex;
- partition_info *part_info= lex->part_info;
+ partition_info *part_info= Lex->part_info;
partition_element *p_elem= part_info->curr_part_elem;
p_elem->partition_name= $1.str;
}
@@ -4169,15 +4458,16 @@ opt_part_values:
/* empty */
{
LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
if (! lex->is_partition_management())
{
- if (lex->part_info->part_type == RANGE_PARTITION)
+ if (part_info->part_type == RANGE_PARTITION)
{
my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
"RANGE", "LESS THAN");
MYSQL_YYABORT;
}
- if (lex->part_info->part_type == LIST_PARTITION)
+ if (part_info->part_type == LIST_PARTITION)
{
my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
"LIST", "IN");
@@ -4185,14 +4475,15 @@ opt_part_values:
}
}
else
- lex->part_info->part_type= HASH_PARTITION;
+ part_info->part_type= HASH_PARTITION;
}
- | VALUES LESS_SYM THAN_SYM part_func_max
+ | VALUES LESS_SYM THAN_SYM
{
LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
if (! lex->is_partition_management())
{
- if (Lex->part_info->part_type != RANGE_PARTITION)
+ if (part_info->part_type != RANGE_PARTITION)
{
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
"RANGE", "LESS THAN");
@@ -4200,153 +4491,183 @@ opt_part_values:
}
}
else
- lex->part_info->part_type= RANGE_PARTITION;
+ part_info->part_type= RANGE_PARTITION;
}
- | VALUES IN_SYM '(' part_list_func ')'
+ part_func_max {}
+ | VALUES IN_SYM
{
LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
if (! lex->is_partition_management())
{
- if (Lex->part_info->part_type != LIST_PARTITION)
+ if (part_info->part_type != LIST_PARTITION)
{
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
- "LIST", "IN");
+ "LIST", "IN");
MYSQL_YYABORT;
}
}
else
- lex->part_info->part_type= LIST_PARTITION;
+ part_info->part_type= LIST_PARTITION;
}
+ part_values_in {}
;
part_func_max:
- max_value_sym
+ MAX_VALUE_SYM
{
- LEX *lex= Lex;
- if (lex->part_info->defined_max_value)
+ partition_info *part_info= Lex->part_info;
+
+ if (part_info->num_columns &&
+ part_info->num_columns != 1U)
{
- my_parse_error(ER(ER_PARTITION_MAXVALUE_ERROR));
+ part_info->print_debug("Kilroy II", NULL);
+ my_parse_error(ER(ER_PARTITION_COLUMN_LIST_ERROR));
MYSQL_YYABORT;
}
- lex->part_info->defined_max_value= TRUE;
- lex->part_info->curr_part_elem->max_value= TRUE;
- lex->part_info->curr_part_elem->range_value= LONGLONG_MAX;
- }
- | part_range_func
- {
- if (Lex->part_info->defined_max_value)
+ else
+ part_info->num_columns= 1U;
+ if (part_info->init_column_part())
{
- my_parse_error(ER(ER_PARTITION_MAXVALUE_ERROR));
MYSQL_YYABORT;
}
- if (Lex->part_info->curr_part_elem->has_null_value)
+ if (part_info->add_max_value())
{
- my_parse_error(ER(ER_NULL_IN_VALUES_LESS_THAN));
MYSQL_YYABORT;
}
}
+ | part_value_item {}
;
-max_value_sym:
- MAX_VALUE_SYM
- | '(' MAX_VALUE_SYM ')'
- ;
-
-part_range_func:
- '(' part_bit_expr ')'
+part_values_in:
+ part_value_item
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ part_info->print_debug("part_values_in: part_value_item", NULL);
+
+ if (part_info->num_columns != 1U)
+ {
+ if (!lex->is_partition_management() ||
+ part_info->num_columns == 0 ||
+ part_info->num_columns > MAX_REF_PARTS)
+ {
+ part_info->print_debug("Kilroy III", NULL);
+ my_parse_error(ER(ER_PARTITION_COLUMN_LIST_ERROR));
+ MYSQL_YYABORT;
+ }
+            /*
+              Reorganize the current large array into a list of small
+              arrays with one entry in each array. This can happen in
+              the first partition of an ALTER TABLE statement where we
+              ADD or REORGANIZE partitions, and it can only happen for
+              LIST partitions.
+            */
+ if (part_info->reorganize_into_single_field_col_val())
+ {
+ MYSQL_YYABORT;
+ }
+ }
+ }
+ | '(' part_value_list ')'
{
partition_info *part_info= Lex->part_info;
- if (!($2->unsigned_flag))
- part_info->curr_part_elem->signed_flag= TRUE;
- part_info->curr_part_elem->range_value= $2->value;
+ if (part_info->num_columns < 2U)
+ {
+ my_parse_error(ER(ER_ROW_SINGLE_PARTITION_FIELD_ERROR));
+ MYSQL_YYABORT;
+ }
}
;
-part_list_func:
- part_list_item {}
- | part_list_func ',' part_list_item {}
+part_value_list:
+ part_value_item {}
+ | part_value_list ',' part_value_item {}
;
-part_list_item:
- part_bit_expr
+part_value_item:
+ '('
{
- part_elem_value *value_ptr= $1;
partition_info *part_info= Lex->part_info;
- if (!value_ptr->unsigned_flag)
- part_info->curr_part_elem->signed_flag= TRUE;
- if (!value_ptr->null_value &&
- part_info->curr_part_elem->
- list_val_list.push_back(value_ptr))
+ part_info->print_debug("( part_value_item", NULL);
+ /* Initialisation code needed for each list of value expressions */
+ if (!(part_info->part_type == LIST_PARTITION &&
+ part_info->num_columns == 1U) &&
+ part_info->init_column_part())
{
- mem_alloc_error(sizeof(part_elem_value));
MYSQL_YYABORT;
}
}
- ;
-
-part_bit_expr:
- bit_expr
+ part_value_item_list {}
+ ')'
{
- Item *part_expr= $1;
- THD *thd= YYTHD;
- LEX *lex= thd->lex;
- Name_resolution_context *context= &lex->current_select->context;
- TABLE_LIST *save_list= context->table_list;
- const char *save_where= thd->where;
-
- context->table_list= 0;
- thd->where= "partition function";
-
- part_elem_value *value_ptr=
- (part_elem_value*)sql_alloc(sizeof(part_elem_value));
- if (!value_ptr)
+ LEX *lex= Lex;
+ partition_info *part_info= Lex->part_info;
+ part_info->print_debug(") part_value_item", NULL);
+ if (part_info->num_columns == 0)
+ part_info->num_columns= part_info->curr_list_object;
+ if (part_info->num_columns != part_info->curr_list_object)
{
- mem_alloc_error(sizeof(part_elem_value));
+            /*
+              All value item lists must be of equal length. In some cases,
+              covered by the above if-statement, we do not yet know how
+              many columns the partition has, so the assignment above
+              ensures that we only report an error when we know there
+              really is one.
+            */
+ part_info->print_debug("Kilroy I", NULL);
+ my_parse_error(ER(ER_PARTITION_COLUMN_LIST_ERROR));
MYSQL_YYABORT;
}
- if (part_expr->walk(&Item::check_partition_func_processor, 0,
- NULL))
+ part_info->curr_list_object= 0;
+ }
+ ;
+
+part_value_item_list:
+ part_value_expr_item {}
+ | part_value_item_list ',' part_value_expr_item {}
+ ;
+
+part_value_expr_item:
+ MAX_VALUE_SYM
+ {
+ partition_info *part_info= Lex->part_info;
+ part_column_list_val *col_val;
+ if (part_info->part_type == LIST_PARTITION)
{
- my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ my_parse_error(ER(ER_MAXVALUE_IN_VALUES_IN));
MYSQL_YYABORT;
}
- if (part_expr->fix_fields(YYTHD, (Item**)0) ||
- ((context->table_list= save_list), FALSE) ||
- (!part_expr->const_item()) ||
- (!lex->safe_to_cache_query))
+ if (part_info->add_max_value())
{
- my_error(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR, MYF(0));
MYSQL_YYABORT;
}
- thd->where= save_where;
- value_ptr->value= part_expr->val_int();
- value_ptr->unsigned_flag= TRUE;
- if (!part_expr->unsigned_flag &&
- value_ptr->value < 0)
- value_ptr->unsigned_flag= FALSE;
- if ((value_ptr->null_value= part_expr->null_value))
+ }
+ | bit_expr
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ Item *part_expr= $1;
+
+ if (!lex->safe_to_cache_query)
{
- if (Lex->part_info->curr_part_elem->has_null_value)
- {
- my_error(ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR, MYF(0));
- MYSQL_YYABORT;
- }
- Lex->part_info->curr_part_elem->has_null_value= TRUE;
+ my_error(ER_PARTITION_FUNCTION_IS_NOT_ALLOWED, MYF(0));
+ MYSQL_YYABORT;
}
- else if (part_expr->result_type() != INT_RESULT)
+ if (part_info->add_column_list_value(YYTHD, part_expr))
{
- my_parse_error(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR));
MYSQL_YYABORT;
}
- $$= value_ptr;
}
;
+
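
To make the new COLUMNS rules concrete, a hedged example of the syntax they accept (table and
column names are made up); every value list must contain as many values as there are columns in
the COLUMNS clause, which is what the num_columns/curr_list_object checks verify, and MAXVALUE
is allowed only for RANGE COLUMNS:

    CREATE TABLE orders (a INT, b VARCHAR(10))
    PARTITION BY RANGE COLUMNS (a, b)
    ( PARTITION p0 VALUES LESS THAN (10, 'mmm'),
      PARTITION p1 VALUES LESS THAN (MAXVALUE, MAXVALUE) );

    CREATE TABLE regions (code INT)
    PARTITION BY LIST COLUMNS (code)
    ( PARTITION podd  VALUES IN (1, 3, 5),
      PARTITION peven VALUES IN (2, 4, 6) );
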
opt_sub_partition:
/* empty */
{
- if (Lex->part_info->no_subparts != 0 &&
- !Lex->part_info->use_default_subpartitions)
+ partition_info *part_info= Lex->part_info;
+ if (part_info->num_subparts != 0 &&
+ !part_info->use_default_subpartitions)
{
/*
We come here when we have defined subpartitions on the first
@@ -4358,11 +4679,10 @@ opt_sub_partition:
}
| '(' sub_part_list ')'
{
- LEX *lex= Lex;
- partition_info *part_info= lex->part_info;
- if (part_info->no_subparts != 0)
+ partition_info *part_info= Lex->part_info;
+ if (part_info->num_subparts != 0)
{
- if (part_info->no_subparts !=
+ if (part_info->num_subparts !=
part_info->count_curr_subparts)
{
my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
@@ -4376,7 +4696,7 @@ opt_sub_partition:
my_parse_error(ER(ER_PARTITION_WRONG_NO_SUBPART_ERROR));
MYSQL_YYABORT;
}
- part_info->no_subparts= part_info->count_curr_subparts;
+ part_info->num_subparts= part_info->count_curr_subparts;
}
part_info->count_curr_subparts= 0;
}
@@ -4390,8 +4710,7 @@ sub_part_list:
sub_part_definition:
SUBPARTITION_SYM
{
- LEX *lex= Lex;
- partition_info *part_info= lex->part_info;
+ partition_info *part_info= Lex->part_info;
partition_element *curr_part= part_info->current_partition;
partition_element *sub_p_elem= new partition_element(curr_part);
if (part_info->use_default_subpartitions &&
@@ -4419,7 +4738,7 @@ sub_part_definition:
}
part_info->curr_part_elem= sub_p_elem;
part_info->use_default_subpartitions= FALSE;
- part_info->use_default_no_subpartitions= FALSE;
+ part_info->use_default_num_subpartitions= FALSE;
part_info->count_curr_subparts++;
}
sub_name opt_part_options {}
@@ -4445,9 +4764,9 @@ opt_part_option:
{ Lex->part_info->curr_part_elem->tablespace_name= $3.str; }
| opt_storage ENGINE_SYM opt_equal storage_engines
{
- LEX *lex= Lex;
- lex->part_info->curr_part_elem->engine_type= $4;
- lex->part_info->default_engine_type= $4;
+ partition_info *part_info= Lex->part_info;
+ part_info->curr_part_elem->engine_type= $4;
+ part_info->default_engine_type= $4;
}
| NODEGROUP_SYM opt_equal real_ulong_num
{ Lex->part_info->curr_part_elem->nodegroup_id= (uint16) $3; }
@@ -4825,8 +5144,7 @@ key_def:
'(' key_list ')' key_options
{
LEX *lex=Lex;
- const char *key_name= $3 ? $3 : $1;
- Key *key= new Key($2, key_name, &lex->key_create_info, 0,
+ Key *key= new Key($2, $3.str ? $3 : $1, &lex->key_create_info, 0,
lex->col_list);
if (key == NULL)
MYSQL_YYABORT;
@@ -4836,9 +5154,7 @@ key_def:
| opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references
{
LEX *lex=Lex;
- const char *key_name= $1 ? $1 : $4;
- const char *fkey_name = $4 ? $4 : key_name;
- Key *key= new Foreign_key(fkey_name, lex->col_list,
+ Key *key= new Foreign_key($4.str ? $4 : $1, lex->col_list,
$8,
lex->ref_list,
lex->fk_delete_opt,
@@ -4847,7 +5163,7 @@ key_def:
if (key == NULL)
MYSQL_YYABORT;
lex->alter_info.key_list.push_back(key);
- key= new Key(Key::MULTIPLE, key_name,
+ key= new Key(Key::MULTIPLE, $1.str ? $1 : $4,
&default_key_create_info, 1,
lex->col_list);
if (key == NULL)
@@ -4877,7 +5193,7 @@ check_constraint:
;
opt_constraint:
- /* empty */ { $$=(char*) 0; }
+ /* empty */ { $$= null_lex_str; }
| constraint { $$= $1; }
;
@@ -5168,6 +5484,7 @@ field_length:
opt_field_length:
/* empty */ { Lex->length=(char*) 0; /* use default length */ }
| field_length { }
+ ;
opt_precision:
/* empty */ {}
@@ -5381,14 +5698,14 @@ opt_ref_list:
ref_list:
ref_list ',' ident
{
- Key_part_spec *key= new Key_part_spec($3.str);
+ Key_part_spec *key= new Key_part_spec($3, 0);
if (key == NULL)
MYSQL_YYABORT;
Lex->ref_list.push_back(key);
}
| ident
{
- Key_part_spec *key= new Key_part_spec($1.str);
+ Key_part_spec *key= new Key_part_spec($1, 0);
if (key == NULL)
MYSQL_YYABORT;
Lex->ref_list.push_back(key);
@@ -5535,7 +5852,7 @@ key_list:
key_part:
ident
{
- $$= new Key_part_spec($1.str);
+ $$= new Key_part_spec($1, 0);
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -5546,15 +5863,15 @@ key_part:
{
my_error(ER_KEY_PART_0, MYF(0), $1.str);
}
- $$= new Key_part_spec($1.str,(uint) key_part_len);
+ $$= new Key_part_spec($1, (uint) key_part_len);
if ($$ == NULL)
MYSQL_YYABORT;
}
;
opt_ident:
- /* empty */ { $$=(char*) 0; /* Default length */ }
- | field_ident { $$=$1.str; }
+ /* empty */ { $$= null_lex_str; }
+ | field_ident { $$= $1; }
;
opt_component:
@@ -5582,7 +5899,6 @@ alter:
if (!lex->select_lex.add_table_to_list(thd, $4, NULL,
TL_OPTION_UPDATING))
MYSQL_YYABORT;
- lex->alter_info.reset();
lex->col_list.empty();
lex->select_lex.init_order();
lex->select_lex.db=
@@ -5816,7 +6132,7 @@ alter_commands:
all_or_alt_part_name_list
{
LEX *lex= Lex;
- lex->sql_command = SQLCOM_OPTIMIZE;
+ lex->sql_command= SQLCOM_OPTIMIZE;
lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
lex->no_write_to_binlog= $3;
lex->check_opt.init();
@@ -5826,7 +6142,7 @@ alter_commands:
all_or_alt_part_name_list
{
LEX *lex= Lex;
- lex->sql_command = SQLCOM_ANALYZE;
+ lex->sql_command= SQLCOM_ANALYZE;
lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
lex->no_write_to_binlog= $3;
lex->check_opt.init();
@@ -5834,7 +6150,7 @@ alter_commands:
| CHECK_SYM PARTITION_SYM all_or_alt_part_name_list
{
LEX *lex= Lex;
- lex->sql_command = SQLCOM_CHECK;
+ lex->sql_command= SQLCOM_CHECK;
lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
lex->check_opt.init();
}
@@ -5843,7 +6159,7 @@ alter_commands:
all_or_alt_part_name_list
{
LEX *lex= Lex;
- lex->sql_command = SQLCOM_REPAIR;
+ lex->sql_command= SQLCOM_REPAIR;
lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
lex->no_write_to_binlog= $3;
lex->check_opt.init();
@@ -5854,7 +6170,14 @@ alter_commands:
LEX *lex= Lex;
lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
lex->no_write_to_binlog= $3;
- lex->alter_info.no_parts= $4;
+ lex->alter_info.num_parts= $4;
+ }
+ | TRUNCATE_SYM PARTITION_SYM all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_TRUNCATE;
+ lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
+ lex->check_opt.init();
}
| reorg_partition_rule
;
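
The new TRUNCATE PARTITION alternative maps to SQLCOM_TRUNCATE with the ALTER_ADMIN_PARTITION
flag set; a small sketch of the statements it accepts, with made-up table and partition names:

    ALTER TABLE orders TRUNCATE PARTITION p0, p1;
    ALTER TABLE orders TRUNCATE PARTITION ALL;
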
@@ -5896,12 +6219,11 @@ add_part_extra:
| '(' part_def_list ')'
{
LEX *lex= Lex;
- lex->part_info->no_parts= lex->part_info->partitions.elements;
+ lex->part_info->num_parts= lex->part_info->partitions.elements;
}
| PARTITIONS_SYM real_ulong_num
{
- LEX *lex= Lex;
- lex->part_info->no_parts= $2;
+ Lex->part_info->num_parts= $2;
}
;
@@ -5931,8 +6253,8 @@ reorg_parts_rule:
}
INTO '(' part_def_list ')'
{
- LEX *lex= Lex;
- lex->part_info->no_parts= lex->part_info->partitions.elements;
+ partition_info *part_info= Lex->part_info;
+ part_info->num_parts= part_info->partitions.elements;
}
;
@@ -6441,14 +6763,23 @@ table_to_table:
;
keycache:
- CACHE_SYM INDEX_SYM keycache_list IN_SYM key_cache_name
+ CACHE_SYM INDEX_SYM
+ {
+ Lex->alter_info.reset();
+ }
+ keycache_list_or_parts IN_SYM key_cache_name
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_ASSIGN_TO_KEYCACHE;
- lex->ident= $5;
+ lex->ident= $6;
}
;
+keycache_list_or_parts:
+ keycache_list
+ | assign_to_keycache_parts
+ ;
+
keycache_list:
assign_to_keycache
| keycache_list ',' assign_to_keycache
@@ -6463,6 +6794,15 @@ assign_to_keycache:
}
;
+assign_to_keycache_parts:
+ table_ident adm_partition cache_keys_spec
+ {
+ if (!Select->add_table_to_list(YYTHD, $1, NULL, 0, TL_READ,
+ Select->pop_index_hints()))
+ MYSQL_YYABORT;
+ }
+ ;
+
key_cache_name:
ident { $$= $1; }
| DEFAULT { $$ = default_key_cache_base; }
@@ -6473,11 +6813,17 @@ preload:
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_PRELOAD_KEYS;
+ lex->alter_info.reset();
}
- preload_list
+ preload_list_or_parts
{}
;
+preload_list_or_parts:
+ preload_keys_parts
+ | preload_list
+ ;
+
preload_list:
preload_keys
| preload_list ',' preload_keys
@@ -6492,6 +6838,23 @@ preload_keys:
}
;
+preload_keys_parts:
+ table_ident adm_partition cache_keys_spec opt_ignore_leaves
+ {
+ if (!Select->add_table_to_list(YYTHD, $1, NULL, $4, TL_READ,
+ Select->pop_index_hints()))
+ MYSQL_YYABORT;
+ }
+ ;
+
+adm_partition:
+ PARTITION_SYM have_partitioning
+ {
+ Lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
+ }
+ '(' all_or_alt_part_name_list ')'
+ ;
+
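
A sketch of the per-partition key cache statements that keycache_list_or_parts,
preload_keys_parts and adm_partition make possible; the cache, table and partition names are
assumptions:

    CACHE INDEX orders PARTITION (p0, p1) IN hot_cache;
    LOAD INDEX INTO CACHE orders PARTITION (p0) IGNORE LEAVES;

Note that, per the grammar, the partitioned form takes a single table per statement, unlike the
comma-separated keycache_list/preload_list forms.
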
cache_keys_spec:
{
Lex->select_lex.alloc_index_hints(YYTHD);
@@ -6555,7 +6918,7 @@ select_paren:
sel->olap != UNSPECIFIED_OLAP_TYPE &&
sel->master_unit()->fake_select_lex)
{
- my_error(ER_WRONG_USAGE, MYF(0),
+ my_error(ER_WRONG_USAGE, MYF(0),
"CUBE/ROLLUP", "ORDER BY");
MYSQL_YYABORT;
}
@@ -8088,13 +8451,13 @@ udf_expr:
sum_expr:
AVG_SYM '(' in_sum_expr ')'
{
- $$= new (YYTHD->mem_root) Item_sum_avg($3);
+ $$= new (YYTHD->mem_root) Item_sum_avg($3, FALSE);
if ($$ == NULL)
MYSQL_YYABORT;
}
| AVG_SYM '(' DISTINCT in_sum_expr ')'
{
- $$= new (YYTHD->mem_root) Item_sum_avg_distinct($4);
+ $$= new (YYTHD->mem_root) Item_sum_avg($4, TRUE);
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -8137,7 +8500,7 @@ sum_expr:
{ Select->in_sum_expr--; }
')'
{
- $$= new (YYTHD->mem_root) Item_sum_count_distinct(* $5);
+ $$= new (YYTHD->mem_root) Item_sum_count(* $5);
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -8196,13 +8559,13 @@ sum_expr:
}
| SUM_SYM '(' in_sum_expr ')'
{
- $$= new (YYTHD->mem_root) Item_sum_sum($3);
+ $$= new (YYTHD->mem_root) Item_sum_sum($3, FALSE);
if ($$ == NULL)
MYSQL_YYABORT;
}
| SUM_SYM '(' DISTINCT in_sum_expr ')'
{
- $$= new (YYTHD->mem_root) Item_sum_sum_distinct($4);
+ $$= new (YYTHD->mem_root) Item_sum_sum($4, TRUE);
if ($$ == NULL)
MYSQL_YYABORT;
}
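
The change above folds the former distinct-specific aggregate items into the regular
Item_sum_sum, Item_sum_avg and Item_sum_count classes with a distinct flag, so queries such as
the following (table and column names are illustrative) now build the same item types as their
non-DISTINCT forms:

    SELECT SUM(DISTINCT amount), AVG(DISTINCT amount), COUNT(DISTINCT amount)
    FROM orders;
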
@@ -8864,24 +9227,25 @@ interval:
;
interval_time_stamp:
- interval_time_st {}
- | FRAC_SECOND_SYM {
- $$=INTERVAL_MICROSECOND;
- /*
- FRAC_SECOND was mistakenly implemented with
- a wrong resolution. According to the ODBC
- standard it should be nanoseconds, not
- microseconds. Changing it to nanoseconds
- in MySQL would mean making TIMESTAMPDIFF
- and TIMESTAMPADD to return DECIMAL, since
- the return value would be too big for BIGINT
- Hence we just deprecate the incorrect
- implementation without changing its
- resolution.
- */
- WARN_DEPRECATED(yythd, "6.2", "FRAC_SECOND", "MICROSECOND");
- }
- ;
+ interval_time_st {}
+ | FRAC_SECOND_SYM
+ {
+ $$=INTERVAL_MICROSECOND;
+ /*
+ FRAC_SECOND was mistakenly implemented with
+ a wrong resolution. According to the ODBC
+ standard it should be nanoseconds, not
+ microseconds. Changing it to nanoseconds
+ in MySQL would mean making TIMESTAMPDIFF
+ and TIMESTAMPADD to return DECIMAL, since
+ the return value would be too big for BIGINT
+ Hence we just deprecate the incorrect
+ implementation without changing its
+ resolution.
+ */
+ WARN_DEPRECATED(yythd, "6.2", "FRAC_SECOND", "MICROSECOND");
+ }
+ ;
interval_time_st:
DAY_SYM { $$=INTERVAL_DAY; }
@@ -9917,6 +10281,7 @@ truncate:
{
LEX* lex= Lex;
lex->sql_command= SQLCOM_TRUNCATE;
+ lex->alter_info.reset();
lex->select_lex.options= 0;
lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
lex->select_lex.init_order();
@@ -10104,6 +10469,11 @@ show_param:
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS;
} opt_limit_clause_init
+ | RELAYLOG_SYM EVENTS_SYM binlog_in binlog_from
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_RELAYLOG_EVENTS;
+ } opt_limit_clause_init
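
The new RELAYLOG alternative mirrors SHOW BINLOG EVENTS and takes the same optional IN, FROM and
LIMIT clauses; the relay log file name below is only an example:

    SHOW RELAYLOG EVENTS IN 'slave-relay-bin.000002' FROM 4 LIMIT 10;
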
| keys_or_index from_or_in table_ident opt_db where_clause
{
LEX *lex= Lex;
@@ -10113,11 +10483,6 @@ show_param:
if (prepare_schema_table(YYTHD, lex, $3, SCH_STATISTICS))
MYSQL_YYABORT;
}
- | COLUMN_SYM TYPES_SYM
- {
- LEX *lex=Lex;
- lex->sql_command= SQLCOM_SHOW_COLUMN_TYPES;
- }
| TABLE_SYM TYPES_SYM
{
LEX *lex=Lex;
@@ -10305,23 +10670,13 @@ show_param:
}
| PROCEDURE CODE_SYM sp_name
{
-#ifdef DBUG_OFF
- my_parse_error(ER(ER_SYNTAX_ERROR));
- MYSQL_YYABORT;
-#else
Lex->sql_command= SQLCOM_SHOW_PROC_CODE;
Lex->spname= $3;
-#endif
}
| FUNCTION_SYM CODE_SYM sp_name
{
-#ifdef DBUG_OFF
- my_parse_error(ER(ER_SYNTAX_ERROR));
- MYSQL_YYABORT;
-#else
Lex->sql_command= SQLCOM_SHOW_FUNC_CODE;
Lex->spname= $3;
-#endif
}
| CREATE EVENT_SYM sp_name
{
@@ -10572,51 +10927,33 @@ use:
/* import, export of files */
load:
- LOAD DATA_SYM
+ LOAD data_or_xml
{
THD *thd= YYTHD;
LEX *lex= thd->lex;
if (lex->sphead)
{
- my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD DATA");
+ my_error(ER_SP_BADSTATEMENT, MYF(0),
+ $2 == FILETYPE_CSV ? "LOAD DATA" : "LOAD XML");
MYSQL_YYABORT;
}
}
- load_data
- {}
- | LOAD TABLE_SYM table_ident FROM MASTER_SYM
- {
- LEX *lex=Lex;
- WARN_DEPRECATED(yythd, "6.0", "LOAD TABLE FROM MASTER",
- "MySQL Administrator (mysqldump, mysql)");
- if (lex->sphead)
- {
- my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE");
- MYSQL_YYABORT;
- }
- lex->sql_command = SQLCOM_LOAD_MASTER_TABLE;
- if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING))
- MYSQL_YYABORT;
- }
- ;
-
-load_data:
load_data_lock opt_local INFILE TEXT_STRING_filesystem
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_LOAD;
- lex->lock_option= $1;
- lex->local_file= $2;
+ lex->lock_option= $4;
+ lex->local_file= $5;
lex->duplicates= DUP_ERROR;
lex->ignore= 0;
- if (!(lex->exchange= new sql_exchange($4.str, 0)))
+ if (!(lex->exchange= new sql_exchange($7.str, 0, $2)))
MYSQL_YYABORT;
}
opt_duplicate INTO TABLE_SYM table_ident
{
LEX *lex=Lex;
- if (!Select->add_table_to_list(YYTHD, $9, NULL, TL_OPTION_UPDATING,
+ if (!Select->add_table_to_list(YYTHD, $12, NULL, TL_OPTION_UPDATING,
lex->lock_option))
MYSQL_YYABORT;
lex->field_list.empty();
@@ -10624,12 +10961,35 @@ load_data:
lex->value_list.empty();
}
opt_load_data_charset
- { Lex->exchange->cs= $11; }
+ { Lex->exchange->cs= $14; }
+ opt_xml_rows_identified_by
opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec
opt_load_data_set_spec
{}
- | FROM MASTER_SYM
+ | LOAD TABLE_SYM table_ident FROM MASTER_SYM
+ {
+ LEX *lex=Lex;
+ WARN_DEPRECATED(yythd, "6.0", "LOAD TABLE FROM MASTER",
+ "MySQL Administrator (mysqldump, mysql)");
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE");
+ MYSQL_YYABORT;
+ }
+ lex->sql_command = SQLCOM_LOAD_MASTER_TABLE;
+ if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING))
+ MYSQL_YYABORT;
+ }
+ | LOAD DATA_SYM FROM MASTER_SYM
{
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+
+ if (lex->sphead)
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD DATA");
+ MYSQL_YYABORT;
+ }
Lex->sql_command = SQLCOM_LOAD_MASTER_DATA;
WARN_DEPRECATED(yythd, "6.0", "LOAD DATA FROM MASTER",
"mysqldump or future "
@@ -10637,6 +10997,11 @@ load_data:
}
;
+data_or_xml:
+ DATA_SYM { $$= FILETYPE_CSV; }
+ | XML_SYM { $$= FILETYPE_XML; }
+ ;
+
opt_local:
/* empty */ { $$=0;}
| LOCAL_SYM { $$=1;}
@@ -10723,15 +11088,26 @@ line_term:
}
;
+opt_xml_rows_identified_by:
+ /* empty */ { }
+ | ROWS_SYM IDENTIFIED_SYM BY text_string
+ { Lex->exchange->line_term = $4; };
+
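
A minimal sketch of the LOAD XML variant wired in through data_or_xml and
opt_xml_rows_identified_by; the file, table and row tag are illustrative:

    LOAD XML LOCAL INFILE 'orders.xml'
      INTO TABLE orders
      ROWS IDENTIFIED BY '<order>';

ROWS IDENTIFIED BY simply overrides exchange->line_term, as the action above shows.
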
opt_ignore_lines:
/* empty */
- | IGNORE_SYM NUM LINES
+ | IGNORE_SYM NUM lines_or_rows
{
DBUG_ASSERT(Lex->exchange != 0);
Lex->exchange->skip_lines= atol($2.str);
}
;
+lines_or_rows:
+ LINES { }
+
+ | ROWS_SYM { }
+ ;
+
opt_field_or_var_spec:
/* empty */ {}
| '(' fields_or_vars ')' {}
@@ -11360,8 +11736,9 @@ IDENT_sys:
$1.length, &dummy_error);
if (wlen < $1.length)
{
+ ErrConvString err($1.str, $1.length, &my_charset_bin);
my_error(ER_INVALID_CHARACTER_STRING, MYF(0),
- cs->csname, $1.str + wlen);
+ cs->csname, err.ptr());
MYSQL_YYABORT;
}
$$= $1;
@@ -11576,13 +11953,16 @@ keyword_sp:
| BOOLEAN_SYM {}
| BTREE_SYM {}
| CASCADED {}
+ | CATALOG_NAME_SYM {}
| CHAIN_SYM {}
| CHANGED {}
| CIPHER_SYM {}
| CLIENT_SYM {}
+ | CLASS_ORIGIN_SYM {}
| COALESCE {}
| CODE_SYM {}
| COLLATION_SYM {}
+ | COLUMN_NAME_SYM {}
| COLUMNS {}
| COMMITTED_SYM {}
| COMPACT_SYM {}
@@ -11591,10 +11971,14 @@ keyword_sp:
| CONCURRENT {}
| CONNECTION_SYM {}
| CONSISTENT_SYM {}
+ | CONSTRAINT_CATALOG_SYM {}
+ | CONSTRAINT_SCHEMA_SYM {}
+ | CONSTRAINT_NAME_SYM {}
| CONTEXT_SYM {}
| CONTRIBUTORS_SYM {}
| CPU_SYM {}
| CUBE_SYM {}
+ | CURSOR_NAME_SYM {}
| DATA_SYM {}
| DATAFILE_SYM {}
| DATETIME {}
@@ -11682,10 +12066,10 @@ keyword_sp:
| MAX_SIZE_SYM {}
| MAX_UPDATES_PER_HOUR {}
| MAX_USER_CONNECTIONS_SYM {}
- | MAX_VALUE_SYM {}
| MEDIUM_SYM {}
| MEMORY_SYM {}
| MERGE_SYM {}
+ | MESSAGE_TEXT_SYM {}
| MICROSECOND_SYM {}
| MIGRATE_SYM {}
| MINUTE_SYM {}
@@ -11697,6 +12081,7 @@ keyword_sp:
| MULTIPOINT {}
| MULTIPOLYGON {}
| MUTEX_SYM {}
+ | MYSQL_ERRNO_SYM {}
| NAME_SYM {}
| NAMES_SYM {}
| NATIONAL_SYM {}
@@ -11739,6 +12124,7 @@ keyword_sp:
| REDO_BUFFER_SIZE_SYM {}
| REDOFILE_SYM {}
| REDUNDANT_SYM {}
+ | RELAYLOG_SYM {}
| RELAY_LOG_FILE_SYM {}
| RELAY_LOG_POS_SYM {}
| RELAY_THREAD {}
@@ -11756,6 +12142,7 @@ keyword_sp:
| ROW_SYM {}
| RTREE_SYM {}
| SCHEDULE_SYM {}
+ | SCHEMA_NAME_SYM {}
| SECOND_SYM {}
| SERIAL_SYM {}
| SERIALIZABLE_SYM {}
@@ -11774,6 +12161,7 @@ keyword_sp:
| STATUS_SYM {}
| STORAGE_SYM {}
| STRING_SYM {}
+ | SUBCLASS_ORIGIN_SYM {}
| SUBDATE_SYM {}
| SUBJECT_SYM {}
| SUBPARTITION_SYM {}
@@ -11782,6 +12170,7 @@ keyword_sp:
| SUSPEND_SYM {}
| SWAPS_SYM {}
| SWITCHES_SYM {}
+ | TABLE_NAME_SYM {}
| TABLES {}
| TABLE_CHECKSUM_SYM {}
| TABLESPACE {}
@@ -11815,6 +12204,7 @@ keyword_sp:
| WEEK_SYM {}
| WORK_SYM {}
| X509_SYM {}
+ | XML_SYM {}
| YEAR_SYM {}
;
@@ -12562,6 +12952,7 @@ object_privilege:
| CREATE USER { Lex->grant |= CREATE_USER_ACL; }
| EVENT_SYM { Lex->grant |= EVENT_ACL;}
| TRIGGER_SYM { Lex->grant |= TRIGGER_ACL; }
+ | CREATE TABLESPACE { Lex->grant |= CREATE_TABLESPACE_ACL; }
;
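
The added object_privilege alternative corresponds to granting the new global privilege, for
example (the account name is made up):

    GRANT CREATE TABLESPACE ON *.* TO 'dba'@'localhost';
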
opt_and:
diff --git a/sql/structs.h b/sql/structs.h
index 2546d241059..33b7148c4b3 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -1,3 +1,6 @@
+#ifndef STRUCTS_INCLUDED
+#define STRUCTS_INCLUDED
+
/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -16,8 +19,9 @@
/* The old structures from unireg */
-struct st_table;
+struct TABLE;
class Field;
+class THD;
typedef struct st_date_time_format {
uchar positions[8];
@@ -97,7 +101,7 @@ typedef struct st_key {
union {
int bdb_return_if_eq;
} handler;
- struct st_table *table;
+ TABLE *table;
} KEY;
@@ -115,36 +119,6 @@ typedef struct st_reginfo { /* Extra info about reg */
} REGINFO;
-class SQL_SELECT;
-class THD;
-class handler;
-struct st_join_table;
-
-void rr_unlock_row(st_join_table *tab);
-
-struct READ_RECORD { /* Parameter to read_record */
- typedef int (*Read_func)(READ_RECORD*);
- typedef void (*Unlock_row_func)(st_join_table *);
- struct st_table *table; /* Head-form */
- handler *file;
- struct st_table **forms; /* head and ref forms */
-
- Read_func read_record;
- Unlock_row_func unlock_row;
- THD *thd;
- SQL_SELECT *select;
- uint cache_records;
- uint ref_length,struct_length,reclength,rec_cache_size,error_offset;
- uint index;
- uchar *ref_pos; /* pointer to form->refpos */
- uchar *record;
- uchar *rec_buf; /* to read field values after filesort */
- uchar *cache,*cache_pos,*cache_end,*read_positions;
- IO_CACHE *io_cache;
- bool print_error, ignore_not_found_rows;
-};
-
-
/*
Originally MySQL used MYSQL_TIME structure inside server only, but since
4.1 it's exported to user in the new client API. Define aliases for
@@ -386,3 +360,5 @@ public:
Discrete_interval* get_tail() const { return tail; };
Discrete_interval* get_current() const { return current; };
};
+
+#endif /* STRUCTS_INCLUDED */
diff --git a/sql/table.cc b/sql/table.cc
index 0ca81294458..a1a422028d4 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -423,7 +423,7 @@ void free_table_share(TABLE_SHARE *share)
pthread_mutex_destroy(&share->mutex);
pthread_cond_destroy(&share->cond);
}
- hash_free(&share->name_hash);
+ my_hash_free(&share->name_hash);
plugin_unlock(NULL, share->db_plugin);
share->db_plugin= NULL;
@@ -725,7 +725,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
share->avg_row_length= uint4korr(head+34);
share->row_type= (row_type) head[40];
- share->table_charset= get_charset((uint) head[38],MYF(0));
+ share->table_charset= get_charset((((uint) head[41]) << 8) +
+ (uint) head[38],MYF(0));
share->null_field_first= 1;
}
if (!share->table_charset)
@@ -1147,10 +1148,10 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
use_hash= share->fields >= MAX_FIELDS_BEFORE_HASH;
if (use_hash)
- use_hash= !hash_init(&share->name_hash,
- system_charset_info,
- share->fields,0,0,
- (hash_get_key) get_field_name,0,0);
+ use_hash= !my_hash_init(&share->name_hash,
+ system_charset_info,
+ share->fields,0,0,
+ (my_hash_get_key) get_field_name,0,0);
for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++)
{
@@ -1184,12 +1185,13 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
}
else
{
- if (!strpos[14])
+ uint csid= strpos[14] + (((uint) strpos[11]) << 8);
+ if (!csid)
charset= &my_charset_bin;
- else if (!(charset=get_charset((uint) strpos[14], MYF(0))))
+ else if (!(charset= get_charset(csid, MYF(0))))
{
error= 5; // Unknown or unavailable charset
- errarg= (int) strpos[14];
+ errarg= (int) csid;
goto err;
}
}
@@ -1262,7 +1264,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
"Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
share->fieldnames.type_names[i], share->table_name.str,
share->table_name.str);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_CRASHED_ON_USAGE,
"Found incompatible DECIMAL field '%s' in %s; "
"Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
@@ -1472,7 +1474,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
"Please do \"ALTER TABLE '%s' FORCE \" to fix it!",
share->table_name.str,
share->table_name.str);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_CRASHED_ON_USAGE,
"Found wrong key definition in %s; "
"Please do \"ALTER TABLE '%s' FORCE\" to fix "
@@ -1597,7 +1599,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
delete handler_file;
#ifndef DBUG_OFF
if (use_hash)
- (void) hash_check(&share->name_hash);
+ (void) my_hash_check(&share->name_hash);
#endif
DBUG_RETURN (0);
@@ -1608,7 +1610,9 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
x_free((uchar*) disk_buff);
delete crypted;
delete handler_file;
- hash_free(&share->name_hash);
+ my_hash_free(&share->name_hash);
+ if (share->ha_data_destroy)
+ share->ha_data_destroy(share->ha_data);
open_table_error(share, error, share->open_errno, errarg);
DBUG_RETURN(error);
@@ -2465,8 +2469,7 @@ File create_frm(THD *thd, const char *name, const char *db,
if ((file= my_create(name, CREATE_MODE, create_flags, MYF(0))) >= 0)
{
- uint key_length, tmp_key_length;
- uint tmp;
+ uint key_length, tmp_key_length, tmp, csid;
bzero((char*) fileinfo,64);
/* header */
fileinfo[0]=(uchar) 254;
@@ -2506,8 +2509,9 @@ File create_frm(THD *thd, const char *name, const char *db,
fileinfo[32]=0; // No filename anymore
fileinfo[33]=5; // Mark for 5.0 frm file
int4store(fileinfo+34,create_info->avg_row_length);
- fileinfo[38]= (create_info->default_table_charset ?
- create_info->default_table_charset->number : 0);
+ csid= (create_info->default_table_charset ?
+ create_info->default_table_charset->number : 0);
+ fileinfo[38]= (uchar) csid;
/*
In future versions, we will store in fileinfo[39] the values of the
TRANSACTIONAL and PAGE_CHECKSUM clauses of CREATE TABLE.
@@ -2515,7 +2519,7 @@ File create_frm(THD *thd, const char *name, const char *db,
fileinfo[39]= 0;
fileinfo[40]= (uchar) create_info->row_type;
/* Next few bytes where for RAID support */
- fileinfo[41]= 0;
+ fileinfo[41]= (uchar) (csid >> 8);
fileinfo[42]= 0;
fileinfo[43]= 0;
fileinfo[44]= 0;
@@ -2947,7 +2951,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def)
Create Item_field for each column in the table.
SYNPOSIS
- st_table::fill_item_list()
+ TABLE::fill_item_list()
item_list a pointer to an empty list used to store items
DESCRIPTION
@@ -2960,7 +2964,7 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def)
1 out of memory
*/
-bool st_table::fill_item_list(List<Item> *item_list) const
+bool TABLE::fill_item_list(List<Item> *item_list) const
{
/*
All Item_field's created using a direct pointer to a field
@@ -2980,7 +2984,7 @@ bool st_table::fill_item_list(List<Item> *item_list) const
Fields of this table.
SYNPOSIS
- st_table::fill_item_list()
+ TABLE::fill_item_list()
item_list a non-empty list with Item_fields
DESCRIPTION
@@ -2990,7 +2994,7 @@ bool st_table::fill_item_list(List<Item> *item_list) const
is the same as the number of columns in the table.
*/
-void st_table::reset_item_list(List<Item> *item_list) const
+void TABLE::reset_item_list(List<Item> *item_list) const
{
List_iterator_fast<Item> it(*item_list);
for (Field **ptr= field; *ptr; ptr++)
@@ -3374,20 +3378,20 @@ void TABLE_LIST::hide_view_error(THD *thd)
/* Hide "Unknown column" or "Unknown function" error */
DBUG_ASSERT(thd->is_error());
- if (thd->main_da.sql_errno() == ER_BAD_FIELD_ERROR ||
- thd->main_da.sql_errno() == ER_SP_DOES_NOT_EXIST ||
- thd->main_da.sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
- thd->main_da.sql_errno() == ER_PROCACCESS_DENIED_ERROR ||
- thd->main_da.sql_errno() == ER_COLUMNACCESS_DENIED_ERROR ||
- thd->main_da.sql_errno() == ER_TABLEACCESS_DENIED_ERROR ||
- thd->main_da.sql_errno() == ER_TABLE_NOT_LOCKED ||
- thd->main_da.sql_errno() == ER_NO_SUCH_TABLE)
+ if (thd->stmt_da->sql_errno() == ER_BAD_FIELD_ERROR ||
+ thd->stmt_da->sql_errno() == ER_SP_DOES_NOT_EXIST ||
+ thd->stmt_da->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
+ thd->stmt_da->sql_errno() == ER_PROCACCESS_DENIED_ERROR ||
+ thd->stmt_da->sql_errno() == ER_COLUMNACCESS_DENIED_ERROR ||
+ thd->stmt_da->sql_errno() == ER_TABLEACCESS_DENIED_ERROR ||
+ thd->stmt_da->sql_errno() == ER_TABLE_NOT_LOCKED ||
+ thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE)
{
TABLE_LIST *top= top_table();
thd->clear_error();
my_error(ER_VIEW_INVALID, MYF(0), top->view_db.str, top->view_name.str);
}
- else if (thd->main_da.sql_errno() == ER_NO_DEFAULT_FOR_FIELD)
+ else if (thd->stmt_da->sql_errno() == ER_NO_DEFAULT_FOR_FIELD)
{
TABLE_LIST *top= top_table();
thd->clear_error();
@@ -3465,7 +3469,7 @@ int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
TABLE_LIST *main_view= top_table();
if (ignore_failure)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED),
main_view->view_db.str, main_view->view_name.str);
return(VIEW_CHECK_SKIP);
@@ -3932,14 +3936,14 @@ const char *Natural_join_column::db_name()
return table_ref->view_db.str;
/*
- Test that TABLE_LIST::db is the same as st_table_share::db to
+ Test that TABLE_LIST::db is the same as TABLE_SHARE::db to
ensure consistency. An exception are I_S schema tables, which
are inconsistent in this respect.
*/
DBUG_ASSERT(!strcmp(table_ref->db,
table_ref->table->s->db.str) ||
(table_ref->schema_table &&
- table_ref->table->s->db.str[0] == 0));
+ is_schema_db(table_ref->table->s->db.str)));
return table_ref->db;
}
@@ -4151,13 +4155,13 @@ const char *Field_iterator_table_ref::get_db_name()
return natural_join_it.column_ref()->db_name();
/*
- Test that TABLE_LIST::db is the same as st_table_share::db to
+ Test that TABLE_LIST::db is the same as TABLE_SHARE::db to
ensure consistency. An exception are I_S schema tables, which
are inconsistent in this respect.
*/
DBUG_ASSERT(!strcmp(table_ref->db, table_ref->table->s->db.str) ||
(table_ref->schema_table &&
- table_ref->table->s->db.str[0] == 0));
+ is_schema_db(table_ref->table->s->db.str)));
return table_ref->db;
}
@@ -4327,7 +4331,7 @@ Field_iterator_table_ref::get_natural_column_ref()
/* Reset all columns bitmaps */
-void st_table::clear_column_bitmaps()
+void TABLE::clear_column_bitmaps()
{
/*
Reset column read/write usage. It's identical to:
@@ -4348,9 +4352,9 @@ void st_table::clear_column_bitmaps()
key fields.
*/
-void st_table::prepare_for_position()
+void TABLE::prepare_for_position()
{
- DBUG_ENTER("st_table::prepare_for_position");
+ DBUG_ENTER("TABLE::prepare_for_position");
if ((file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
s->primary_key < MAX_KEY)
@@ -4369,14 +4373,14 @@ void st_table::prepare_for_position()
NOTE:
This changes the bitmap to use the tmp bitmap
After this, you can't access any other columns in the table until
- bitmaps are reset, for example with st_table::clear_column_bitmaps()
- or st_table::restore_column_maps_after_mark_index()
+ bitmaps are reset, for example with TABLE::clear_column_bitmaps()
+ or TABLE::restore_column_maps_after_mark_index()
*/
-void st_table::mark_columns_used_by_index(uint index)
+void TABLE::mark_columns_used_by_index(uint index)
{
MY_BITMAP *bitmap= &tmp_set;
- DBUG_ENTER("st_table::mark_columns_used_by_index");
+ DBUG_ENTER("TABLE::mark_columns_used_by_index");
(void) file->extra(HA_EXTRA_KEYREAD);
bitmap_clear_all(bitmap);
@@ -4397,9 +4401,9 @@ void st_table::mark_columns_used_by_index(uint index)
when calling mark_columns_used_by_index
*/
-void st_table::restore_column_maps_after_mark_index()
+void TABLE::restore_column_maps_after_mark_index()
{
- DBUG_ENTER("st_table::restore_column_maps_after_mark_index");
+ DBUG_ENTER("TABLE::restore_column_maps_after_mark_index");
key_read= 0;
(void) file->extra(HA_EXTRA_NO_KEYREAD);
@@ -4413,7 +4417,7 @@ void st_table::restore_column_maps_after_mark_index()
mark columns used by key, but don't reset other fields
*/
-void st_table::mark_columns_used_by_index_no_reset(uint index,
+void TABLE::mark_columns_used_by_index_no_reset(uint index,
MY_BITMAP *bitmap)
{
KEY_PART_INFO *key_part= key_info[index].key_part;
@@ -4432,7 +4436,7 @@ void st_table::mark_columns_used_by_index_no_reset(uint index,
always set and sometimes read.
*/
-void st_table::mark_auto_increment_column()
+void TABLE::mark_auto_increment_column()
{
DBUG_ASSERT(found_next_number_field);
/*
@@ -4465,7 +4469,7 @@ void st_table::mark_auto_increment_column()
retrieve the row again.
*/
-void st_table::mark_columns_needed_for_delete()
+void TABLE::mark_columns_needed_for_delete()
{
if (triggers)
triggers->mark_fields_used(TRG_EVENT_DELETE);
@@ -4515,7 +4519,7 @@ void st_table::mark_columns_needed_for_delete()
retrieve the row again.
*/
-void st_table::mark_columns_needed_for_update()
+void TABLE::mark_columns_needed_for_update()
{
DBUG_ENTER("mark_columns_needed_for_update");
if (triggers)
@@ -4558,7 +4562,7 @@ void st_table::mark_columns_needed_for_update()
as changed.
*/
-void st_table::mark_columns_needed_for_insert()
+void TABLE::mark_columns_needed_for_insert()
{
if (triggers)
{
@@ -4588,7 +4592,7 @@ void st_table::mark_columns_needed_for_insert()
TABLEs. Each of these TABLEs is called a part of a MERGE table.
*/
-bool st_table::is_children_attached(void)
+bool TABLE::is_children_attached(void)
{
return((child_l && children_attached) ||
(parent && parent->children_attached));
@@ -4652,9 +4656,9 @@ Item_subselect *TABLE_LIST::containing_subselect()
DESCRIPTION
The parser collects the index hints for each table in a "tagged list"
(TABLE_LIST::index_hints). Using the information in this tagged list
- this function sets the members st_table::keys_in_use_for_query,
+ this function sets the members st_table::keys_in_use_for_query,
st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by,
- st_table::force_index, st_table::force_index_order,
+ st_table::force_index, st_table::force_index_order,
st_table::force_index_group and st_table::covering_keys.
Current implementation of the runtime does not allow mixing FORCE INDEX
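
The DESCRIPTION block above explains that process_index_hints() compiles the parser's tagged hint list into the per-table key maps (keys_in_use_for_query and friends). A small self-contained sketch of that compilation step, assuming a toy Index_hint type and an 8-bit key map; the real function also handles the FOR ORDER BY/GROUP BY variants and the force_index flags, which are omitted here:

#include <bitset>
#include <vector>
#include <cstdio>

enum index_hint_type { INDEX_HINT_IGNORE, INDEX_HINT_USE, INDEX_HINT_FORCE };

struct Index_hint { index_hint_type type; unsigned key_nr; };

// Compile a tagged hint list into a "keys usable for the query" map:
// USE/FORCE restrict the map to the named keys, IGNORE removes keys.
std::bitset<8> compile_hints(const std::vector<Index_hint> &hints,
                             std::bitset<8> keys_in_use)
{
  bool have_use_or_force = false;
  std::bitset<8> used, ignored;
  for (const Index_hint &h : hints) {
    if (h.type == INDEX_HINT_IGNORE)
      ignored.set(h.key_nr);
    else {
      used.set(h.key_nr);
      have_use_or_force = true;
    }
  }
  if (have_use_or_force)
    keys_in_use &= used;            // only the listed keys stay usable
  keys_in_use &= ~ignored;          // IGNORE INDEX always wins
  return keys_in_use;
}

int main() {
  std::bitset<8> all("00000111");   // table has keys 0..2
  std::vector<Index_hint> hints = { {INDEX_HINT_USE, 1}, {INDEX_HINT_IGNORE, 2} };
  std::printf("keys_in_use_for_query = %s\n",
              compile_hints(hints, all).to_string().c_str());
  return 0;
}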
diff --git a/sql/table.h b/sql/table.h
index eae261cc97d..6723dfc40dd 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1,3 +1,6 @@
+#ifndef TABLE_INCLUDED
+#define TABLE_INCLUDED
+
/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
@@ -320,9 +323,9 @@ public:
instance of table share per one table in the database.
*/
-typedef struct st_table_share
+struct TABLE_SHARE
{
- st_table_share() {} /* Remove gcc warning */
+ TABLE_SHARE() {} /* Remove gcc warning */
/** Category of this table. */
TABLE_CATEGORY table_category;
@@ -335,11 +338,7 @@ typedef struct st_table_share
TYPELIB *intervals; /* pointer to interval info */
pthread_mutex_t mutex; /* For locking the share */
pthread_cond_t cond; /* To signal that share is ready */
- struct st_table_share *next, /* Link to unused shares */
- **prev;
-#ifdef NOT_YET
- struct st_table *open_tables; /* link to open tables */
-#endif
+ TABLE_SHARE *next, **prev; /* Link to unused shares */
/* The following is copied to each TABLE on OPEN */
Field **field;
@@ -465,6 +464,7 @@ typedef struct st_table_share
/** place to store storage engine specific data */
void *ha_data;
+ void (*ha_data_destroy)(void *); /* An optional destructor for ha_data */
/*
@@ -634,7 +634,7 @@ typedef struct st_table_share
return (tmp_table == SYSTEM_TMP_TABLE || is_view) ? 0 : table_map_id;
}
-} TABLE_SHARE;
+};
extern ulong refresh_version;
@@ -647,19 +647,16 @@ enum index_hint_type
INDEX_HINT_FORCE
};
-struct st_table {
- st_table() {} /* Remove gcc warning */
+struct TABLE
+{
+ TABLE() {} /* Remove gcc warning */
TABLE_SHARE *s;
handler *file;
-#ifdef NOT_YET
- struct st_table *used_next, **used_prev; /* Link to used tables */
- struct st_table *open_next, **open_prev; /* Link to open tables */
-#endif
- struct st_table *next, *prev;
+ TABLE *next, *prev;
/* For the below MERGE related members see top comment in ha_myisammrg.cc */
- struct st_table *parent; /* Set in MERGE child. Ptr to parent */
+ TABLE *parent; /* Set in MERGE child. Ptr to parent */
TABLE_LIST *child_l; /* Set in MERGE parent. List of children */
TABLE_LIST **child_last_l; /* Set in MERGE parent. End of list */
@@ -1053,7 +1050,6 @@ typedef struct st_schema_table
/** The threshold size a blob field buffer before it is freed */
#define MAX_TDC_BLOB_SIZE 65536
-struct st_lex;
class select_union;
class TMP_TABLE_PARAM;
@@ -1131,6 +1127,7 @@ public:
(TABLE_LIST::join_using_fields != NULL)
*/
+struct LEX;
class Index_hint;
struct TABLE_LIST
{
@@ -1251,7 +1248,7 @@ struct TABLE_LIST
TMP_TABLE_PARAM *schema_table_param;
/* link to select_lex where this table was used */
st_select_lex *select_lex;
- st_lex *view; /* link on VIEW lex for merging */
+ LEX *view; /* link on VIEW lex for merging */
Field_translator *field_translation; /* array of VIEW fields */
/* pointer to element after last one in translation table above */
Field_translator *field_translation_end;
@@ -1410,6 +1407,8 @@ struct TABLE_LIST
the parsed tree is created.
*/
uint8 trg_event_map;
+ /* TRUE <=> this table is a const one and was optimized away. */
+ bool optimized_away;
uint i_s_requested_object;
bool has_db_lookup_value;
@@ -1466,9 +1465,9 @@ struct TABLE_LIST
Item_subselect *containing_subselect();
/*
- Compiles the tagged hints list and fills up st_table::keys_in_use_for_query,
- st_table::keys_in_use_for_group_by, st_table::keys_in_use_for_order_by,
- st_table::force_index and st_table::covering_keys.
+ Compiles the tagged hints list and fills up TABLE::keys_in_use_for_query,
+ TABLE::keys_in_use_for_group_by, TABLE::keys_in_use_for_order_by,
+ TABLE::force_index and TABLE::covering_keys.
*/
bool process_index_hints(TABLE *table);
@@ -1772,3 +1771,4 @@ static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set,
size_t max_row_length(TABLE *table, const uchar *data);
+#endif /* TABLE_INCLUDED */
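
One functional change in this header is the new ha_data_destroy callback next to ha_data: a share can now carry an optional destructor for its engine-private data. A minimal sketch of that pattern, with an invented Share type and free_share() helper standing in for the server's share management:

#include <cstdlib>
#include <cstdio>

// Illustrative share object: engine-private data plus an optional
// destructor, mirroring the ha_data / ha_data_destroy pair above.
struct Share {
  void *ha_data = nullptr;
  void (*ha_data_destroy)(void *) = nullptr;
};

static void destroy_engine_data(void *p) {
  std::puts("freeing engine-private data");
  std::free(p);
}

static void free_share(Share *s) {
  if (s->ha_data_destroy)          // only engines that registered a destructor pay for one
    s->ha_data_destroy(s->ha_data);
  s->ha_data = nullptr;
  s->ha_data_destroy = nullptr;
}

int main() {
  Share s;
  s.ha_data = std::malloc(16);
  s.ha_data_destroy = destroy_engine_data;
  free_share(&s);
  return 0;
}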
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index 0764fe8be33..ed17f7968c0 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -44,9 +44,10 @@ extern "C" {
returned in the error packet.
- SHOW ERROR/SHOW WARNINGS may be empty.
*/
- thd->main_da.set_error_status(thd,
- ER_OUT_OF_RESOURCES,
- ER(ER_OUT_OF_RESOURCES));
+ thd->stmt_da->set_error_status(thd,
+ ER_OUT_OF_RESOURCES,
+ ER(ER_OUT_OF_RESOURCES),
+ NULL);
}
}
}
diff --git a/sql/time.cc b/sql/time.cc
index 8b554beb94b..5f804072eb0 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -748,7 +748,7 @@ void make_truncated_value_warning(THD *thd, MYSQL_ERROR::enum_warning_level leve
cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
type_str, str.c_ptr(), field_name,
- (ulong) thd->row_count);
+ (ulong) thd->warning_info->current_row_for_warning());
else
{
if (time_type > MYSQL_TIMESTAMP_ERROR)
diff --git a/sql/tzfile.h b/sql/tzfile.h
index 1ff82d62329..1c1800ba1ed 100644
--- a/sql/tzfile.h
+++ b/sql/tzfile.h
@@ -1,3 +1,6 @@
+#ifndef TZFILE_INCLUDED
+#define TZFILE_INCLUDED
+
/* Copyright (C) 2004 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -134,3 +137,5 @@ struct tzhead {
*/
#define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0))
+
+#endif
diff --git a/sql/tztime.cc b/sql/tztime.cc
index c7a4ad049ec..650678c721b 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1581,17 +1581,17 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
lex_start(thd);
/* Init all memory structures that require explicit destruction */
- if (hash_init(&tz_names, &my_charset_latin1, 20,
- 0, 0, (hash_get_key) my_tz_names_get_key, 0, 0))
+ if (my_hash_init(&tz_names, &my_charset_latin1, 20,
+ 0, 0, (my_hash_get_key) my_tz_names_get_key, 0, 0))
{
sql_print_error("Fatal error: OOM while initializing time zones");
goto end;
}
- if (hash_init(&offset_tzs, &my_charset_latin1, 26, 0, 0,
- (hash_get_key)my_offset_tzs_get_key, 0, 0))
+ if (my_hash_init(&offset_tzs, &my_charset_latin1, 26, 0, 0,
+ (my_hash_get_key)my_offset_tzs_get_key, 0, 0))
{
sql_print_error("Fatal error: OOM while initializing time zones");
- hash_free(&tz_names);
+ my_hash_free(&tz_names);
goto end;
}
init_alloc_root(&tz_storage, 32 * 1024, 0);
@@ -1645,7 +1645,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
if (open_system_tables_for_read(thd, tz_tables, &open_tables_state_backup))
{
sql_print_warning("Can't open and lock time zone table: %s "
- "trying to live without them", thd->main_da.message());
+ "trying to live without them", thd->stmt_da->message());
/* We will try emulate that everything is ok */
return_val= time_zone_tables_exist= 0;
goto end_with_setting_default_tz;
@@ -1774,8 +1774,8 @@ void my_tz_free()
{
tz_inited= 0;
VOID(pthread_mutex_destroy(&tz_LOCK));
- hash_free(&offset_tzs);
- hash_free(&tz_names);
+ my_hash_free(&offset_tzs);
+ my_hash_free(&tz_names);
free_root(&tz_storage, MYF(0));
}
}
@@ -2267,9 +2267,9 @@ my_tz_find(THD *thd, const String *name)
if (!str_to_offset(name->ptr(), name->length(), &offset))
{
- if (!(result_tz= (Time_zone_offset *)hash_search(&offset_tzs,
- (const uchar *)&offset,
- sizeof(long))))
+ if (!(result_tz= (Time_zone_offset *)my_hash_search(&offset_tzs,
+ (const uchar *)&offset,
+ sizeof(long))))
{
DBUG_PRINT("info", ("Creating new Time_zone_offset object"));
@@ -2285,9 +2285,10 @@ my_tz_find(THD *thd, const String *name)
else
{
result_tz= 0;
- if ((tmp_tzname= (Tz_names_entry *)hash_search(&tz_names,
- (const uchar *)name->ptr(),
- name->length())))
+ if ((tmp_tzname= (Tz_names_entry *)my_hash_search(&tz_names,
+ (const uchar *)
+ name->ptr(),
+ name->length())))
result_tz= tmp_tzname->tz;
else if (time_zone_tables_exist)
{
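
The my_tz_find() hunks above preserve the original lookup scheme: an offset-style name such as "+05:30" is resolved through the offset_tzs cache, everything else goes through tz_names and, failing that, the time zone tables. A rough standalone sketch of that two-cache lookup using std::unordered_map; the Tz type, the tz_find() helper and the fallback behaviour are illustrative only:

#include <string>
#include <unordered_map>
#include <cstdio>

// Stand-in for Time_zone; the real objects are server classes.
struct Tz { std::string description; };

static std::unordered_map<long, Tz*>        offset_tzs; // keyed by numeric offset
static std::unordered_map<std::string, Tz*> tz_names;   // keyed by zone name

// Try to parse an offset spec first and consult the offset cache,
// otherwise fall back to the name cache (and, in the server, the tables).
static Tz *tz_find(const std::string &name) {
  char sign; int h, m;
  if (std::sscanf(name.c_str(), "%c%2d:%2d", &sign, &h, &m) == 3 &&
      (sign == '+' || sign == '-')) {
    long offset = (h * 3600L + m * 60L) * (sign == '-' ? -1 : 1);
    auto it = offset_tzs.find(offset);
    if (it != offset_tzs.end())
      return it->second;
    Tz *tz = new Tz{ "offset " + name };
    offset_tzs.emplace(offset, tz);          // cache the new offset zone
    return tz;
  }
  auto it = tz_names.find(name);
  return it == tz_names.end() ? nullptr : it->second;
}

int main() {
  std::printf("%s\n", tz_find("+05:30")->description.c_str());
  std::printf("%p\n", (void *) tz_find("Europe/Moscow"));
  return 0;
}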
diff --git a/sql/tztime.h b/sql/tztime.h
index 9bf103519c4..9990e91f17b 100644
--- a/sql/tztime.h
+++ b/sql/tztime.h
@@ -1,3 +1,6 @@
+#ifndef TZTIME_INCLUDED
+#define TZTIME_INCLUDED
+
/* Copyright (C) 2004 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -79,3 +82,4 @@ static const int MY_TZ_TABLES_COUNT= 4;
#endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */
+#endif /* TZTIME_INCLUDED */
diff --git a/sql/udf_example.c b/sql/udf_example.c
index 82af58ec502..4e3dd82c467 100644
--- a/sql/udf_example.c
+++ b/sql/udf_example.c
@@ -139,6 +139,11 @@ typedef long long longlong;
#include <mysql.h>
#include <ctype.h>
+#ifdef _WIN32
+/* inet_aton needs winsock library */
+#pragma comment(lib, "ws2_32")
+#endif
+
#ifdef HAVE_DLOPEN
static pthread_mutex_t LOCK_hostname;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 60674b8390b..f08c64a3182 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -55,10 +55,12 @@ static bool make_empty_rec(THD *thd, int file, enum legacy_db_type table_type,
struct Pack_header_error_handler: public Internal_error_handler
{
- virtual bool handle_error(uint sql_errno,
- const char *message,
- MYSQL_ERROR::enum_warning_level level,
- THD *thd);
+ virtual bool handle_condition(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ MYSQL_ERROR::enum_warning_level level,
+ const char* msg,
+ MYSQL_ERROR ** cond_hdl);
bool is_handled;
Pack_header_error_handler() :is_handled(FALSE) {}
};
@@ -66,11 +68,14 @@ struct Pack_header_error_handler: public Internal_error_handler
bool
Pack_header_error_handler::
-handle_error(uint sql_errno,
- const char * /* message */,
- MYSQL_ERROR::enum_warning_level /* level */,
- THD * /* thd */)
+handle_condition(THD *,
+ uint sql_errno,
+ const char*,
+ MYSQL_ERROR::enum_warning_level,
+ const char*,
+ MYSQL_ERROR ** cond_hdl)
{
+ *cond_hdl= NULL;
is_handled= (sql_errno == ER_TOO_MANY_FIELDS);
return is_handled;
}
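
The rewritten handler above follows the new Internal_error_handler contract: handle_condition() also receives the SQLSTATE and message text, and reports back through the cond_hdl out-parameter (NULL meaning "no replacement condition"). A simplified sketch of that contract with stand-in types instead of THD and MYSQL_ERROR; the ER_TOO_MANY_FIELDS value used here is illustrative only:

#include <cstdio>

struct Condition { int errno_; };
enum warning_level { WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR };

struct Internal_error_handler {
  // Returning true means "condition handled, do not report it further".
  // A handler may hand back its own condition via cond_hdl; setting
  // *cond_hdl to nullptr keeps the default behaviour.
  virtual bool handle_condition(int sql_errno, const char *sqlstate,
                                warning_level level, const char *msg,
                                Condition **cond_hdl) = 0;
  virtual ~Internal_error_handler() {}
};

static const int ER_TOO_MANY_FIELDS = 1117;  // value is illustrative only

struct Pack_header_error_handler : Internal_error_handler {
  bool is_handled = false;
  bool handle_condition(int sql_errno, const char *, warning_level,
                        const char *, Condition **cond_hdl) override {
    *cond_hdl = nullptr;                     // no replacement condition
    is_handled = (sql_errno == ER_TOO_MANY_FIELDS);
    return is_handled;
  }
};

int main() {
  Pack_header_error_handler h;
  Condition *cond = nullptr;
  bool swallowed = h.handle_condition(ER_TOO_MANY_FIELDS, "HY000",
                                      WARN_LEVEL_ERROR, "too many columns", &cond);
  std::printf("handled=%d is_handled=%d\n", (int) swallowed, (int) h.is_handled);
  return 0;
}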
@@ -796,20 +801,27 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
recpos= field->offset+1 + (uint) data_offset;
int3store(buff+5,recpos);
int2store(buff+8,field->pack_flag);
- int2store(buff+10,field->unireg_check);
+ DBUG_ASSERT(field->unireg_check < 256);
+ buff[10]= (uchar) field->unireg_check;
buff[12]= (uchar) field->interval_id;
buff[13]= (uchar) field->sql_type;
if (field->sql_type == MYSQL_TYPE_GEOMETRY)
{
+ buff[11]= 0;
buff[14]= (uchar) field->geom_type;
#ifndef HAVE_SPATIAL
DBUG_ASSERT(0); // Should never happen
#endif
}
else if (field->charset)
+ {
+ buff[11]= (uchar) (field->charset->number >> 8);
buff[14]= (uchar) field->charset->number;
+ }
else
- buff[14]= 0; // Numerical
+ {
+ buff[11]= buff[14]= 0; // Numerical
+ }
int2store(buff+15, field->comment.length);
comment_length+= field->comment.length;
set_if_bigger(int_count,field->interval_id);
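
The packing change above frees buff[11] for the high byte of the collation id: unireg_check is asserted to fit in a single byte and stored in buff[10], while the charset number is split across buff[11] (high byte) and buff[14] (low byte) so collation ids above 255 survive the .frm round trip. A tiny standalone sketch of that byte layout (buffer size and sample values are invented):

#include <cassert>
#include <cstdio>

typedef unsigned char uchar;

int main() {
  uchar buff[17] = {0};
  unsigned unireg_check = 7;        // must stay below 256
  unsigned charset_number = 310;    // e.g. a two-byte collation id

  assert(unireg_check < 256);
  buff[10] = (uchar) unireg_check;
  buff[11] = (uchar) (charset_number >> 8);  // high byte
  buff[14] = (uchar) charset_number;         // low byte

  unsigned restored = ((unsigned) buff[11] << 8) | buff[14];
  std::printf("stored %u, restored %u\n", charset_number, restored);
  return 0;
}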
diff --git a/sql/unireg.h b/sql/unireg.h
index 3ff7f058e3c..a390b755772 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -1,3 +1,6 @@
+#ifndef UNIREG_INCLUDED
+#define UNIREG_INCLUDED
+
/* Copyright (C) 2000-2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -16,8 +19,6 @@
/* Extra functions used by unireg library */
-#ifndef _unireg_h
-
#ifndef NO_ALARM_LOOP
#define NO_ALARM_LOOP /* lib5 and popen can't use alarm */
#endif
@@ -39,7 +40,11 @@
#define PLUGINDIR "lib/plugin"
#endif
-#define ER(X) errmesg[(X) - ER_ERROR_FIRST]
+#define CURRENT_THD_ERRMSGS current_thd->variables.lc_messages->errmsgs->errmsgs
+#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs
+
+#define ER(X) CURRENT_THD_ERRMSGS[(X) - ER_ERROR_FIRST]
+#define ER_DEFAULT(X) DEFAULT_ERRMSGS[(X) - ER_ERROR_FIRST]
#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER(X) : "Invalid error code")
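
The ER() rewrite above switches error-message lookup from one global array to the message table of the current session's locale, with ER_DEFAULT() reading the server default instead. A compact sketch of that split, with invented locale tables and message text; only the macro shape mirrors the real definitions:

#include <cstdio>

static const int ER_ERROR_FIRST = 1000;
static const char *errmsgs_english[] = { "Unknown error", "Out of resources" };
static const char *errmsgs_german[]  = { "Unbekannter Fehler", "Kein Speicher mehr" };

struct Locale { const char **errmsgs; };
static Locale default_locale = { errmsgs_english };
static Locale session_locale = { errmsgs_german };   // per-thread in the server

#define CURRENT_THD_ERRMSGS session_locale.errmsgs
#define DEFAULT_ERRMSGS     default_locale.errmsgs
#define ER(X)         CURRENT_THD_ERRMSGS[(X) - ER_ERROR_FIRST]
#define ER_DEFAULT(X) DEFAULT_ERRMSGS[(X) - ER_ERROR_FIRST]

int main() {
  std::printf("session message: %s\n", ER(1001));
  std::printf("default message: %s\n", ER_DEFAULT(1001));
  return 0;
}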